diff --git a/.gitea/workflows/docs.yml b/.gitea/workflows/docs.yml index c7e19ad1..e73fb71e 100755 --- a/.gitea/workflows/docs.yml +++ b/.gitea/workflows/docs.yml @@ -34,18 +34,24 @@ jobs: with: node-version: ${{ env.NODE_VERSION }} - - name: Install markdown linters - run: | - npm install markdown-link-check remark-cli remark-preset-lint-recommended + - name: Install documentation toolchain + run: | + npm install --no-save markdown-link-check remark-cli remark-preset-lint-recommended ajv ajv-cli ajv-formats - name: Link check run: | find docs -name '*.md' -print0 | \ xargs -0 -n1 -I{} npx markdown-link-check --quiet '{}' - - name: Remark lint - run: | - npx remark docs -qf + - name: Remark lint + run: | + npx remark docs -qf + + - name: Validate event schemas + run: | + for schema in docs/events/*.json; do + npx ajv compile -c ajv-formats -s "$schema" + done - name: Setup Python uses: actions/setup-python@v5 diff --git a/SPRINTS.md b/SPRINTS.md index 38ae9843..17bb5faa 100644 --- a/SPRINTS.md +++ b/SPRINTS.md @@ -168,38 +168,38 @@ This file describe implementation of Stella Ops (docs/README.md). Implementation | Sprint 8 | Mirror Distribution | src/StellaOps.Concelier.WebService/TASKS.md | TODO | Concelier WebService Guild | CONCELIER-WEB-08-201 | Mirror distribution endpoints – expose domain-scoped index/download APIs with auth/quota. | | Sprint 8 | Mirror Distribution | src/StellaOps.Concelier.Connector.StellaOpsMirror/TASKS.md | TODO | BE-Conn-Stella | FEEDCONN-STELLA-08-001 | Concelier mirror connector – fetch mirror manifest, verify signatures, and hydrate canonical DTOs with resume support. | | Sprint 8 | Mirror Distribution | ops/devops/TASKS.md | TODO | DevOps Guild | DEVOPS-MIRROR-08-001 | Managed mirror deployments for `*.stella-ops.org` – Helm/Compose overlays, CDN, runbooks. 
| -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Core/TASKS.md | TODO | Team Scanner Core | SCANNER-CORE-09-501 | Define shared DTOs (ScanJob, ProgressEvent), error taxonomy, and deterministic ID/timestamp helpers aligning with `ARCHITECTURE_SCANNER.md` §3–§4. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Core/TASKS.md | TODO | Team Scanner Core | SCANNER-CORE-09-502 | Observability helpers (correlation IDs, logging scopes, metric namespacing, deterministic hashes) consumed by WebService/Worker. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Core/TASKS.md | TODO | Team Scanner Core | SCANNER-CORE-09-503 | Security utilities: Authority client factory, OpTok caching, DPoP verifier, restart-time plug-in guardrails for scanner components. | -| Sprint 9 | Scanner Build-time | src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md | TODO | BuildX Guild | SP9-BLDX-09-001 | Buildx driver scaffold + handshake with Scanner.Emit (local CAS). | -| Sprint 9 | Scanner Build-time | src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md | TODO | BuildX Guild | SP9-BLDX-09-002 | OCI annotations + provenance hand-off to Attestor. | -| Sprint 9 | Scanner Build-time | src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md | TODO | BuildX Guild | SP9-BLDX-09-003 | CI demo: minimal SBOM push & backend report wiring. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | TODO | Team Scanner WebService | SCANNER-WEB-09-101 | Minimal API host with Authority enforcement, health/ready endpoints, and restart-time plug-in loader per architecture §1, §4. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | TODO | Team Scanner WebService | SCANNER-WEB-09-102 | `/api/v1/scans` submission/status endpoints with deterministic IDs, validation, and cancellation support. 
| +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Core/TASKS.md | DONE (2025-10-18) | Team Scanner Core | SCANNER-CORE-09-501 | Define shared DTOs (ScanJob, ProgressEvent), error taxonomy, and deterministic ID/timestamp helpers aligning with `ARCHITECTURE_SCANNER.md` §3–§4. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Core/TASKS.md | DONE (2025-10-18) | Team Scanner Core | SCANNER-CORE-09-502 | Observability helpers (correlation IDs, logging scopes, metric namespacing, deterministic hashes) consumed by WebService/Worker. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Core/TASKS.md | DONE (2025-10-18) | Team Scanner Core | SCANNER-CORE-09-503 | Security utilities: Authority client factory, OpTok caching, DPoP verifier, restart-time plug-in guardrails for scanner components. | +| Sprint 9 | Scanner Build-time | src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md | DONE | BuildX Guild | SP9-BLDX-09-001 | Buildx driver scaffold + handshake with Scanner.Emit (local CAS). | +| Sprint 9 | Scanner Build-time | src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md | DONE | BuildX Guild | SP9-BLDX-09-002 | OCI annotations + provenance hand-off to Attestor. | +| Sprint 9 | Scanner Build-time | src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md | DONE | BuildX Guild | SP9-BLDX-09-003 | CI demo: minimal SBOM push & backend report wiring. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | DONE (2025-10-18) | Team Scanner WebService | SCANNER-WEB-09-101 | Minimal API host with Authority enforcement, health/ready endpoints, and restart-time plug-in loader per architecture §1, §4. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | DONE (2025-10-18) | Team Scanner WebService | SCANNER-WEB-09-102 | `/api/v1/scans` submission/status endpoints with deterministic IDs, validation, and cancellation support. 
| | Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | TODO | Team Scanner WebService | SCANNER-WEB-09-103 | Progress streaming (SSE/JSONL) with correlation IDs and ISO-8601 UTC timestamps, documented in API reference. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | TODO | Team Scanner WebService | SCANNER-WEB-09-104 | Configuration binding for Mongo, MinIO, queue, feature flags; startup diagnostics and fail-fast policy. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | DONE (2025-10-19) | Team Scanner WebService | SCANNER-WEB-09-104 | Configuration binding for Mongo, MinIO, queue, feature flags; startup diagnostics and fail-fast policy. | | Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | TODO | Team Scanner WebService | SCANNER-POLICY-09-105 | Policy snapshot loader + schema + OpenAPI (YAML ignore rules, VEX include/exclude, vendor precedence). | | Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | TODO | Team Scanner WebService | SCANNER-POLICY-09-106 | `/reports` verdict assembly (Feedser+Vexer+Policy) + signed response envelope. | | Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.WebService/TASKS.md | TODO | Team Scanner WebService | SCANNER-POLICY-09-107 | Expose score inputs, config version, and quiet provenance in `/reports` JSON and signed payload. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Worker/TASKS.md | TODO | Team Scanner Worker | SCANNER-WORKER-09-201 | Worker host bootstrap with Authority auth, hosted services, and graceful shutdown semantics. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Worker/TASKS.md | TODO | Team Scanner Worker | SCANNER-WORKER-09-202 | Lease/heartbeat loop with retry+jitter, poison-job quarantine, structured logging. 
| -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Worker/TASKS.md | TODO | Team Scanner Worker | SCANNER-WORKER-09-203 | Analyzer dispatch skeleton emitting deterministic stage progress and honoring cancellation tokens. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Worker/TASKS.md | TODO | Team Scanner Worker | SCANNER-WORKER-09-204 | Worker metrics (queue latency, stage duration, failure counts) with OpenTelemetry resource wiring. | -| Sprint 9 | Policy Foundations | src/StellaOps.Policy/TASKS.md | TODO | Policy Guild | POLICY-CORE-09-001 | Policy schema + binder + diagnostics. | -| Sprint 9 | Policy Foundations | src/StellaOps.Policy/TASKS.md | TODO | Policy Guild | POLICY-CORE-09-002 | Policy snapshot store + revision digests. | -| Sprint 9 | Policy Foundations | src/StellaOps.Policy/TASKS.md | TODO | Policy Guild | POLICY-CORE-09-003 | `/policy/preview` API (image digest → projected verdict diff). | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Worker/TASKS.md | DONE (2025-10-19) | Team Scanner Worker | SCANNER-WORKER-09-201 | Worker host bootstrap with Authority auth, hosted services, and graceful shutdown semantics. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Worker/TASKS.md | DONE (2025-10-19) | Team Scanner Worker | SCANNER-WORKER-09-202 | Lease/heartbeat loop with retry+jitter, poison-job quarantine, structured logging. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Worker/TASKS.md | DONE (2025-10-19) | Team Scanner Worker | SCANNER-WORKER-09-203 | Analyzer dispatch skeleton emitting deterministic stage progress and honoring cancellation tokens. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Worker/TASKS.md | DONE (2025-10-19) | Team Scanner Worker | SCANNER-WORKER-09-204 | Worker metrics (queue latency, stage duration, failure counts) with OpenTelemetry resource wiring. 
| +| Sprint 9 | Policy Foundations | src/StellaOps.Policy/TASKS.md | DONE | Policy Guild | POLICY-CORE-09-001 | Policy schema + binder + diagnostics. | +| Sprint 9 | Policy Foundations | src/StellaOps.Policy/TASKS.md | DONE | Policy Guild | POLICY-CORE-09-002 | Policy snapshot store + revision digests. | +| Sprint 9 | Policy Foundations | src/StellaOps.Policy/TASKS.md | DONE | Policy Guild | POLICY-CORE-09-003 | `/policy/preview` API (image digest → projected verdict diff). | | Sprint 9 | Policy Foundations | src/StellaOps.Policy/TASKS.md | TODO | Policy Guild | POLICY-CORE-09-004 | Versioned scoring config with schema validation, trust table, and golden fixtures. | | Sprint 9 | Policy Foundations | src/StellaOps.Policy/TASKS.md | TODO | Policy Guild | POLICY-CORE-09-005 | Scoring/quiet engine – compute score, enforce VEX-only quiet rules, emit inputs and provenance. | | Sprint 9 | Policy Foundations | src/StellaOps.Policy/TASKS.md | TODO | Policy Guild | POLICY-CORE-09-006 | Unknown state & confidence decay – deterministic bands surfaced in policy outputs. | -| Sprint 9 | DevOps Foundations | ops/devops/TASKS.md | TODO | DevOps Guild | DEVOPS-HELM-09-001 | Helm/Compose environment profiles (dev/staging/airgap) with deterministic digests. | -| Sprint 9 | Docs & Governance | docs/TASKS.md | TODO | Docs Guild, DevEx | DOCS-ADR-09-001 | Establish ADR process and template. | -| Sprint 9 | Docs & Governance | docs/TASKS.md | TODO | Docs Guild, Platform Events | DOCS-EVENTS-09-002 | Publish event schema catalog (`docs/events/`) for critical envelopes. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Storage/TASKS.md | TODO | Team Scanner Storage | SCANNER-STORAGE-09-301 | Mongo catalog schemas/indexes for images, layers, artifacts, jobs, lifecycle rules plus migrations. 
| -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Storage/TASKS.md | TODO | Team Scanner Storage | SCANNER-STORAGE-09-302 | MinIO layout, immutability policies, client abstraction, and configuration binding. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Storage/TASKS.md | TODO | Team Scanner Storage | SCANNER-STORAGE-09-303 | Repositories/services with dual-write feature flag, deterministic digests, TTL enforcement tests. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Queue/TASKS.md | TODO | Team Scanner Queue | SCANNER-QUEUE-09-401 | Queue abstraction + Redis Streams adapter with ack/claim APIs and idempotency tokens. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Queue/TASKS.md | TODO | Team Scanner Queue | SCANNER-QUEUE-09-402 | Pluggable backend support (Redis, NATS) with configuration binding, health probes, failover docs. | -| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Queue/TASKS.md | TODO | Team Scanner Queue | SCANNER-QUEUE-09-403 | Retry + dead-letter strategy with structured logs/metrics for offline deployments. | +| Sprint 9 | DevOps Foundations | ops/devops/TASKS.md | DONE (2025-10-19) | DevOps Guild | DEVOPS-HELM-09-001 | Helm/Compose environment profiles (dev/staging/airgap) with deterministic digests. | +| Sprint 9 | Docs & Governance | docs/TASKS.md | DONE (2025-10-19) | Docs Guild, DevEx | DOCS-ADR-09-001 | Establish ADR process and template. | +| Sprint 9 | Docs & Governance | docs/TASKS.md | DONE (2025-10-19) | Docs Guild, Platform Events | DOCS-EVENTS-09-002 | Publish event schema catalog (`docs/events/`) for critical envelopes. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Storage/TASKS.md | DONE (2025-10-19) | Team Scanner Storage | SCANNER-STORAGE-09-301 | Mongo catalog schemas/indexes for images, layers, artifacts, jobs, lifecycle rules plus migrations. 
| +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Storage/TASKS.md | DONE (2025-10-19) | Team Scanner Storage | SCANNER-STORAGE-09-302 | MinIO layout, immutability policies, client abstraction, and configuration binding. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Storage/TASKS.md | DONE (2025-10-19) | Team Scanner Storage | SCANNER-STORAGE-09-303 | Repositories/services with dual-write feature flag, deterministic digests, TTL enforcement tests. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Queue/TASKS.md | DONE (2025-10-19) | Team Scanner Queue | SCANNER-QUEUE-09-401 | Queue abstraction + Redis Streams adapter with ack/claim APIs and idempotency tokens. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Queue/TASKS.md | DONE (2025-10-19) | Team Scanner Queue | SCANNER-QUEUE-09-402 | Pluggable backend support (Redis, NATS) with configuration binding, health probes, failover docs. | +| Sprint 9 | Scanner Core Foundations | src/StellaOps.Scanner.Queue/TASKS.md | DONE (2025-10-19) | Team Scanner Queue | SCANNER-QUEUE-09-403 | Retry + dead-letter strategy with structured logs/metrics for offline deployments. | | Sprint 10 | Scanner Analyzers & SBOM | src/StellaOps.Scanner.Cache/TASKS.md | TODO | Scanner Cache Guild | SCANNER-CACHE-10-101 | Implement layer cache store keyed by layer digest with metadata retention per architecture §3.3. | | Sprint 10 | Scanner Analyzers & SBOM | src/StellaOps.Scanner.Cache/TASKS.md | TODO | Scanner Cache Guild | SCANNER-CACHE-10-102 | Build file CAS with dedupe, TTL enforcement, and offline import/export hooks. | | Sprint 10 | Scanner Analyzers & SBOM | src/StellaOps.Scanner.Cache/TASKS.md | TODO | Scanner Cache Guild | SCANNER-CACHE-10-103 | Expose cache metrics/logging and configuration toggles for warm/cold thresholds. 
| diff --git a/SPRINTS_IMPLEMENTION_PLAN.md b/SPRINTS_IMPLEMENTION_PLAN.md index 8977c123..92952176 100644 --- a/SPRINTS_IMPLEMENTION_PLAN.md +++ b/SPRINTS_IMPLEMENTION_PLAN.md @@ -24,41 +24,47 @@ Durations are estimated work sizes (1 d ≈ one focused engineer day). Milesto - Tasks: SCANNER-QUEUE-09-401 (3 d), -402 (2 d), -403 (2 d) · `/src/StellaOps.Scanner.Queue/TASKS.md` - Acceptance: dequeue latency p95 ≤20 ms at 40 rps; chaos test retains leases. - Gate: Redis/NATS adapters docs + `QueueLeaseIntegrationTests` passing. +- Status: **DONE (2025-10-19)** – Gate satisfied via Redis/NATS adapter docs and `QueueLeaseIntegrationTests` run under fake clock. ### Group SP9-G3 — Storage Backbone (src/StellaOps.Scanner.Storage) ~1 w - Tasks: SCANNER-STORAGE-09-301 (3 d), -302 (2 d), -303 (2 d) - Acceptance: majority write/read ≤50 ms; TTL verified. - Gate: migrations checked in; `StorageDualWriteFixture` passes. +- Status: **DONE (2025-10-19)** – Mongo bootstrapper + migrations committed; MinIO dual-write service wired; `StorageDualWriteFixture` green on Mongo2Go. ### Group SP9-G4 — WebService Host & Policy Surfacing (src/StellaOps.Scanner.WebService) ~1.2 w - Tasks: SCANNER-WEB-09-101 (2 d), -102 (3 d), -103 (2 d), -104 (2 d), SCANNER-POLICY-09-105 (3 d), SCANNER-POLICY-09-106 (4 d) - Acceptance: `/api/v1/scans` enqueue p95 ≤50 ms under synthetic load; policy validation errors actionable; `/reports` response signed. - Gate SP9-G4 → SP10/SP11: `/reports` OpenAPI frozen; sample signed envelope committed in `samples/api/reports/`. +- Status: **IN PROGRESS (2025-10-19)** – Minimal host and `/api/v1/scans` endpoints delivered (SCANNER-WEB-09-101/102 done); progress streaming and policy/report surfaces remain. ### Group SP9-G5 — Worker Host (src/StellaOps.Scanner.Worker) ~1 w - Tasks: SCANNER-WORKER-09-201 (3 d), -202 (3 d), -203 (2 d), -204 (2 d) - Acceptance: job lease never drops <3× heartbeat; progress events deterministic. 
- Gate: `WorkerBasicScanScenario` integration recorded. +- Status: **DONE (2025-10-19)** – Host bootstrap + authority wiring, heartbeat loop, deterministic stage pipeline, and metrics landed; `WorkerBasicScanScenarioTests` green. ### Group SP9-G6 — Buildx Plug-in (src/StellaOps.Scanner.Sbomer.BuildXPlugin) ~0.8 w - Tasks: SP9-BLDX-09-001 (3 d), SP9-BLDX-09-002 (2 d), SP9-BLDX-09-003 (2 d) - Acceptance: build-time overhead ≤300 ms/layer on 4 vCPU; CAS handshake reliable in CI sample. - Gate: buildx demo workflow artifact + quickstart doc. +- Status: **DONE** (2025-10-19) — manifest+CAS scaffold, descriptor/Attestor hand-off, GitHub demo workflow, and quickstart committed. ### Group SP9-G7 — Policy Engine Core (src/StellaOps.Policy) ~1 w -- Tasks: POLICY-CORE-09-001 (2 d), -002 (3 d), -003 (3 d), -004 (3 d), -005 (4 d), -006 (2 d) +- Tasks: POLICY-CORE-09-001 (2 d) ✅, -002 (3 d) ✅, -003 (3 d) ✅, -004 (3 d), -005 (4 d), -006 (2 d) - Acceptance: policy parsing ≥200 files/s; preview diff response <200 ms for 500-component SBOM; quieting logic audited. - Gate: `policy-schema@1` published; revision digests stored; preview API doc updated. ### Group SP9-G8 — DevOps Early Guardrails (ops/devops) ~0.4 w -- Tasks: DEVOPS-HELM-09-001 (3 d) +- Tasks: DEVOPS-HELM-09-001 (3 d) — **DONE (2025-10-19)** - Acceptance: helm/compose profiles for dev/stage/airgap lint + dry-run clean; manifests pinned to digest. -- Gate: profiles merged under `deploy/`; install guide cross-link. +- Gate: profiles merged under `deploy/`; install guide cross-link satisfied via `deploy/compose/` bundles and `docs/21_INSTALL_GUIDE.md`. ### Group SP9-G9 — Documentation & Events (docs/) ~0.4 w - Tasks: DOCS-ADR-09-001 (2 d), DOCS-EVENTS-09-002 (2 d) - Acceptance: ADR process broadcast; event schemas validated via CI. - Gate: `docs/adr/index.md` linking template; `docs/events/README.md` referencing schemas. 
+- Status: **DONE (2025-10-19)** – ADR contribution guide + template updates merged, Docs CI Ajv validation wired, events catalog documented, guild announcement recorded. --- diff --git a/deploy/README.md b/deploy/README.md new file mode 100644 index 00000000..79554bfd --- /dev/null +++ b/deploy/README.md @@ -0,0 +1,19 @@ +# Deployment Profiles + +This directory contains deterministic deployment bundles for the core Stella Ops stack. All manifests reference immutable image digests and map 1:1 to the release manifests stored under `deploy/releases/`. + +## Structure + +- `releases/` – canonical release manifests (edge, stable, airgap) used to source image digests. +- `compose/` – Docker Compose bundles for dev/stage/airgap targets plus `.env` seed files. +- `helm/stellaops/` – multi-profile Helm chart with values files for dev/stage/airgap. +- `tools/validate-profiles.sh` – helper that runs `docker compose config` and `helm lint/template` for every profile. + +## Workflow + +1. Update or add a release manifest under `releases/` with the new digests. +2. Mirror the digests into the Compose and Helm profiles that correspond to that channel. +3. Run `deploy/tools/validate-profiles.sh` (requires Docker CLI and Helm) to ensure the bundles lint and template cleanly. +4. Commit the change alongside any documentation updates (e.g. install guide cross-links). + +Maintaining the digest linkage keeps offline/air-gapped installs reproducible and avoids tag drift between environments. diff --git a/deploy/compose/README.md b/deploy/compose/README.md new file mode 100644 index 00000000..9a1a250d --- /dev/null +++ b/deploy/compose/README.md @@ -0,0 +1,30 @@ +# Stella Ops Compose Profiles + +These Compose bundles ship the minimum services required to exercise the scanner pipeline plus control-plane dependencies. Every profile is pinned to immutable image digests sourced from `deploy/releases/*.yaml` and is linted via `docker compose config` in CI. 
+ +## Layout + +| Path | Purpose | +| ---- | ------- | +| `docker-compose.dev.yaml` | Edge/nightly stack tuned for laptops and iterative work. | +| `docker-compose.stage.yaml` | Stable channel stack mirroring pre-production clusters. | +| `docker-compose.airgap.yaml` | Stable stack with air-gapped defaults (no outbound hostnames). | +| `env/*.env.example` | Seed `.env` files that document required secrets and ports per profile. | + +## Usage + +```bash +cp env/dev.env.example dev.env +docker compose --env-file dev.env -f docker-compose.dev.yaml config +docker compose --env-file dev.env -f docker-compose.dev.yaml up -d +``` + +The stage and airgap variants behave the same way—swap the file names accordingly. All profiles expose 443/8443 for the UI and REST APIs, and they share a `stellaops` Docker network scoped to the compose project. + +### Updating to a new release + +1. Import the new manifest into `deploy/releases/` (see `deploy/README.md`). +2. Update image digests in the relevant Compose file(s). +3. Re-run `docker compose config` to confirm the bundle is deterministic. + +Keep digests synchronized between Compose, Helm, and the release manifest to preserve reproducibility guarantees. `deploy/tools/validate-profiles.sh` performs a quick audit. 
diff --git a/deploy/compose/docker-compose.airgap.yaml b/deploy/compose/docker-compose.airgap.yaml new file mode 100644 index 00000000..d73dfe2a --- /dev/null +++ b/deploy/compose/docker-compose.airgap.yaml @@ -0,0 +1,190 @@ +version: "3.9" + +x-release-labels: &release-labels + com.stellaops.release.version: "2025.09.2-airgap" + com.stellaops.release.channel: "airgap" + com.stellaops.profile: "airgap" + +networks: + stellaops: + driver: bridge + +volumes: + mongo-data: + minio-data: + concelier-jobs: + nats-data: + +services: + mongo: + image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 + command: ["mongod", "--bind_ip_all"] + restart: unless-stopped + environment: + MONGO_INITDB_ROOT_USERNAME: "${MONGO_INITDB_ROOT_USERNAME}" + MONGO_INITDB_ROOT_PASSWORD: "${MONGO_INITDB_ROOT_PASSWORD}" + volumes: + - mongo-data:/data/db + networks: + - stellaops + labels: *release-labels + + minio: + image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + command: ["server", "/data", "--console-address", ":9001"] + restart: unless-stopped + environment: + MINIO_ROOT_USER: "${MINIO_ROOT_USER}" + MINIO_ROOT_PASSWORD: "${MINIO_ROOT_PASSWORD}" + volumes: + - minio-data:/data + ports: + - "${MINIO_CONSOLE_PORT:-29001}:9001" + networks: + - stellaops + labels: *release-labels + + nats: + image: docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e + command: + - "-js" + - "-sd" + - /data + restart: unless-stopped + ports: + - "${NATS_CLIENT_PORT:-24222}:4222" + volumes: + - nats-data:/data + networks: + - stellaops + labels: *release-labels + + authority: + image: registry.stella-ops.org/stellaops/authority@sha256:5551a3269b7008cd5aceecf45df018c67459ed519557ccbe48b093b926a39bcc + restart: unless-stopped + depends_on: + - mongo + environment: + STELLAOPS_AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}" + STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: 
"mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins" + STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins" + volumes: + - ../../etc/authority.yaml:/etc/authority.yaml:ro + - ../../etc/authority.plugins:/app/etc/authority.plugins:ro + ports: + - "${AUTHORITY_PORT:-8440}:8440" + networks: + - stellaops + labels: *release-labels + + signer: + image: registry.stella-ops.org/stellaops/signer@sha256:ddbbd664a42846cea6b40fca6465bc679b30f72851158f300d01a8571c5478fc + restart: unless-stopped + depends_on: + - authority + environment: + SIGNER__AUTHORITY__BASEURL: "https://authority:8440" + SIGNER__POE__INTROSPECTURL: "${SIGNER_POE_INTROSPECT_URL}" + SIGNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + ports: + - "${SIGNER_PORT:-8441}:8441" + networks: + - stellaops + labels: *release-labels + + attestor: + image: registry.stella-ops.org/stellaops/attestor@sha256:1ff0a3124d66d3a2702d8e421df40fbd98cc75cb605d95510598ebbae1433c50 + restart: unless-stopped + depends_on: + - signer + environment: + ATTESTOR__SIGNER__BASEURL: "https://signer:8441" + ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + ports: + - "${ATTESTOR_PORT:-8442}:8442" + networks: + - stellaops + labels: *release-labels + + concelier: + image: registry.stella-ops.org/stellaops/concelier@sha256:29e2e1a0972707e092cbd3d370701341f9fec2aa9316fb5d8100480f2a1c76b5 + restart: unless-stopped + depends_on: + - mongo + - minio + environment: + CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + CONCELIER__STORAGE__S3__ENDPOINT: "http://minio:9000" + CONCELIER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + CONCELIER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + 
CONCELIER__AUTHORITY__BASEURL: "https://authority:8440" + CONCELIER__AUTHORITY__RESILIENCE__ALLOWOFFLINECACHEFALLBACK: "true" + CONCELIER__AUTHORITY__RESILIENCE__OFFLINECACHETOLERANCE: "${AUTHORITY_OFFLINE_CACHE_TOLERANCE:-00:30:00}" + volumes: + - concelier-jobs:/var/lib/concelier/jobs + ports: + - "${CONCELIER_PORT:-8445}:8445" + networks: + - stellaops + labels: *release-labels + + scanner-web: + image: registry.stella-ops.org/stellaops/scanner-web@sha256:3df8ca21878126758203c1a0444e39fd97f77ddacf04a69685cda9f1e5e94718 + restart: unless-stopped + depends_on: + - concelier + - minio + - nats + environment: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" + ports: + - "${SCANNER_WEB_PORT:-8444}:8444" + networks: + - stellaops + labels: *release-labels + + scanner-worker: + image: registry.stella-ops.org/stellaops/scanner-worker@sha256:eea5d6cfe7835950c5ec7a735a651f2f0d727d3e470cf9027a4a402ea89c4fb5 + restart: unless-stopped + depends_on: + - scanner-web + - nats + environment: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" + networks: + - stellaops + labels: *release-labels + + excititor: + image: registry.stella-ops.org/stellaops/excititor@sha256:65c0ee13f773efe920d7181512349a09d363ab3f3e177d276136bd2742325a68 + restart: unless-stopped + depends_on: + - concelier + environment: + EXCITITOR__CONCELIER__BASEURL: "https://concelier:8445" + 
EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + networks: + - stellaops + labels: *release-labels + + web-ui: + image: registry.stella-ops.org/stellaops/web-ui@sha256:bee9668011ff414572131dc777faab4da24473fe12c230893f161cabee092a1d + restart: unless-stopped + depends_on: + - scanner-web + environment: + STELLAOPS_UI__BACKEND__BASEURL: "https://scanner-web:8444" + ports: + - "${UI_PORT:-9443}:8443" + networks: + - stellaops + labels: *release-labels diff --git a/deploy/compose/docker-compose.dev.yaml b/deploy/compose/docker-compose.dev.yaml new file mode 100644 index 00000000..e4c428de --- /dev/null +++ b/deploy/compose/docker-compose.dev.yaml @@ -0,0 +1,188 @@ +version: "3.9" + +x-release-labels: &release-labels + com.stellaops.release.version: "2025.10.0-edge" + com.stellaops.release.channel: "edge" + com.stellaops.profile: "dev" + +networks: + stellaops: + driver: bridge + +volumes: + mongo-data: + minio-data: + concelier-jobs: + nats-data: + +services: + mongo: + image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 + command: ["mongod", "--bind_ip_all"] + restart: unless-stopped + environment: + MONGO_INITDB_ROOT_USERNAME: "${MONGO_INITDB_ROOT_USERNAME}" + MONGO_INITDB_ROOT_PASSWORD: "${MONGO_INITDB_ROOT_PASSWORD}" + volumes: + - mongo-data:/data/db + networks: + - stellaops + labels: *release-labels + + minio: + image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + command: ["server", "/data", "--console-address", ":9001"] + restart: unless-stopped + environment: + MINIO_ROOT_USER: "${MINIO_ROOT_USER}" + MINIO_ROOT_PASSWORD: "${MINIO_ROOT_PASSWORD}" + volumes: + - minio-data:/data + ports: + - "${MINIO_CONSOLE_PORT:-9001}:9001" + networks: + - stellaops + labels: *release-labels + + nats: + image: 
docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e + command: + - "-js" + - "-sd" + - /data + restart: unless-stopped + ports: + - "${NATS_CLIENT_PORT:-4222}:4222" + volumes: + - nats-data:/data + networks: + - stellaops + labels: *release-labels + + authority: + image: registry.stella-ops.org/stellaops/authority@sha256:a8e8faec44a579aa5714e58be835f25575710430b1ad2ccd1282a018cd9ffcdd + restart: unless-stopped + depends_on: + - mongo + environment: + STELLAOPS_AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}" + STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins" + STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins" + volumes: + - ../../etc/authority.yaml:/etc/authority.yaml:ro + - ../../etc/authority.plugins:/app/etc/authority.plugins:ro + ports: + - "${AUTHORITY_PORT:-8440}:8440" + networks: + - stellaops + labels: *release-labels + + signer: + image: registry.stella-ops.org/stellaops/signer@sha256:8bfef9a75783883d49fc18e3566553934e970b00ee090abee9cb110d2d5c3298 + restart: unless-stopped + depends_on: + - authority + environment: + SIGNER__AUTHORITY__BASEURL: "https://authority:8440" + SIGNER__POE__INTROSPECTURL: "${SIGNER_POE_INTROSPECT_URL}" + SIGNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + ports: + - "${SIGNER_PORT:-8441}:8441" + networks: + - stellaops + labels: *release-labels + + attestor: + image: registry.stella-ops.org/stellaops/attestor@sha256:5cc417948c029da01dccf36e4645d961a3f6d8de7e62fe98d845f07cd2282114 + restart: unless-stopped + depends_on: + - signer + environment: + ATTESTOR__SIGNER__BASEURL: "https://signer:8441" + ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + ports: + - 
"${ATTESTOR_PORT:-8442}:8442" + networks: + - stellaops + labels: *release-labels + + concelier: + image: registry.stella-ops.org/stellaops/concelier@sha256:dafef3954eb4b837e2c424dd2d23e1e4d60fa83794840fac9cd3dea1d43bd085 + restart: unless-stopped + depends_on: + - mongo + - minio + environment: + CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + CONCELIER__STORAGE__S3__ENDPOINT: "http://minio:9000" + CONCELIER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + CONCELIER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + CONCELIER__AUTHORITY__BASEURL: "https://authority:8440" + volumes: + - concelier-jobs:/var/lib/concelier/jobs + ports: + - "${CONCELIER_PORT:-8445}:8445" + networks: + - stellaops + labels: *release-labels + + scanner-web: + image: registry.stella-ops.org/stellaops/scanner-web@sha256:e0dfdb087e330585a5953029fb4757f5abdf7610820a085bd61b457dbead9a11 + restart: unless-stopped + depends_on: + - concelier + - minio + - nats + environment: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" + ports: + - "${SCANNER_WEB_PORT:-8444}:8444" + networks: + - stellaops + labels: *release-labels + + scanner-worker: + image: registry.stella-ops.org/stellaops/scanner-worker@sha256:92dda42f6f64b2d9522104a5c9ffb61d37b34dd193132b68457a259748008f37 + restart: unless-stopped + depends_on: + - scanner-web + - nats + environment: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + 
SCANNER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" + networks: + - stellaops + labels: *release-labels + + excititor: + image: registry.stella-ops.org/stellaops/excititor@sha256:d9bd5cadf1eab427447ce3df7302c30ded837239771cc6433b9befb895054285 + restart: unless-stopped + depends_on: + - concelier + environment: + EXCITITOR__CONCELIER__BASEURL: "https://concelier:8445" + EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + networks: + - stellaops + labels: *release-labels + + web-ui: + image: registry.stella-ops.org/stellaops/web-ui@sha256:38b225fa7767a5b94ebae4dae8696044126aac429415e93de514d5dd95748dcf + restart: unless-stopped + depends_on: + - scanner-web + environment: + STELLAOPS_UI__BACKEND__BASEURL: "https://scanner-web:8444" + ports: + - "${UI_PORT:-8443}:8443" + networks: + - stellaops + labels: *release-labels diff --git a/deploy/compose/docker-compose.stage.yaml b/deploy/compose/docker-compose.stage.yaml new file mode 100644 index 00000000..a1504a82 --- /dev/null +++ b/deploy/compose/docker-compose.stage.yaml @@ -0,0 +1,188 @@ +version: "3.9" + +x-release-labels: &release-labels + com.stellaops.release.version: "2025.09.2" + com.stellaops.release.channel: "stable" + com.stellaops.profile: "stage" + +networks: + stellaops: + driver: bridge + +volumes: + mongo-data: + minio-data: + concelier-jobs: + nats-data: + +services: + mongo: + image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 + command: ["mongod", "--bind_ip_all"] + restart: unless-stopped + environment: + MONGO_INITDB_ROOT_USERNAME: "${MONGO_INITDB_ROOT_USERNAME}" + MONGO_INITDB_ROOT_PASSWORD: "${MONGO_INITDB_ROOT_PASSWORD}" + volumes: + - mongo-data:/data/db + networks: + - stellaops + labels: *release-labels + + minio: + image: 
docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + command: ["server", "/data", "--console-address", ":9001"] + restart: unless-stopped + environment: + MINIO_ROOT_USER: "${MINIO_ROOT_USER}" + MINIO_ROOT_PASSWORD: "${MINIO_ROOT_PASSWORD}" + volumes: + - minio-data:/data + ports: + - "${MINIO_CONSOLE_PORT:-9001}:9001" + networks: + - stellaops + labels: *release-labels + + nats: + image: docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e + command: + - "-js" + - "-sd" + - /data + restart: unless-stopped + ports: + - "${NATS_CLIENT_PORT:-4222}:4222" + volumes: + - nats-data:/data + networks: + - stellaops + labels: *release-labels + + authority: + image: registry.stella-ops.org/stellaops/authority@sha256:b0348bad1d0b401cc3c71cb40ba034c8043b6c8874546f90d4783c9dbfcc0bf5 + restart: unless-stopped + depends_on: + - mongo + environment: + STELLAOPS_AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}" + STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins" + STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins" + volumes: + - ../../etc/authority.yaml:/etc/authority.yaml:ro + - ../../etc/authority.plugins:/app/etc/authority.plugins:ro + ports: + - "${AUTHORITY_PORT:-8440}:8440" + networks: + - stellaops + labels: *release-labels + + signer: + image: registry.stella-ops.org/stellaops/signer@sha256:8ad574e61f3a9e9bda8a58eb2700ae46813284e35a150b1137bc7c2b92ac0f2e + restart: unless-stopped + depends_on: + - authority + environment: + SIGNER__AUTHORITY__BASEURL: "https://authority:8440" + SIGNER__POE__INTROSPECTURL: "${SIGNER_POE_INTROSPECT_URL}" + SIGNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + ports: + - "${SIGNER_PORT:-8441}:8441" + networks: + - stellaops + 
labels: *release-labels + + attestor: + image: registry.stella-ops.org/stellaops/attestor@sha256:0534985f978b0b5d220d73c96fddd962cd9135f616811cbe3bff4666c5af568f + restart: unless-stopped + depends_on: + - signer + environment: + ATTESTOR__SIGNER__BASEURL: "https://signer:8441" + ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + ports: + - "${ATTESTOR_PORT:-8442}:8442" + networks: + - stellaops + labels: *release-labels + + concelier: + image: registry.stella-ops.org/stellaops/concelier@sha256:c58cdcaee1d266d68d498e41110a589dd204b487d37381096bd61ab345a867c5 + restart: unless-stopped + depends_on: + - mongo + - minio + environment: + CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + CONCELIER__STORAGE__S3__ENDPOINT: "http://minio:9000" + CONCELIER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + CONCELIER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + CONCELIER__AUTHORITY__BASEURL: "https://authority:8440" + volumes: + - concelier-jobs:/var/lib/concelier/jobs + ports: + - "${CONCELIER_PORT:-8445}:8445" + networks: + - stellaops + labels: *release-labels + + scanner-web: + image: registry.stella-ops.org/stellaops/scanner-web@sha256:14b23448c3f9586a9156370b3e8c1991b61907efa666ca37dd3aaed1e79fe3b7 + restart: unless-stopped + depends_on: + - concelier + - minio + - nats + environment: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" + ports: + - "${SCANNER_WEB_PORT:-8444}:8444" + networks: + - stellaops + labels: *release-labels + + scanner-worker: + image: 
registry.stella-ops.org/stellaops/scanner-worker@sha256:32e25e76386eb9ea8bee0a1ad546775db9a2df989fab61ac877e351881960dab + restart: unless-stopped + depends_on: + - scanner-web + - nats + environment: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" + networks: + - stellaops + labels: *release-labels + + excititor: + image: registry.stella-ops.org/stellaops/excititor@sha256:59022e2016aebcef5c856d163ae705755d3f81949d41195256e935ef40a627fa + restart: unless-stopped + depends_on: + - concelier + environment: + EXCITITOR__CONCELIER__BASEURL: "https://concelier:8445" + EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + networks: + - stellaops + labels: *release-labels + + web-ui: + image: registry.stella-ops.org/stellaops/web-ui@sha256:10d924808c48e4353e3a241da62eb7aefe727a1d6dc830eb23a8e181013b3a23 + restart: unless-stopped + depends_on: + - scanner-web + environment: + STELLAOPS_UI__BACKEND__BASEURL: "https://scanner-web:8444" + ports: + - "${UI_PORT:-8443}:8443" + networks: + - stellaops + labels: *release-labels diff --git a/deploy/compose/env/airgap.env.example b/deploy/compose/env/airgap.env.example new file mode 100644 index 00000000..f0f22cfa --- /dev/null +++ b/deploy/compose/env/airgap.env.example @@ -0,0 +1,17 @@ +# Substitutions for docker-compose.airgap.yaml +MONGO_INITDB_ROOT_USERNAME=stellaops +MONGO_INITDB_ROOT_PASSWORD=airgap-password +MINIO_ROOT_USER=stellaops-offline +MINIO_ROOT_PASSWORD=airgap-minio-secret +MINIO_CONSOLE_PORT=29001 +AUTHORITY_ISSUER=https://authority.airgap.local +AUTHORITY_PORT=8440 +SIGNER_POE_INTROSPECT_URL=file:///offline/poe/introspect.json 
+SIGNER_PORT=8441 +ATTESTOR_PORT=8442 +CONCELIER_PORT=8445 +SCANNER_WEB_PORT=8444 +UI_PORT=9443 +NATS_CLIENT_PORT=24222 +SCANNER_QUEUE_BROKER=nats://nats:4222 +AUTHORITY_OFFLINE_CACHE_TOLERANCE=00:45:00 diff --git a/deploy/compose/env/dev.env.example b/deploy/compose/env/dev.env.example new file mode 100644 index 00000000..fc2ad525 --- /dev/null +++ b/deploy/compose/env/dev.env.example @@ -0,0 +1,16 @@ +# Substitutions for docker-compose.dev.yaml +MONGO_INITDB_ROOT_USERNAME=stellaops +MONGO_INITDB_ROOT_PASSWORD=dev-password +MINIO_ROOT_USER=stellaops +MINIO_ROOT_PASSWORD=dev-minio-secret +MINIO_CONSOLE_PORT=9001 +AUTHORITY_ISSUER=https://authority.localtest.me +AUTHORITY_PORT=8440 +SIGNER_POE_INTROSPECT_URL=https://licensing.svc.local/introspect +SIGNER_PORT=8441 +ATTESTOR_PORT=8442 +CONCELIER_PORT=8445 +SCANNER_WEB_PORT=8444 +UI_PORT=8443 +NATS_CLIENT_PORT=4222 +SCANNER_QUEUE_BROKER=nats://nats:4222 diff --git a/deploy/compose/env/stage.env.example b/deploy/compose/env/stage.env.example new file mode 100644 index 00000000..c2e92809 --- /dev/null +++ b/deploy/compose/env/stage.env.example @@ -0,0 +1,16 @@ +# Substitutions for docker-compose.stage.yaml +MONGO_INITDB_ROOT_USERNAME=stellaops +MONGO_INITDB_ROOT_PASSWORD=stage-password +MINIO_ROOT_USER=stellaops-stage +MINIO_ROOT_PASSWORD=stage-minio-secret +MINIO_CONSOLE_PORT=19001 +AUTHORITY_ISSUER=https://authority.stage.stella-ops.internal +AUTHORITY_PORT=8440 +SIGNER_POE_INTROSPECT_URL=https://licensing.stage.stella-ops.internal/introspect +SIGNER_PORT=8441 +ATTESTOR_PORT=8442 +CONCELIER_PORT=8445 +SCANNER_WEB_PORT=8444 +UI_PORT=8443 +NATS_CLIENT_PORT=4222 +SCANNER_QUEUE_BROKER=nats://nats:4222 diff --git a/deploy/helm/stellaops/Chart.yaml b/deploy/helm/stellaops/Chart.yaml new file mode 100644 index 00000000..f5b57d42 --- /dev/null +++ b/deploy/helm/stellaops/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: stellaops +description: Stella Ops core stack (authority, signing, scanner, UI) with infrastructure 
primitives. +type: application +version: 0.1.0 +appVersion: "2025.10.0" diff --git a/deploy/helm/stellaops/templates/_helpers.tpl b/deploy/helm/stellaops/templates/_helpers.tpl new file mode 100644 index 00000000..a9a3dd88 --- /dev/null +++ b/deploy/helm/stellaops/templates/_helpers.tpl @@ -0,0 +1,31 @@ +{{- define "stellaops.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "stellaops.fullname" -}} +{{- $name := default .root.Chart.Name .root.Values.fullnameOverride -}} +{{- printf "%s-%s" $name .name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "stellaops.selectorLabels" -}} +app.kubernetes.io/name: {{ include "stellaops.name" .root | quote }} +app.kubernetes.io/instance: {{ .root.Release.Name | quote }} +app.kubernetes.io/component: {{ .name | quote }} +{{- if .svc.class }} +app.kubernetes.io/part-of: {{ printf "stellaops-%s" .svc.class | quote }} +{{- else }} +app.kubernetes.io/part-of: "stellaops-core" +{{- end }} +{{- end -}} + +{{- define "stellaops.labels" -}} +{{ include "stellaops.selectorLabels" . }} +helm.sh/chart: {{ printf "%s-%s" .root.Chart.Name .root.Chart.Version | quote }} +app.kubernetes.io/version: {{ .root.Values.global.release.version | quote }} +app.kubernetes.io/managed-by: {{ .root.Release.Service | quote }} +stellaops.release/channel: {{ .root.Values.global.release.channel | quote }} +stellaops.profile: {{ .root.Values.global.profile | quote }} +{{- range $k, $v := .root.Values.global.labels }} +{{ $k }}: {{ $v | quote }} +{{- end }} +{{- end -}} diff --git a/deploy/helm/stellaops/templates/configmap-release.yaml b/deploy/helm/stellaops/templates/configmap-release.yaml new file mode 100644 index 00000000..e788ba99 --- /dev/null +++ b/deploy/helm/stellaops/templates/configmap-release.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "stellaops.fullname" (dict "root" . 
"name" "release") }} + labels: + {{- include "stellaops.labels" (dict "root" . "name" "release" "svc" (dict "class" "meta")) | nindent 4 }} +data: + version: {{ .Values.global.release.version | quote }} + channel: {{ .Values.global.release.channel | quote }} + manifestSha256: {{ default "" .Values.global.release.manifestSha256 | quote }} diff --git a/deploy/helm/stellaops/templates/core.yaml b/deploy/helm/stellaops/templates/core.yaml new file mode 100644 index 00000000..4e155024 --- /dev/null +++ b/deploy/helm/stellaops/templates/core.yaml @@ -0,0 +1,125 @@ +{{- $root := . -}} +{{- range $name, $svc := .Values.services }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }} + labels: + {{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }} +spec: + replicas: {{ default 1 $svc.replicas }} + selector: + matchLabels: + {{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 6 }} + template: + metadata: + labels: + {{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 8 }} + annotations: + stellaops.release/version: {{ $root.Values.global.release.version | quote }} + stellaops.release/channel: {{ $root.Values.global.release.channel | quote }} + spec: + containers: + - name: {{ $name }} + image: {{ $svc.image | quote }} + imagePullPolicy: {{ default $root.Values.global.image.pullPolicy $svc.imagePullPolicy }} +{{- if $svc.command }} + command: +{{- range $cmd := $svc.command }} + - {{ $cmd | quote }} +{{- end }} +{{- end }} +{{- if $svc.args }} + args: +{{- range $arg := $svc.args }} + - {{ $arg | quote }} +{{- end }} +{{- end }} +{{- if $svc.env }} + env: +{{- range $envName, $envValue := $svc.env }} + - name: {{ $envName }} + value: {{ $envValue | quote }} +{{- end }} +{{- end }} +{{- if $svc.envFrom }} + envFrom: +{{ toYaml $svc.envFrom | nindent 12 }} +{{- end }} +{{- if $svc.ports }} 
+ ports: +{{- range $port := $svc.ports }} + - name: {{ default (printf "%s-%v" $name $port.containerPort) $port.name | trunc 63 | trimSuffix "-" }} + containerPort: {{ $port.containerPort }} + protocol: {{ default "TCP" $port.protocol }} +{{- end }} +{{- else if $svc.service.port }} + ports: + - name: {{ printf "%s-http" $name | trunc 63 | trimSuffix "-" }} + containerPort: {{ $svc.service.targetPort | default $svc.service.port }} + protocol: TCP +{{- end }} +{{- if $svc.resources }} + resources: +{{ toYaml $svc.resources | nindent 12 }} +{{- end }} +{{- if $svc.livenessProbe }} + livenessProbe: +{{ toYaml $svc.livenessProbe | nindent 12 }} +{{- end }} +{{- if $svc.readinessProbe }} + readinessProbe: +{{ toYaml $svc.readinessProbe | nindent 12 }} +{{- end }} +{{- if $svc.volumeMounts }} + volumeMounts: +{{ toYaml $svc.volumeMounts | nindent 12 }} +{{- end }} + {{- if or $svc.volumes $svc.volumeClaims }} + volumes: +{{- if $svc.volumes }} +{{ toYaml $svc.volumes | nindent 8 }} +{{- end }} +{{- if $svc.volumeClaims }} +{{- range $claim := $svc.volumeClaims }} + - name: {{ $claim.name }} + persistentVolumeClaim: + claimName: {{ $claim.claimName }} +{{- end }} +{{- end }} + {{- end }} + {{- if $svc.serviceAccount }} + serviceAccountName: {{ $svc.serviceAccount | quote }} + {{- end }} + {{- if $svc.nodeSelector }} + nodeSelector: +{{ toYaml $svc.nodeSelector | nindent 8 }} + {{- end }} + {{- if $svc.affinity }} + affinity: +{{ toYaml $svc.affinity | nindent 8 }} + {{- end }} + {{- if $svc.tolerations }} + tolerations: +{{ toYaml $svc.tolerations | nindent 8 }} + {{- end }} +--- +{{- if $svc.service }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }} + labels: + {{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }} +spec: + type: {{ default "ClusterIP" $svc.service.type }} + selector: + {{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" 
$svc) | nindent 4 }} + ports: + - name: {{ default "http" $svc.service.portName }} + port: {{ $svc.service.port }} + targetPort: {{ $svc.service.targetPort | default $svc.service.port }} + protocol: {{ default "TCP" $svc.service.protocol }} +--- +{{- end }} +{{- end }} diff --git a/deploy/helm/stellaops/values-airgap.yaml b/deploy/helm/stellaops/values-airgap.yaml new file mode 100644 index 00000000..878ed21c --- /dev/null +++ b/deploy/helm/stellaops/values-airgap.yaml @@ -0,0 +1,133 @@ +global: + profile: airgap + release: + version: "2025.09.2-airgap" + channel: airgap + manifestSha256: "b787b833dddd73960c31338279daa0b0a0dce2ef32bd32ef1aaf953d66135f94" + image: + pullPolicy: IfNotPresent + labels: + stellaops.io/channel: airgap +services: + authority: + image: registry.stella-ops.org/stellaops/authority@sha256:5551a3269b7008cd5aceecf45df018c67459ed519557ccbe48b093b926a39bcc + service: + port: 8440 + env: + STELLAOPS_AUTHORITY__ISSUER: "https://stellaops-authority:8440" + STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: "mongodb://stellaops-airgap:stellaops-airgap@stellaops-mongo:27017" + STELLAOPS_AUTHORITY__ALLOWANONYMOUSFALLBACK: "false" + signer: + image: registry.stella-ops.org/stellaops/signer@sha256:ddbbd664a42846cea6b40fca6465bc679b30f72851158f300d01a8571c5478fc + service: + port: 8441 + env: + SIGNER__AUTHORITY__BASEURL: "https://stellaops-authority:8440" + SIGNER__POE__INTROSPECTURL: "file:///offline/poe/introspect.json" + SIGNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops-airgap:stellaops-airgap@stellaops-mongo:27017" + attestor: + image: registry.stella-ops.org/stellaops/attestor@sha256:1ff0a3124d66d3a2702d8e421df40fbd98cc75cb605d95510598ebbae1433c50 + service: + port: 8442 + env: + ATTESTOR__SIGNER__BASEURL: "https://stellaops-signer:8441" + ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://stellaops-airgap:stellaops-airgap@stellaops-mongo:27017" + concelier: + image: 
registry.stella-ops.org/stellaops/concelier@sha256:29e2e1a0972707e092cbd3d370701341f9fec2aa9316fb5d8100480f2a1c76b5 + service: + port: 8445 + env: + CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops-airgap:stellaops-airgap@stellaops-mongo:27017" + CONCELIER__STORAGE__S3__ENDPOINT: "http://stellaops-minio:9000" + CONCELIER__STORAGE__S3__ACCESSKEYID: "stellaops-airgap" + CONCELIER__STORAGE__S3__SECRETACCESSKEY: "airgap-minio-secret" + CONCELIER__AUTHORITY__BASEURL: "https://stellaops-authority:8440" + CONCELIER__AUTHORITY__RESILIENCE__ALLOWOFFLINECACHEFALLBACK: "true" + CONCELIER__AUTHORITY__RESILIENCE__OFFLINECACHETOLERANCE: "00:45:00" + volumeMounts: + - name: concelier-jobs + mountPath: /var/lib/concelier/jobs + volumeClaims: + - name: concelier-jobs + claimName: stellaops-concelier-jobs + scanner-web: + image: registry.stella-ops.org/stellaops/scanner-web@sha256:3df8ca21878126758203c1a0444e39fd97f77ddacf04a69685cda9f1e5e94718 + service: + port: 8444 + env: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops-airgap:stellaops-airgap@stellaops-mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://stellaops-minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "stellaops-airgap" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "airgap-minio-secret" + SCANNER__QUEUE__BROKER: "nats://stellaops-nats:4222" + scanner-worker: + image: registry.stella-ops.org/stellaops/scanner-worker@sha256:eea5d6cfe7835950c5ec7a735a651f2f0d727d3e470cf9027a4a402ea89c4fb5 + env: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops-airgap:stellaops-airgap@stellaops-mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://stellaops-minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "stellaops-airgap" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "airgap-minio-secret" + SCANNER__QUEUE__BROKER: "nats://stellaops-nats:4222" + excititor: + image: registry.stella-ops.org/stellaops/excititor@sha256:65c0ee13f773efe920d7181512349a09d363ab3f3e177d276136bd2742325a68 + env: + 
EXCITITOR__CONCELIER__BASEURL: "https://stellaops-concelier:8445" + EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops-airgap:stellaops-airgap@stellaops-mongo:27017" + web-ui: + image: registry.stella-ops.org/stellaops/web-ui@sha256:bee9668011ff414572131dc777faab4da24473fe12c230893f161cabee092a1d + service: + port: 9443 + targetPort: 8443 + env: + STELLAOPS_UI__BACKEND__BASEURL: "https://stellaops-scanner-web:8444" + mongo: + class: infrastructure + image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 + service: + port: 27017 + command: + - mongod + - --bind_ip_all + env: + MONGO_INITDB_ROOT_USERNAME: stellaops-airgap + MONGO_INITDB_ROOT_PASSWORD: stellaops-airgap + volumeMounts: + - name: mongo-data + mountPath: /data/db + volumeClaims: + - name: mongo-data + claimName: stellaops-mongo-data + minio: + class: infrastructure + image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + service: + port: 9000 + command: + - server + - /data + - --console-address + - :9001 + env: + MINIO_ROOT_USER: stellaops-airgap + MINIO_ROOT_PASSWORD: airgap-minio-secret + volumeMounts: + - name: minio-data + mountPath: /data + volumeClaims: + - name: minio-data + claimName: stellaops-minio-data + nats: + class: infrastructure + image: docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e + service: + port: 4222 + command: + - -js + - -sd + - /data + volumeMounts: + - name: nats-data + mountPath: /data + volumeClaims: + - name: nats-data + claimName: stellaops-nats-data diff --git a/deploy/helm/stellaops/values-dev.yaml b/deploy/helm/stellaops/values-dev.yaml new file mode 100644 index 00000000..b567dadf --- /dev/null +++ b/deploy/helm/stellaops/values-dev.yaml @@ -0,0 +1,131 @@ +global: + profile: dev + release: + version: "2025.10.0-edge" + channel: edge + manifestSha256: 
"822f82987529ea38d2321dbdd2ef6874a4062a117116a20861c26a8df1807beb" + image: + pullPolicy: IfNotPresent + labels: + stellaops.io/channel: edge +services: + authority: + image: registry.stella-ops.org/stellaops/authority@sha256:a8e8faec44a579aa5714e58be835f25575710430b1ad2ccd1282a018cd9ffcdd + service: + port: 8440 + env: + STELLAOPS_AUTHORITY__ISSUER: "https://stellaops-authority:8440" + STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: "mongodb://stellaops:stellaops@stellaops-mongo:27017" + STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins" + STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins" + signer: + image: registry.stella-ops.org/stellaops/signer@sha256:8bfef9a75783883d49fc18e3566553934e970b00ee090abee9cb110d2d5c3298 + service: + port: 8441 + env: + SIGNER__AUTHORITY__BASEURL: "https://stellaops-authority:8440" + SIGNER__POE__INTROSPECTURL: "https://licensing.svc.local/introspect" + SIGNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops:stellaops@stellaops-mongo:27017" + attestor: + image: registry.stella-ops.org/stellaops/attestor@sha256:5cc417948c029da01dccf36e4645d961a3f6d8de7e62fe98d845f07cd2282114 + service: + port: 8442 + env: + ATTESTOR__SIGNER__BASEURL: "https://stellaops-signer:8441" + ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://stellaops:stellaops@stellaops-mongo:27017" + concelier: + image: registry.stella-ops.org/stellaops/concelier@sha256:dafef3954eb4b837e2c424dd2d23e1e4d60fa83794840fac9cd3dea1d43bd085 + service: + port: 8445 + env: + CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops:stellaops@stellaops-mongo:27017" + CONCELIER__STORAGE__S3__ENDPOINT: "http://stellaops-minio:9000" + CONCELIER__STORAGE__S3__ACCESSKEYID: "stellaops" + CONCELIER__STORAGE__S3__SECRETACCESSKEY: "dev-minio-secret" + CONCELIER__AUTHORITY__BASEURL: "https://stellaops-authority:8440" + volumeMounts: + - name: concelier-jobs + mountPath: /var/lib/concelier/jobs + volumes: + - name: concelier-jobs + emptyDir: {} + 
scanner-web: + image: registry.stella-ops.org/stellaops/scanner-web@sha256:e0dfdb087e330585a5953029fb4757f5abdf7610820a085bd61b457dbead9a11 + service: + port: 8444 + env: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops:stellaops@stellaops-mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://stellaops-minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "stellaops" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "dev-minio-secret" + SCANNER__QUEUE__BROKER: "nats://stellaops-nats:4222" + scanner-worker: + image: registry.stella-ops.org/stellaops/scanner-worker@sha256:92dda42f6f64b2d9522104a5c9ffb61d37b34dd193132b68457a259748008f37 + env: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops:stellaops@stellaops-mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://stellaops-minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "stellaops" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "dev-minio-secret" + SCANNER__QUEUE__BROKER: "nats://stellaops-nats:4222" + excititor: + image: registry.stella-ops.org/stellaops/excititor@sha256:d9bd5cadf1eab427447ce3df7302c30ded837239771cc6433b9befb895054285 + env: + EXCITITOR__CONCELIER__BASEURL: "https://stellaops-concelier:8445" + EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops:stellaops@stellaops-mongo:27017" + web-ui: + image: registry.stella-ops.org/stellaops/web-ui@sha256:38b225fa7767a5b94ebae4dae8696044126aac429415e93de514d5dd95748dcf + service: + port: 8443 + env: + STELLAOPS_UI__BACKEND__BASEURL: "https://stellaops-scanner-web:8444" + mongo: + class: infrastructure + image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 + service: + port: 27017 + command: + - mongod + - --bind_ip_all + env: + MONGO_INITDB_ROOT_USERNAME: stellaops + MONGO_INITDB_ROOT_PASSWORD: stellaops + volumeMounts: + - name: mongo-data + mountPath: /data/db + volumes: + - name: mongo-data + emptyDir: {} + minio: + class: infrastructure + image: 
docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + service: + port: 9000 + command: + - server + - /data + - --console-address + - :9001 + env: + MINIO_ROOT_USER: stellaops + MINIO_ROOT_PASSWORD: dev-minio-secret + volumeMounts: + - name: minio-data + mountPath: /data + volumes: + - name: minio-data + emptyDir: {} + nats: + class: infrastructure + image: docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e + service: + port: 4222 + command: + - -js + - -sd + - /data + volumeMounts: + - name: nats-data + mountPath: /data + volumes: + - name: nats-data + emptyDir: {} diff --git a/deploy/helm/stellaops/values-stage.yaml b/deploy/helm/stellaops/values-stage.yaml new file mode 100644 index 00000000..51064d18 --- /dev/null +++ b/deploy/helm/stellaops/values-stage.yaml @@ -0,0 +1,132 @@ +global: + profile: stage + release: + version: "2025.09.2" + channel: stable + manifestSha256: "dc3c8fe1ab83941c838ccc5a8a5862f7ddfa38c2078e580b5649db26554565b7" + image: + pullPolicy: IfNotPresent + labels: + stellaops.io/channel: stable +services: + authority: + image: registry.stella-ops.org/stellaops/authority@sha256:b0348bad1d0b401cc3c71cb40ba034c8043b6c8874546f90d4783c9dbfcc0bf5 + service: + port: 8440 + env: + STELLAOPS_AUTHORITY__ISSUER: "https://stellaops-authority:8440" + STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: "mongodb://stellaops-stage:stellaops-stage@stellaops-mongo:27017" + STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins" + STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins" + signer: + image: registry.stella-ops.org/stellaops/signer@sha256:8ad574e61f3a9e9bda8a58eb2700ae46813284e35a150b1137bc7c2b92ac0f2e + service: + port: 8441 + env: + SIGNER__AUTHORITY__BASEURL: "https://stellaops-authority:8440" + SIGNER__POE__INTROSPECTURL: "https://licensing.stage.stella-ops.internal/introspect" + SIGNER__STORAGE__MONGO__CONNECTIONSTRING: 
"mongodb://stellaops-stage:stellaops-stage@stellaops-mongo:27017" + attestor: + image: registry.stella-ops.org/stellaops/attestor@sha256:0534985f978b0b5d220d73c96fddd962cd9135f616811cbe3bff4666c5af568f + service: + port: 8442 + env: + ATTESTOR__SIGNER__BASEURL: "https://stellaops-signer:8441" + ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://stellaops-stage:stellaops-stage@stellaops-mongo:27017" + concelier: + image: registry.stella-ops.org/stellaops/concelier@sha256:c58cdcaee1d266d68d498e41110a589dd204b487d37381096bd61ab345a867c5 + service: + port: 8445 + env: + CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops-stage:stellaops-stage@stellaops-mongo:27017" + CONCELIER__STORAGE__S3__ENDPOINT: "http://stellaops-minio:9000" + CONCELIER__STORAGE__S3__ACCESSKEYID: "stellaops-stage" + CONCELIER__STORAGE__S3__SECRETACCESSKEY: "stage-minio-secret" + CONCELIER__AUTHORITY__BASEURL: "https://stellaops-authority:8440" + volumeMounts: + - name: concelier-jobs + mountPath: /var/lib/concelier/jobs + volumeClaims: + - name: concelier-jobs + claimName: stellaops-concelier-jobs + scanner-web: + image: registry.stella-ops.org/stellaops/scanner-web@sha256:14b23448c3f9586a9156370b3e8c1991b61907efa666ca37dd3aaed1e79fe3b7 + service: + port: 8444 + env: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops-stage:stellaops-stage@stellaops-mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://stellaops-minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: "stellaops-stage" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "stage-minio-secret" + SCANNER__QUEUE__BROKER: "nats://stellaops-nats:4222" + scanner-worker: + image: registry.stella-ops.org/stellaops/scanner-worker@sha256:32e25e76386eb9ea8bee0a1ad546775db9a2df989fab61ac877e351881960dab + replicas: 2 + env: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops-stage:stellaops-stage@stellaops-mongo:27017" + SCANNER__STORAGE__S3__ENDPOINT: "http://stellaops-minio:9000" + SCANNER__STORAGE__S3__ACCESSKEYID: 
"stellaops-stage" + SCANNER__STORAGE__S3__SECRETACCESSKEY: "stage-minio-secret" + SCANNER__QUEUE__BROKER: "nats://stellaops-nats:4222" + excititor: + image: registry.stella-ops.org/stellaops/excititor@sha256:59022e2016aebcef5c856d163ae705755d3f81949d41195256e935ef40a627fa + env: + EXCITITOR__CONCELIER__BASEURL: "https://stellaops-concelier:8445" + EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://stellaops-stage:stellaops-stage@stellaops-mongo:27017" + web-ui: + image: registry.stella-ops.org/stellaops/web-ui@sha256:10d924808c48e4353e3a241da62eb7aefe727a1d6dc830eb23a8e181013b3a23 + service: + port: 8443 + env: + STELLAOPS_UI__BACKEND__BASEURL: "https://stellaops-scanner-web:8444" + mongo: + class: infrastructure + image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 + service: + port: 27017 + command: + - mongod + - --bind_ip_all + env: + MONGO_INITDB_ROOT_USERNAME: stellaops-stage + MONGO_INITDB_ROOT_PASSWORD: stellaops-stage + volumeMounts: + - name: mongo-data + mountPath: /data/db + volumeClaims: + - name: mongo-data + claimName: stellaops-mongo-data + minio: + class: infrastructure + image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + service: + port: 9000 + command: + - server + - /data + - --console-address + - :9001 + env: + MINIO_ROOT_USER: stellaops-stage + MINIO_ROOT_PASSWORD: stage-minio-secret + volumeMounts: + - name: minio-data + mountPath: /data + volumeClaims: + - name: minio-data + claimName: stellaops-minio-data + nats: + class: infrastructure + image: docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e + service: + port: 4222 + command: + - -js + - -sd + - /data + volumeMounts: + - name: nats-data + mountPath: /data + volumeClaims: + - name: nats-data + claimName: stellaops-nats-data diff --git a/deploy/helm/stellaops/values.yaml b/deploy/helm/stellaops/values.yaml new file mode 100644 index 
00000000..b6c3282f --- /dev/null +++ b/deploy/helm/stellaops/values.yaml @@ -0,0 +1,10 @@ +global: + release: + version: "" + channel: "" + manifestSha256: "" + profile: "" + image: + pullPolicy: IfNotPresent + labels: {} +services: {} diff --git a/deploy/releases/2025.09-airgap.yaml b/deploy/releases/2025.09-airgap.yaml new file mode 100644 index 00000000..dccc7968 --- /dev/null +++ b/deploy/releases/2025.09-airgap.yaml @@ -0,0 +1,29 @@ +release: + version: "2025.09.2-airgap" + channel: "airgap" + date: "2025-09-20T00:00:00Z" + calendar: "2025.09" + components: + - name: authority + image: registry.stella-ops.org/stellaops/authority@sha256:5551a3269b7008cd5aceecf45df018c67459ed519557ccbe48b093b926a39bcc + - name: signer + image: registry.stella-ops.org/stellaops/signer@sha256:ddbbd664a42846cea6b40fca6465bc679b30f72851158f300d01a8571c5478fc + - name: attestor + image: registry.stella-ops.org/stellaops/attestor@sha256:1ff0a3124d66d3a2702d8e421df40fbd98cc75cb605d95510598ebbae1433c50 + - name: scanner-web + image: registry.stella-ops.org/stellaops/scanner-web@sha256:3df8ca21878126758203c1a0444e39fd97f77ddacf04a69685cda9f1e5e94718 + - name: scanner-worker + image: registry.stella-ops.org/stellaops/scanner-worker@sha256:eea5d6cfe7835950c5ec7a735a651f2f0d727d3e470cf9027a4a402ea89c4fb5 + - name: concelier + image: registry.stella-ops.org/stellaops/concelier@sha256:29e2e1a0972707e092cbd3d370701341f9fec2aa9316fb5d8100480f2a1c76b5 + - name: excititor + image: registry.stella-ops.org/stellaops/excititor@sha256:65c0ee13f773efe920d7181512349a09d363ab3f3e177d276136bd2742325a68 + - name: web-ui + image: registry.stella-ops.org/stellaops/web-ui@sha256:bee9668011ff414572131dc777faab4da24473fe12c230893f161cabee092a1d + infrastructure: + mongo: + image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 + minio: + image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + checksums: + 
releaseManifestSha256: b787b833dddd73960c31338279daa0b0a0dce2ef32bd32ef1aaf953d66135f94 diff --git a/deploy/releases/2025.09-stable.yaml b/deploy/releases/2025.09-stable.yaml new file mode 100644 index 00000000..3ee29ae4 --- /dev/null +++ b/deploy/releases/2025.09-stable.yaml @@ -0,0 +1,29 @@ +release: + version: "2025.09.2" + channel: "stable" + date: "2025-09-20T00:00:00Z" + calendar: "2025.09" + components: + - name: authority + image: registry.stella-ops.org/stellaops/authority@sha256:b0348bad1d0b401cc3c71cb40ba034c8043b6c8874546f90d4783c9dbfcc0bf5 + - name: signer + image: registry.stella-ops.org/stellaops/signer@sha256:8ad574e61f3a9e9bda8a58eb2700ae46813284e35a150b1137bc7c2b92ac0f2e + - name: attestor + image: registry.stella-ops.org/stellaops/attestor@sha256:0534985f978b0b5d220d73c96fddd962cd9135f616811cbe3bff4666c5af568f + - name: scanner-web + image: registry.stella-ops.org/stellaops/scanner-web@sha256:14b23448c3f9586a9156370b3e8c1991b61907efa666ca37dd3aaed1e79fe3b7 + - name: scanner-worker + image: registry.stella-ops.org/stellaops/scanner-worker@sha256:32e25e76386eb9ea8bee0a1ad546775db9a2df989fab61ac877e351881960dab + - name: concelier + image: registry.stella-ops.org/stellaops/concelier@sha256:c58cdcaee1d266d68d498e41110a589dd204b487d37381096bd61ab345a867c5 + - name: excititor + image: registry.stella-ops.org/stellaops/excititor@sha256:59022e2016aebcef5c856d163ae705755d3f81949d41195256e935ef40a627fa + - name: web-ui + image: registry.stella-ops.org/stellaops/web-ui@sha256:10d924808c48e4353e3a241da62eb7aefe727a1d6dc830eb23a8e181013b3a23 + infrastructure: + mongo: + image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 + minio: + image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + checksums: + releaseManifestSha256: dc3c8fe1ab83941c838ccc5a8a5862f7ddfa38c2078e580b5649db26554565b7 diff --git a/deploy/releases/2025.10-edge.yaml 
b/deploy/releases/2025.10-edge.yaml new file mode 100644 index 00000000..65dcc0c1 --- /dev/null +++ b/deploy/releases/2025.10-edge.yaml @@ -0,0 +1,29 @@ +release: + version: "2025.10.0-edge" + channel: "edge" + date: "2025-10-01T00:00:00Z" + calendar: "2025.10" + components: + - name: authority + image: registry.stella-ops.org/stellaops/authority@sha256:a8e8faec44a579aa5714e58be835f25575710430b1ad2ccd1282a018cd9ffcdd + - name: signer + image: registry.stella-ops.org/stellaops/signer@sha256:8bfef9a75783883d49fc18e3566553934e970b00ee090abee9cb110d2d5c3298 + - name: attestor + image: registry.stella-ops.org/stellaops/attestor@sha256:5cc417948c029da01dccf36e4645d961a3f6d8de7e62fe98d845f07cd2282114 + - name: scanner-web + image: registry.stella-ops.org/stellaops/scanner-web@sha256:e0dfdb087e330585a5953029fb4757f5abdf7610820a085bd61b457dbead9a11 + - name: scanner-worker + image: registry.stella-ops.org/stellaops/scanner-worker@sha256:92dda42f6f64b2d9522104a5c9ffb61d37b34dd193132b68457a259748008f37 + - name: concelier + image: registry.stella-ops.org/stellaops/concelier@sha256:dafef3954eb4b837e2c424dd2d23e1e4d60fa83794840fac9cd3dea1d43bd085 + - name: excititor + image: registry.stella-ops.org/stellaops/excititor@sha256:d9bd5cadf1eab427447ce3df7302c30ded837239771cc6433b9befb895054285 + - name: web-ui + image: registry.stella-ops.org/stellaops/web-ui@sha256:38b225fa7767a5b94ebae4dae8696044126aac429415e93de514d5dd95748dcf + infrastructure: + mongo: + image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 + minio: + image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + checksums: + releaseManifestSha256: 822f82987529ea38d2321dbdd2ef6874a4062a117116a20861c26a8df1807beb diff --git a/deploy/tools/validate-profiles.sh b/deploy/tools/validate-profiles.sh new file mode 100644 index 00000000..bac9cabd --- /dev/null +++ b/deploy/tools/validate-profiles.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env 
bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +COMPOSE_DIR="$ROOT_DIR/compose" +HELM_DIR="$ROOT_DIR/helm/stellaops" + +compose_profiles=( + "docker-compose.dev.yaml:env/dev.env.example" + "docker-compose.stage.yaml:env/stage.env.example" + "docker-compose.airgap.yaml:env/airgap.env.example" +) + +if command -v docker >/dev/null 2>&1; then + for entry in "${compose_profiles[@]}"; do + IFS=":" read -r compose_file env_file <<<"$entry" + printf '→ validating %s with %s\n' "$compose_file" "$env_file" + docker compose \ + --env-file "$COMPOSE_DIR/$env_file" \ + -f "$COMPOSE_DIR/$compose_file" config >/dev/null + done +else + echo "⚠️ docker CLI not found; skipping compose validation" >&2 +fi + +helm_values=( + "$HELM_DIR/values-dev.yaml" + "$HELM_DIR/values-stage.yaml" + "$HELM_DIR/values-airgap.yaml" +) + +if command -v helm >/dev/null 2>&1; then + for values in "${helm_values[@]}"; do + printf '→ linting Helm chart with %s\n' "$(basename "$values")" + helm lint "$HELM_DIR" -f "$values" + helm template test-release "$HELM_DIR" -f "$values" >/dev/null + done +else + echo "⚠️ helm CLI not found; skipping Helm lint/template" >&2 +fi + +printf 'Profiles validated (where tooling was available).\n' diff --git a/docs/09_API_CLI_REFERENCE.md b/docs/09_API_CLI_REFERENCE.md index 23c5ee3a..e753762d 100755 --- a/docs/09_API_CLI_REFERENCE.md +++ b/docs/09_API_CLI_REFERENCE.md @@ -158,6 +158,90 @@ Client then generates SBOM **only** for the `missing` layers and re‑posts `/sc | `POST` | `/policy/validate` | Lint only; returns 400 on error | | `GET` | `/policy/history` | Paginated change log (audit trail) | +### 2.4 Scanner – Queue a Scan Job *(SP9 milestone)* + +``` +POST /api/v1/scans +Authorization: Bearer +Content-Type: application/json +``` + +```json +{ + "image": { + "reference": "registry.example.com/acme/app:1.2.3" + }, + "force": false, + "clientRequestId": "ci-build-1845", + "metadata": { + "pipeline": "github", + "trigger": 
"pull-request" + } +} +``` + +| Field | Required | Notes | +| ------------------- | -------- | ------------------------------------------------------------------------------------------------ | +| `image.reference` | no\* | Full repo/tag (`registry/repo:tag`). Provide **either** `reference` or `digest` (sha256:…). | +| `image.digest` | no\* | OCI digest (e.g. `sha256:…`). | +| `force` | no | `true` forces a re-run even if an identical scan (`scanId`) already exists. Default **false**. | +| `clientRequestId` | no | Free-form string surfaced in audit logs. | +| `metadata` | no | Optional string map stored with the job and surfaced in observability feeds. | + +\* At least one of `image.reference` or `image.digest` must be supplied. + +**Response 202** – job accepted (idempotent): + +```http +HTTP/1.1 202 Accepted +Location: /api/v1/scans/2f6c17f9b3f548e2a28b9c412f4d63f8 +``` + +```json +{ + "scanId": "2f6c17f9b3f548e2a28b9c412f4d63f8", + "status": "Pending", + "location": "/api/v1/scans/2f6c17f9b3f548e2a28b9c412f4d63f8", + "created": true +} +``` + +- `scanId` is deterministic – resubmitting an identical payload returns the same identifier with `"created": false`. +- API is cancellation-aware; aborting the HTTP request cancels the submission attempt. +- Required scope: **`scanner.scans.enqueue`**. + +**Response 400** – validation problem (`Content-Type: application/problem+json`) when both `image.reference` and `image.digest` are blank. + +### 2.5 Scanner – Fetch Scan Status + +``` +GET /api/v1/scans/{scanId} +Authorization: Bearer +Accept: application/json +``` + +**Response 200**: + +```json +{ + "scanId": "2f6c17f9b3f548e2a28b9c412f4d63f8", + "status": "Pending", + "image": { + "reference": "registry.example.com/acme/app:1.2.3", + "digest": null + }, + "createdAt": "2025-10-18T20:15:12.482Z", + "updatedAt": "2025-10-18T20:15:12.482Z", + "failureReason": null +} +``` + +Statuses: `Pending`, `Running`, `Succeeded`, `Failed`, `Cancelled`. 
+ +**Response 404** – `application/problem+json` payload with type `https://stellaops.org/problems/not-found` when the scan identifier is unknown. + +> **Tip** – poll `Location` from the submission call until `status` transitions away from `Pending`/`Running`. + ```yaml # Example import payload (YAML) version: "1.0" @@ -181,6 +265,23 @@ Validation errors come back as: } ``` +```json +# Preview response excerpt +{ + "success": true, + "policyDigest": "9c5e...", + "revisionId": "rev-12", + "changed": 1, + "diffs": [ + { + "baseline": {"findingId": "finding-1", "status": "pass"}, + "projected": {"findingId": "finding-1", "status": "blocked", "ruleName": "Block Critical"}, + "changed": true + } + ] +} +``` + --- ### 2.4 Attestation (Planned – Q1‑2026) diff --git a/docs/11_DATA_SCHEMAS.md b/docs/11_DATA_SCHEMAS.md index b68ac7ec..db3a92bb 100755 --- a/docs/11_DATA_SCHEMAS.md +++ b/docs/11_DATA_SCHEMAS.md @@ -120,7 +120,18 @@ rules: action: escalate ``` -Validation is performed by `policy:mapping.yaml` JSON‑Schema embedded in backend. +Validation is performed by `policy:mapping.yaml` JSON‑Schema embedded in backend. + +Canonical schema source: `src/StellaOps.Policy/Schemas/policy-schema@1.json` (embedded into `StellaOps.Policy`). +`PolicyValidationCli` (see `src/StellaOps.Policy/PolicyValidationCli.cs`) provides the reusable command handler that the main CLI wires up; in the interim it can be invoked from a short host like: + +```csharp +await new PolicyValidationCli().RunAsync(new PolicyValidationCliOptions +{ + Inputs = new[] { "policies/root.yaml" }, + Strict = true, +}); +``` ### 4.1 Rego Variant (Advanced – TODO) diff --git a/docs/21_INSTALL_GUIDE.md b/docs/21_INSTALL_GUIDE.md index 1686632f..5667bb31 100755 --- a/docs/21_INSTALL_GUIDE.md +++ b/docs/21_INSTALL_GUIDE.md @@ -76,6 +76,12 @@ UI: [https://\<host\>:8443](https://<host>:8443) (self‑signed cert > `stella-ops:latest` with the immutable digest printed by > `docker images --digests`. 
+> **Repo bundles** – Development, staging, and air‑gapped Compose profiles live +> under `deploy/compose/`, already tied to the release manifests in +> `deploy/releases/`. Helm users can pull the same channel overlays from +> `deploy/helm/stellaops/values-*.yaml` and validate everything with +> `deploy/tools/validate-profiles.sh`. + ### 1.1 · Concelier authority configuration The Concelier container reads configuration from `etc/concelier.yaml` plus diff --git a/docs/ARCHITECTURE_DEVOPS.md b/docs/ARCHITECTURE_DEVOPS.md index 42f28812..d66375e9 100644 --- a/docs/ARCHITECTURE_DEVOPS.md +++ b/docs/ARCHITECTURE_DEVOPS.md @@ -234,6 +234,11 @@ release: The manifest is **cosign‑signed**; UI/CLI can verify a bundle without talking to registries. +> Deployment guardrails – The repository keeps channel-aligned Compose bundles +> in `deploy/compose/` and Helm overlays in `deploy/helm/stellaops/`. Both sets +> pull their digests from `deploy/releases/` and are validated by +> `deploy/tools/validate-profiles.sh` to guarantee lint/dry-run cleanliness. + ### 6.2 Image labels (release metadata) Each image sets OCI labels: diff --git a/docs/ARCHITECTURE_SCANNER.md b/docs/ARCHITECTURE_SCANNER.md index c45f51b6..68641aa6 100644 --- a/docs/ARCHITECTURE_SCANNER.md +++ b/docs/ARCHITECTURE_SCANNER.md @@ -42,6 +42,35 @@ src/ Analyzer assemblies and buildx generators are packaged as **restart-time plug-ins** under `plugins/scanner/**` with manifests; services must restart to activate new plug-ins. +### 1.1 Queue backbone (Redis / NATS) + +`StellaOps.Scanner.Queue` exposes a transport-agnostic contract (`IScanQueue`/`IScanQueueLease`) used by the WebService producer and Worker consumers. Sprint 9 introduces two first-party transports: + +- **Redis Streams** (default). Uses consumer groups, deterministic idempotency keys (`scanner:jobs:idemp:*`), and supports lease claim (`XCLAIM`), renewal, exponential-backoff retries, and a `scanner:jobs:dead` stream for exhausted attempts. 
+- **NATS JetStream**. Provisions the `SCANNER_JOBS` work-queue stream + durable consumer `scanner-workers`, publishes with `MsgId` for dedupe, applies backoff via `NAK` delays, and routes dead-lettered jobs to `SCANNER_JOBS_DEAD`. + +Metrics are emitted via `Meter` counters (`scanner_queue_enqueued_total`, `scanner_queue_retry_total`, `scanner_queue_deadletter_total`), and `ScannerQueueHealthCheck` pings the active backend (Redis `PING`, NATS `PING`). Configuration is bound from `scanner.queue`: + +```yaml +scanner: + queue: + kind: redis # or nats + redis: + connectionString: "redis://queue:6379/0" + streamName: "scanner:jobs" + nats: + url: "nats://queue:4222" + stream: "SCANNER_JOBS" + subject: "scanner.jobs" + durableConsumer: "scanner-workers" + deadLetterSubject: "scanner.jobs.dead" + maxDeliveryAttempts: 5 + retryInitialBackoff: 00:00:05 + retryMaxBackoff: 00:02:00 +``` + +The DI extension (`AddScannerQueue`) wires the selected transport, so future additions (e.g., RabbitMQ) only implement the same contract and register. 
+ **Runtime form‑factor:** two deployables * **Scanner.WebService** (stateless REST) diff --git a/docs/README.md b/docs/README.md index 03ec1b65..2fefd3d8 100755 --- a/docs/README.md +++ b/docs/README.md @@ -31,12 +31,13 @@ Everything here is open‑source and versioned — when you check out a git ta - **03 – [Vision & Road‑map](03_VISION.md)** - **04 – [Feature Matrix](04_FEATURE_MATRIX.md)** -### Reference & concepts -- **05 – [System Requirements Specification](05_SYSTEM_REQUIREMENTS_SPEC.md)** -- **07 – [High‑Level Architecture](07_HIGH_LEVEL_ARCHITECTURE.md)** -- **08 – Module Architecture Dossiers** - - [Scanner](ARCHITECTURE_SCANNER.md) - - [Concelier](ARCHITECTURE_CONCELIER.md) +### Reference & concepts +- **05 – [System Requirements Specification](05_SYSTEM_REQUIREMENTS_SPEC.md)** +- **07 – [High‑Level Architecture](07_HIGH_LEVEL_ARCHITECTURE.md)** +- **08 – [Architecture Decision Records](adr/index.md)** +- **08 – Module Architecture Dossiers** + - [Scanner](ARCHITECTURE_SCANNER.md) + - [Concelier](ARCHITECTURE_CONCELIER.md) - [Excititor](ARCHITECTURE_EXCITITOR.md) - [Signer](ARCHITECTURE_SIGNER.md) - [Attestor](ARCHITECTURE_ATTESTOR.md) @@ -48,8 +49,9 @@ Everything here is open‑source and versioned — when you check out a git ta - [Zastava Runtime](ARCHITECTURE_ZASTAVA.md) - [Release & Operations](ARCHITECTURE_DEVOPS.md) - **09 – [API & CLI Reference](09_API_CLI_REFERENCE.md)** -- **10 – [Plug‑in SDK Guide](10_PLUGIN_SDK_GUIDE.md)** -- **10 – [Concelier CLI Quickstart](10_CONCELIER_CLI_QUICKSTART.md)** +- **10 – [Plug‑in SDK Guide](10_PLUGIN_SDK_GUIDE.md)** +- **10 – [Concelier CLI Quickstart](10_CONCELIER_CLI_QUICKSTART.md)** +- **10 – [BuildX Generator Quickstart](dev/BUILDX_PLUGIN_QUICKSTART.md)** - **30 – [Excititor Connector Packaging Guide](dev/30_EXCITITOR_CONNECTOR_GUIDE.md)** - **30 – Developer Templates** - [Excititor Connector Skeleton](dev/templates/excititor-connector/) diff --git a/docs/TASKS.md b/docs/TASKS.md index 9e4a7c74..118b1184 100644 
--- a/docs/TASKS.md +++ b/docs/TASKS.md @@ -9,8 +9,8 @@ | DOC5.Concelier-Runbook | DONE (2025-10-12) | Docs Guild | DOC3.Concelier-Authority | Produce dedicated Concelier authority audit runbook covering log fields, monitoring recommendations, and troubleshooting steps. | ✅ Runbook published; ✅ linked from DOC3/DOC5; ✅ alerting guidance included. | | FEEDDOCS-DOCS-05-001 | DONE (2025-10-11) | Docs Guild | FEEDMERGE-ENGINE-04-001, FEEDMERGE-ENGINE-04-002 | Publish Concelier conflict resolution runbook covering precedence workflow, merge-event auditing, and Sprint 3 metrics. | ✅ `docs/ops/concelier-conflict-resolution.md` committed; ✅ metrics/log tables align with latest merge code; ✅ Ops alert guidance handed to Concelier team. | | FEEDDOCS-DOCS-05-002 | DONE (2025-10-16) | Docs Guild, Concelier Ops | FEEDDOCS-DOCS-05-001 | Ops sign-off captured: conflict runbook circulated, alert thresholds tuned, and rollout decisions documented in change log. | ✅ Ops review recorded; ✅ alert thresholds finalised using `docs/ops/concelier-authority-audit-runbook.md`; ✅ change-log entry linked from runbook once GHSA/NVD/OSV regression fixtures land. | -| DOCS-ADR-09-001 | TODO | Docs Guild, DevEx | — | Establish ADR process (`docs/adr/0000-template.md`) and document usage guidelines. | Template published; README snippet linking ADR process; announcement posted. | -| DOCS-EVENTS-09-002 | TODO | Docs Guild, Platform Events | SCANNER-EVENTS-15-201 | Publish event schema catalog (`docs/events/`) for `scanner.report.ready@1`, `scheduler.rescan.delta@1`, `attestor.logged@1`. | Schemas validated; docs/events/README summarises usage; Notify/Scheduler teams acknowledge. | +| DOCS-ADR-09-001 | DONE (2025-10-19) | Docs Guild, DevEx | — | Establish ADR process (`docs/adr/0000-template.md`) and document usage guidelines. | Template published; README snippet linking ADR process; announcement posted (`docs/updates/2025-10-18-docs-guild.md`). 
| +| DOCS-EVENTS-09-002 | DONE (2025-10-19) | Docs Guild, Platform Events | SCANNER-EVENTS-15-201 | Publish event schema catalog (`docs/events/`) for `scanner.report.ready@1`, `scheduler.rescan.delta@1`, `attestor.logged@1`. | Schemas validated (Ajv CI hooked); docs/events/README summarises usage; Platform Events notified via `docs/updates/2025-10-18-docs-guild.md`. | | DOCS-RUNTIME-17-004 | TODO | Docs Guild, Runtime Guild | SCANNER-EMIT-17-701, ZASTAVA-OBS-17-005, DEVOPS-REL-17-002 | Document build-id workflows: SBOM exposure, runtime event payloads, debug-store layout, and operator guidance for symbol retrieval. | Architecture + operator docs updated with build-id sections, examples show `readelf` output + debuginfod usage, references linked from Offline Kit/Release guides. | > Update statuses (TODO/DOING/REVIEW/DONE/BLOCKED) as progress changes. Keep guides in sync with configuration samples under `etc/`. diff --git a/docs/adr/0000-template.md b/docs/adr/0000-template.md index 20d091da..543034f5 100644 --- a/docs/adr/0000-template.md +++ b/docs/adr/0000-template.md @@ -3,16 +3,32 @@ ## Status Proposed +## Date +YYYY-MM-DD + +## Authors +- Name (team) + +## Deciders +- Names of approvers / reviewers + ## Context - What decision needs to be made? - What are the forces (requirements, constraints, stakeholders)? +- Why now? What triggers the ADR? ## Decision - Summary of the chosen option. +- Key rationale points. ## Consequences - Positive/negative consequences. - Follow-up actions or tasks. +- Rollback plan or re-evaluation criteria. + +## Alternatives Considered +- Option A — pros/cons. +- Option B — pros/cons. ## References - Links to related ADRs, issues, documents. 
diff --git a/docs/adr/index.md b/docs/adr/index.md new file mode 100644 index 00000000..f2262bde --- /dev/null +++ b/docs/adr/index.md @@ -0,0 +1,41 @@ +# Architecture Decision Records (ADRs) + +Architecture Decision Records document long-lived choices that shape StellaOps architecture, security posture, and operator experience. They complement RFCs by capturing the final call and the context that led to it. + +## When to file an ADR +- Decisions that affect cross-module contracts, persistence models, or external interfaces. +- Security or compliance controls with on-going operational ownership. +- Rollout strategies that require coordination across guilds or sprints. +- Reversals or deprecations of previously accepted ADRs. + +Small, module-local refactors that do not modify public behaviour can live in commit messages instead. + +## Workflow at a glance +1. Copy `docs/adr/0000-template.md` to `docs/adr/NNNN-short-slug.md` with a zero-padded sequence (see **Numbering**). +2. Fill in context, decision, consequences, and alternatives. Include links to RFCs, issues, benchmarks, or experiments. +3. Request async review from the impacted guilds. Capture sign-offs in the **Deciders** field. +4. Merge the ADR with the code/config changes (or in a preparatory PR). +5. Announce the accepted ADR in the Docs Guild channel or sprint notes so downstream teams can consume it. + +## Numbering and status +- Use zero-padded integers (e.g., `0001`, `0002`) in file names and the document header. Increment from the highest existing number. +- Valid statuses: `Proposed`, `Accepted`, `Rejected`, `Deprecated`, `Superseded`. Update the status when follow-up work lands. +- When an ADR supersedes another, link them in both documents’ **References** sections. + +## Review expectations +- Highlight edge-case handling, trade-offs, and determinism requirements. +- Include operational checklists for any new runtime path (quota updates, schema migrations, credential rotation, etc.). 
+- Attach diagrams under `docs/adr/assets/` when visuals improve comprehension. +- Add TODO tasks for follow-up work in the relevant module’s `TASKS.md` and link them from the ADR. + +## Verification checklist +- [ ] `Status`, `Date`, `Authors`, and `Deciders` populated. +- [ ] Links to code/config PRs or experiments recorded under **References**. +- [ ] Consequences call out migration or rollback steps. +- [ ] Announcement posted to Docs Guild updates (or sprint log). + +## Related resources +- [Docs Guild Task Board](../TASKS.md) +- [High-Level Architecture Overview](../07_HIGH_LEVEL_ARCHITECTURE.md) +- [Coding Standards](../18_CODING_STANDARDS.md) +- [Release Engineering Playbook](../13_RELEASE_ENGINEERING_PLAYBOOK.md) diff --git a/docs/ci/20_CI_RECIPES.md b/docs/ci/20_CI_RECIPES.md index 4ad86464..dd17e6dc 100755 --- a/docs/ci/20_CI_RECIPES.md +++ b/docs/ci/20_CI_RECIPES.md @@ -241,7 +241,59 @@ jobs: --- -## 4 · Troubleshooting cheat‑sheet +## 4 · Docs CI (Gitea Actions & Offline Mirror) + +StellaOps ships a dedicated Docs workflow at `.gitea/workflows/docs.yml`. When mirroring the pipeline offline or running it locally, install the same toolchain so markdown linting, schema validation, and HTML preview stay deterministic. + +### 4.1 Toolchain bootstrap + +```bash +# Node.js 20.x is required; install once per runner +npm install --no-save \ + markdown-link-check \ + remark-cli \ + remark-preset-lint-recommended \ + ajv \ + ajv-cli \ + ajv-formats + +# Python 3.11+ powers the preview renderer +python -m pip install --upgrade pip +python -m pip install markdown pygments +``` + +**Offline tip.** Add the packages above to your artifact mirror (for example `ops/devops/offline-kit.json`) so runners can install them via `npm --offline` / `pip --no-index`. + +### 4.2 Schema validation step + +Ajv compiles every event schema to guard against syntax or format regressions. The workflow uses `ajv-formats` for UUID/date-time support. 
+ +```bash +for schema in docs/events/*.json; do + npx ajv compile -c ajv-formats -s "$schema" +done +``` + +Run this loop before committing schema changes. For new references, append `-r additional-file.json` so CI and local runs stay aligned. + +### 4.3 Preview build + +```bash +python scripts/render_docs.py --source docs --output artifacts/docs-preview --clean +``` + +Host the resulting bundle via any static file server for review (for example `python -m http.server`). + +### 4.4 Publishing checklist + +- [ ] Toolchain installs succeed without hitting the public internet (mirror or cached tarballs). +- [ ] Ajv validation passes for `scanner.report.ready@1`, `scheduler.rescan.delta@1`, `attestor.logged@1`. +- [ ] Markdown link check (`npx markdown-link-check`) reports no broken references. +- [ ] Preview bundle archived (or attached) for stakeholders. + +--- + +## 5 · Troubleshooting cheat‑sheet | Symptom | Root cause | First things to try | | ------------------------------------- | --------------------------- | --------------------------------------------------------------- | @@ -253,6 +305,7 @@ jobs: --- -### Change log - -* **2025‑08‑04** – Variable clean‑up, removed Docker‑socket & cache mounts, added Jenkins / CircleCI / Gitea examples, clarified Option B comment. +### Change log + +* **2025‑10‑18** – Documented Docs CI toolchain (Ajv validation, static preview) and offline checklist. +* **2025‑08‑04** – Variable clean‑up, removed Docker‑socket & cache mounts, added Jenkins / CircleCI / Gitea examples, clarified Option B comment. diff --git a/docs/dev/BUILDX_PLUGIN_QUICKSTART.md b/docs/dev/BUILDX_PLUGIN_QUICKSTART.md new file mode 100644 index 00000000..f4bdea5e --- /dev/null +++ b/docs/dev/BUILDX_PLUGIN_QUICKSTART.md @@ -0,0 +1,117 @@ +# BuildX Generator Quickstart + +This quickstart explains how to run the StellaOps **BuildX SBOM generator** offline, verify the CAS handshake, and emit OCI descriptors that downstream services can attest. + +## 1. 
Prerequisites + +- Docker 25+ with BuildKit enabled (`docker buildx` available). +- .NET 10 (preview) SDK matching the repository `global.json`. +- Optional: network access to a StellaOps Attestor endpoint (the quickstart uses a mock service). + +## 2. Publish the plug-in binaries + +The BuildX generator publishes as a .NET self-contained executable with its manifest under `plugins/scanner/buildx/`. + +```bash +# From the repository root +DOTNET_CLI_HOME="${PWD}/.dotnet" \ +dotnet publish src/StellaOps.Scanner.Sbomer.BuildXPlugin/StellaOps.Scanner.Sbomer.BuildXPlugin.csproj \ + -c Release \ + -o out/buildx +``` + +- `out/buildx/` now contains `StellaOps.Scanner.Sbomer.BuildXPlugin.dll` and the manifest `stellaops.sbom-indexer.manifest.json`. +- `plugins/scanner/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin/` receives the same artefacts for release packaging. + +## 3. Verify the CAS handshake + +```bash +dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll handshake \ + --manifest out/buildx \ + --cas out/cas +``` + +The command performs a deterministic probe write (`sha256`) into the provided CAS directory and prints the resolved path. + +## 4. Emit a descriptor + provenance placeholder + +1. Build or identify the image you want to describe and capture its digest: + + ```bash + docker buildx build --load -t stellaops/buildx-demo:ci samples/ci/buildx-demo + DIGEST=$(docker image inspect stellaops/buildx-demo:ci --format '{{index .RepoDigests 0}}') + ``` + +2. Generate a CycloneDX SBOM for the built image (any tool works; here we use `docker sbom`): + + ```bash + docker sbom stellaops/buildx-demo:ci --format cyclonedx-json > out/buildx-sbom.cdx.json + ``` + +3. 
Invoke the `descriptor` command, pointing at the SBOM file and optional metadata: + + ```bash + dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll descriptor \ + --manifest out/buildx \ + --image "$DIGEST" \ + --sbom out/buildx-sbom.cdx.json \ + --sbom-name buildx-sbom.cdx.json \ + --artifact-type application/vnd.stellaops.sbom.layer+json \ + --sbom-format cyclonedx-json \ + --sbom-kind inventory \ + --repository git.stella-ops.org/stellaops/buildx-demo \ + --build-ref $(git rev-parse HEAD) \ + > out/buildx-descriptor.json + ``` + +The output JSON captures: + +- OCI artifact descriptor including size, digest, and annotations (`org.stellaops.*`). +- Provenance placeholder (`expectedDsseSha256`, `nonce`, `attestorUri` when provided). +- Generator metadata and deterministic timestamps. + +## 5. (Optional) Send the placeholder to an Attestor + +The plug-in can POST the descriptor metadata to an Attestor endpoint, returning once it receives an HTTP 202. + +```bash +python3 - <<'PY' & +from http.server import BaseHTTPRequestHandler, HTTPServer +class Handler(BaseHTTPRequestHandler): + def do_POST(self): + _ = self.rfile.read(int(self.headers.get('Content-Length', 0))) + self.send_response(202); self.end_headers(); self.wfile.write(b'accepted') + def log_message(self, fmt, *args): + return +server = HTTPServer(('127.0.0.1', 8085), Handler) +try: + server.serve_forever() +except KeyboardInterrupt: + pass +finally: + server.server_close() +PY +MOCK_PID=$! + +dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll descriptor \ + --manifest out/buildx \ + --image "$DIGEST" \ + --sbom out/buildx-sbom.cdx.json \ + --attestor http://127.0.0.1:8085/provenance \ + --attestor-token "$STELLAOPS_ATTESTOR_TOKEN" \ + > out/buildx-descriptor.json + +kill $MOCK_PID +``` + +Set `STELLAOPS_ATTESTOR_TOKEN` (or pass `--attestor-token`) when the Attestor requires bearer authentication. Use `--attestor-insecure` for lab environments with self-signed certificates. + +## 6. 
CI workflow example + +A reusable GitHub Actions workflow is provided under `samples/ci/buildx-demo/github-actions-buildx-demo.yml`. It publishes the plug-in, runs the handshake, builds the demo image, emits a descriptor, and uploads both the descriptor and the mock-Attestor request as artefacts. + +Add the workflow to your repository (or call it via `workflow_call`) and adjust the SBOM path + Attestor URL as needed. + +--- + +For deeper integration guidance (custom SBOM builders, exporting DSSE bundles), track ADRs in `docs/ARCHITECTURE_SCANNER.md` §7 and follow upcoming Attestor API releases. diff --git a/docs/events/README.md b/docs/events/README.md index 500077f3..bd3deb37 100644 --- a/docs/events/README.md +++ b/docs/events/README.md @@ -1,9 +1,30 @@ # Event Envelope Schemas -Versioned JSON Schemas for platform events consumed by Scheduler, Notify, and UI. +Platform services publish strongly typed events; the JSON Schemas in this directory define those envelopes. File names follow `@.json` so producers and consumers can negotiate contracts explicitly. -- `scanner.report.ready@1.json` -- `scheduler.rescan.delta@1.json` -- `attestor.logged@1.json` +## Catalog +- `scanner.report.ready@1.json` — emitted by Scanner.WebService once a signed report is persisted. Consumers: Notify, UI timeline. +- `scheduler.rescan.delta@1.json` — emitted by Scheduler when BOM-Index diffs require fresh scans. Consumers: Notify, Policy Engine. +- `attestor.logged@1.json` — emitted by Attestor after storing the Rekor inclusion proof. Consumers: UI attestation panel, Governance exports. -Producers must bump the version suffix when introducing breaking changes; consumers validate incoming payloads against these schemas. +Additive payload changes (new optional fields) can stay within the same version. Any breaking change (removing a field, tightening validation, altering semantics) must increment the `@` suffix and update downstream consumers. 
+ +## CI validation +The Docs CI workflow (`.gitea/workflows/docs.yml`) installs `ajv-cli` and compiles every schema on pull requests. Run the same check locally before opening a PR: + +```bash +for schema in docs/events/*.json; do + npx ajv compile -c ajv-formats -s "$schema" +done +``` + +Tip: run `npm install --no-save ajv ajv-cli ajv-formats` once per clone so `npx` can resolve the tooling offline. + +If a schema references additional files, include `-r` flags so CI and local runs stay consistent. + +## Working with schemas +- Producers should validate outbound payloads using the matching schema during unit tests. +- Consumers should pin to a specific version and log when encountering unknown versions to catch missing migrations early. +- Store real payload samples under `samples/events/` (mirrors the schema version) to aid contract testing. + +Contact the Platform Events group in Docs Guild if you need help shaping a new event or version strategy. diff --git a/docs/scanner-core-contracts.md b/docs/scanner-core-contracts.md new file mode 100644 index 00000000..7add5dcb --- /dev/null +++ b/docs/scanner-core-contracts.md @@ -0,0 +1,42 @@ +# Scanner Core Contracts + +The **Scanner Core** library provides shared contracts, observability helpers, and security utilities consumed by `Scanner.WebService`, `Scanner.Worker`, analyzers, and tooling. These primitives guarantee deterministic identifiers, timestamps, and log context for all scanning flows. + +## DTOs + +- `ScanJob` & `ScanJobStatus` – canonical job metadata (image reference/digest, tenant, correlation ID, timestamps, failure details). Constructors normalise timestamps to UTC microsecond precision and canonicalise image digests. Round-trips with `JsonSerializerDefaults.Web` using `ScannerJsonOptions`. +- `ScanProgressEvent` & `ScanStage`/`ScanProgressEventKind` – stage-level progress surface for queue/stream consumers. 
Includes deterministic sequence numbers, optional progress percentage, attributes, and attached `ScannerError`. +- `ScannerError` & `ScannerErrorCode` – shared error taxonomy spanning queue, analyzers, storage, exporters, and signing. Carries severity, retryability, structured details, and microsecond-precision timestamps. +- `ScanJobId` – strongly-typed identifier rendered as `Guid` (lowercase `N` format) with deterministic parsing. + +## Deterministic helpers + +- `ScannerIdentifiers` – derives `ScanJobId`, correlation IDs, and SHA-256 hashes from normalised inputs (image reference/digest, tenant, salt). Ensures case-insensitive stability and reproducible metric keys. +- `ScannerTimestamps` – trims to microsecond precision, provides ISO-8601 (`yyyy-MM-ddTHH:mm:ss.ffffffZ`) rendering, and parsing helpers. +- `ScannerJsonOptions` – standard JSON options (web defaults, camel-case enums) shared by services/tests. + +## Observability primitives + +- `ScannerDiagnostics` – global `ActivitySource`/`Meter` for scanner components. `StartActivity` seeds deterministic tags (`job_id`, `stage`, `component`, `correlation_id`). +- `ScannerMetricNames` – centralises metric prefixes (`stellaops.scanner.*`) and deterministic job/event tag builders. +- `ScannerCorrelationContext` & `ScannerCorrelationContextAccessor` – ambient correlation propagation via `AsyncLocal` for log scopes, metrics, and diagnostics. +- `ScannerLogExtensions` – `ILogger` scopes for jobs/progress events with automatic correlation context push, minimal allocations, and consistent structured fields. + +## Security utilities + +- `AuthorityTokenSource` – caches short-lived OpToks per audience+scope using deterministic keys and refresh skew (default 30 s). Integrates with `StellaOps.Auth.Client`. +- `DpopProofValidator` – validates DPoP proofs (alg allowlist, `htm`/`htu`, nonce, replay window, signature) backed by pluggable `IDpopReplayCache`. Ships with `InMemoryDpopReplayCache` for restart-only deployments. 
+- `RestartOnlyPluginGuard` – enforces restart-time plug-in registration (deterministic path normalisation; throws if new plug-ins added post-seal). +- `ServiceCollectionExtensions.AddScannerAuthorityCore` – DI helper wiring Authority client, OpTok source, DPoP validation, replay cache, and plug-in guard. + +## Testing guarantees + +Unit tests (`StellaOps.Scanner.Core.Tests`) assert: + +- DTO JSON round-trips are stable and deterministic. +- Identifier/hash helpers ignore case and emit lowercase hex. +- Timestamp normalisation retains UTC semantics. +- Log scopes push/pop correlation context predictably. +- Authority token caching honours refresh skew and invalidation. +- DPoP validator accepts valid proofs, rejects nonce mismatch/replay, and enforces signature validation. +- Restart-only plug-in guard blocks runtime additions post-seal. diff --git a/docs/updates/2025-10-18-docs-guild.md b/docs/updates/2025-10-18-docs-guild.md new file mode 100644 index 00000000..69af7df0 --- /dev/null +++ b/docs/updates/2025-10-18-docs-guild.md @@ -0,0 +1,14 @@ +# Docs Guild Update — 2025-10-18 + +**Subject:** ADR process + events schema validation shipped +**Audience:** Docs Guild, DevEx, Platform Events + +- Published the ADR contribution guide at `docs/adr/index.md` and enriched the template to capture authorship, deciders, and alternatives. All new cross-module decisions should follow this workflow. +- Linked the ADR hub from `docs/README.md` so operators and engineers can discover the process without digging through directories. +- Extended Docs CI (`.gitea/workflows/docs.yml`) to compile event schemas with Ajv (including `ajv-formats`) and documented the local loop in `docs/events/README.md`. +- Captured the mirror/offline workflow in `docs/ci/20_CI_RECIPES.md` so runners know how to install the Ajv toolchain and publish previews without internet access. 
+- Validated `scanner.report.ready@1`, `scheduler.rescan.delta@1`, and `attestor.logged@1` schemas locally to unblock Platform Events acknowledgements. + +Next steps: +- Platform Events to confirm Notify/Scheduler consumers have visibility into the schema docs. +- DevEx to add ADR announcement blurb to the next sprint recap if broader broadcast is needed. diff --git a/ops/devops/TASKS.md b/ops/devops/TASKS.md index a4871844..1c35e064 100644 --- a/ops/devops/TASKS.md +++ b/ops/devops/TASKS.md @@ -2,7 +2,7 @@ | ID | Status | Owner(s) | Depends on | Description | Exit Criteria | |----|--------|----------|------------|-------------|---------------| -| DEVOPS-HELM-09-001 | TODO | DevOps Guild | SCANNER-WEB-09-101 | Create Helm/Compose environment profiles (dev, staging, airgap) with deterministic digests. | Profiles committed under `deploy/`; docs updated; CI smoke deploy passes. | +| DEVOPS-HELM-09-001 | DONE | DevOps Guild | SCANNER-WEB-09-101 | Create Helm/Compose environment profiles (dev, staging, airgap) with deterministic digests. | Profiles committed under `deploy/`; docs updated; CI smoke deploy passes. | | DEVOPS-PERF-10-001 | TODO | DevOps Guild | BENCH-SCANNER-10-001 | Add perf smoke job (SBOM compose <5 s target) to CI. | CI job runs sample build verifying <5 s; alerts configured. | | DEVOPS-REL-14-001 | TODO | DevOps Guild | SIGNER-API-11-101, ATTESTOR-API-11-201 | Deterministic build/release pipeline with SBOM/provenance, signing, manifest generation. | CI pipeline produces signed images + SBOM/attestations, manifests published with verified hashes, docs updated. | | DEVOPS-REL-17-002 | TODO | DevOps Guild | DEVOPS-REL-14-001, SCANNER-EMIT-17-701 | Persist stripped-debug artifacts organised by GNU build-id and bundle them into release/offline kits with checksum manifests. | CI job writes `.debug` files under `artifacts/debug/.build-id/`, manifest + checksums published, offline kit includes cache, smoke job proves symbol lookup via build-id. 
| diff --git a/plugins/scanner/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin/manifest.json b/plugins/scanner/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin/manifest.json new file mode 100644 index 00000000..cabadebd --- /dev/null +++ b/plugins/scanner/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin/manifest.json @@ -0,0 +1,35 @@ +{ + "schemaVersion": "1.0", + "id": "stellaops.sbom-indexer", + "displayName": "StellaOps SBOM BuildX Generator", + "version": "0.1.0-alpha", + "requiresRestart": true, + "entryPoint": { + "type": "dotnet", + "executable": "StellaOps.Scanner.Sbomer.BuildXPlugin.dll", + "arguments": [ + "handshake" + ] + }, + "capabilities": [ + "generator", + "sbom" + ], + "cas": { + "protocol": "filesystem", + "defaultRoot": "cas", + "compression": "zstd" + }, + "image": { + "name": "stellaops/sbom-indexer", + "digest": null, + "platforms": [ + "linux/amd64", + "linux/arm64" + ] + }, + "metadata": { + "org.stellaops.plugin.kind": "buildx-generator", + "org.stellaops.restart.required": "true" + } +} diff --git a/plugins/scanner/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin/stellaops.sbom-indexer.manifest.json b/plugins/scanner/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin/stellaops.sbom-indexer.manifest.json new file mode 100644 index 00000000..cabadebd --- /dev/null +++ b/plugins/scanner/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin/stellaops.sbom-indexer.manifest.json @@ -0,0 +1,35 @@ +{ + "schemaVersion": "1.0", + "id": "stellaops.sbom-indexer", + "displayName": "StellaOps SBOM BuildX Generator", + "version": "0.1.0-alpha", + "requiresRestart": true, + "entryPoint": { + "type": "dotnet", + "executable": "StellaOps.Scanner.Sbomer.BuildXPlugin.dll", + "arguments": [ + "handshake" + ] + }, + "capabilities": [ + "generator", + "sbom" + ], + "cas": { + "protocol": "filesystem", + "defaultRoot": "cas", + "compression": "zstd" + }, + "image": { + "name": "stellaops/sbom-indexer", + "digest": null, + "platforms": [ + "linux/amd64", + "linux/arm64" + ] + }, + "metadata": 
{ + "org.stellaops.plugin.kind": "buildx-generator", + "org.stellaops.restart.required": "true" + } +} diff --git a/samples/ci/buildx-demo/Dockerfile b/samples/ci/buildx-demo/Dockerfile new file mode 100644 index 00000000..52f5b15a --- /dev/null +++ b/samples/ci/buildx-demo/Dockerfile @@ -0,0 +1,4 @@ +FROM alpine:3.20 +RUN adduser -S stella && echo "hello" > /app.txt +USER stella +CMD ["/bin/sh","-c","cat /app.txt"] diff --git a/samples/ci/buildx-demo/README.md b/samples/ci/buildx-demo/README.md new file mode 100644 index 00000000..db6b8864 --- /dev/null +++ b/samples/ci/buildx-demo/README.md @@ -0,0 +1,42 @@ +# Buildx SBOM Demo Workflow + +This sample GitHub Actions workflow shows how to run the StellaOps BuildX generator alongside a container build. + +## What it does + +1. Publishes the `StellaOps.Scanner.Sbomer.BuildXPlugin` with the manifest copied beside the binaries. +2. Calls the plug-in `handshake` command to verify the local CAS directory. +3. Builds a tiny Alpine-based image via `docker buildx`. +4. Generates a CycloneDX SBOM from the built image with `docker sbom`. +5. Emits a descriptor + provenance placeholder referencing the freshly generated SBOM with the `descriptor` command. +6. Sends the placeholder to a mock Attestor endpoint and uploads the descriptor, SBOM, and captured request as artefacts. (Swap the mock step with your real Attestor URL + `STELLAOPS_ATTESTOR_TOKEN` secret when ready.) + +## Files + +- `github-actions-buildx-demo.yml` – workflow definition (`workflow_dispatch` + `demo/buildx` branch trigger). +- `Dockerfile` – minimal demo image. +- `github-actions-buildx-demo.yml` now captures a real SBOM via `docker sbom`. 
+ +## Running locally + +```bash +dotnet publish src/StellaOps.Scanner.Sbomer.BuildXPlugin/StellaOps.Scanner.Sbomer.BuildXPlugin.csproj -c Release -o out/buildx + +dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll handshake \ + --manifest out/buildx \ + --cas out/cas + +docker buildx build --load -t stellaops/buildx-demo:ci samples/ci/buildx-demo +DIGEST=$(docker image inspect stellaops/buildx-demo:ci --format '{{index .RepoDigests 0}}') + +docker sbom stellaops/buildx-demo:ci --format cyclonedx-json > out/buildx-sbom.cdx.json + +dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll descriptor \ + --manifest out/buildx \ + --image "$DIGEST" \ + --sbom out/buildx-sbom.cdx.json \ + --sbom-name buildx-sbom.cdx.json \ + > out/buildx-descriptor.json +``` + +The descriptor JSON contains deterministic annotations and provenance placeholders ready for the Attestor. diff --git a/samples/ci/buildx-demo/github-actions-buildx-demo.yml b/samples/ci/buildx-demo/github-actions-buildx-demo.yml new file mode 100644 index 00000000..1dd4ef9b --- /dev/null +++ b/samples/ci/buildx-demo/github-actions-buildx-demo.yml @@ -0,0 +1,120 @@ +name: Buildx SBOM Demo +on: + workflow_dispatch: + push: + branches: [ demo/buildx ] + +jobs: + buildx-sbom: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Set up .NET 10 preview + uses: actions/setup-dotnet@v4 + with: + dotnet-version: '10.0.x' + + - name: Publish StellaOps BuildX generator + run: | + dotnet publish src/StellaOps.Scanner.Sbomer.BuildXPlugin/StellaOps.Scanner.Sbomer.BuildXPlugin.csproj \ + -c Release \ + -o out/buildx + + - name: Handshake CAS + run: | + dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll handshake \ + --manifest out/buildx \ + --cas out/cas + + - name: Build demo container image + run: | + docker buildx build --load -t stellaops/buildx-demo:ci samples/ci/buildx-demo + + - name: Capture image digest 
+ id: digest + run: | + DIGEST=$(docker image inspect stellaops/buildx-demo:ci --format '{{index .RepoDigests 0}}') + echo "digest=$DIGEST" >> "$GITHUB_OUTPUT" + + - name: Generate SBOM from built image + run: | + mkdir -p out + docker sbom stellaops/buildx-demo:ci --format cyclonedx-json > out/buildx-sbom.cdx.json + + - name: Start mock Attestor + id: attestor + run: | + mkdir -p out + cat <<'PY' > out/mock-attestor.py +import json +import os +from http.server import BaseHTTPRequestHandler, HTTPServer + +class Handler(BaseHTTPRequestHandler): + def do_POST(self): + length = int(self.headers.get('Content-Length') or 0) + body = self.rfile.read(length) + with open(os.path.join('out', 'provenance-request.json'), 'wb') as fp: + fp.write(body) + self.send_response(202) + self.end_headers() + self.wfile.write(b'accepted') + + def log_message(self, format, *args): + return + +if __name__ == '__main__': + server = HTTPServer(('127.0.0.1', 8085), Handler) + try: + server.serve_forever() + except KeyboardInterrupt: + pass + finally: + server.server_close() +PY + touch out/provenance-request.json + python3 out/mock-attestor.py & + echo $! > out/mock-attestor.pid + + - name: Emit descriptor with provenance placeholder + env: + IMAGE_DIGEST: ${{ steps.digest.outputs.digest }} + # Uncomment the next line and remove the mock Attestor block to hit a real service. 
+ # STELLAOPS_ATTESTOR_TOKEN: ${{ secrets.STELLAOPS_ATTESTOR_TOKEN }} + run: | + dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll descriptor \ + --manifest out/buildx \ + --image "$IMAGE_DIGEST" \ + --sbom out/buildx-sbom.cdx.json \ + --sbom-name buildx-sbom.cdx.json \ + --artifact-type application/vnd.stellaops.sbom.layer+json \ + --sbom-format cyclonedx-json \ + --sbom-kind inventory \ + --repository ${{ github.repository }} \ + --build-ref ${{ github.sha }} \ + --attestor http://127.0.0.1:8085/provenance \ + > out/buildx-descriptor.json + + - name: Stop mock Attestor + if: always() + run: | + if [ -f out/mock-attestor.pid ]; then + kill $(cat out/mock-attestor.pid) + fi + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: stellaops-buildx-demo + path: | + out/buildx-descriptor.json + out/buildx-sbom.cdx.json + out/provenance-request.json + + - name: Show descriptor summary + run: | + cat out/buildx-descriptor.json diff --git a/src/Directory.Build.props b/src/Directory.Build.props index 3de1fe85..424371d8 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -7,6 +7,8 @@ true true true + $([System.IO.Path]::GetFullPath('$(MSBuildThisFileDirectory)..\plugins\scanner\buildx\')) + true diff --git a/src/Directory.Build.targets b/src/Directory.Build.targets index 48393b88..92c102a3 100644 --- a/src/Directory.Build.targets +++ b/src/Directory.Build.targets @@ -30,4 +30,21 @@ + + + + $(ScannerBuildxPluginOutputRoot)\$(MSBuildProjectName) + + + + + + + + + + + + + diff --git a/src/StellaOps.Policy.Tests/PolicyBinderTests.cs b/src/StellaOps.Policy.Tests/PolicyBinderTests.cs new file mode 100644 index 00000000..21b70f8c --- /dev/null +++ b/src/StellaOps.Policy.Tests/PolicyBinderTests.cs @@ -0,0 +1,86 @@ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StellaOps.Policy.Tests; + +public sealed class PolicyBinderTests +{ + [Fact] + public void 
Bind_ValidYaml_ReturnsSuccess() + { + const string yaml = """ + version: "1.0" + rules: + - name: Block Critical + severity: [Critical] + sources: [NVD] + action: block + """; + + var result = PolicyBinder.Bind(yaml, PolicyDocumentFormat.Yaml); + + Assert.True(result.Success); + Assert.Equal("1.0", result.Document.Version); + Assert.Single(result.Document.Rules); + Assert.Empty(result.Issues); + } + + [Fact] + public void Bind_InvalidSeverity_ReturnsError() + { + const string yaml = """ + version: "1.0" + rules: + - name: Invalid Severity + severity: [Nope] + action: block + """; + + var result = PolicyBinder.Bind(yaml, PolicyDocumentFormat.Yaml); + + Assert.False(result.Success); + Assert.Contains(result.Issues, issue => issue.Code == "policy.severity.invalid"); + } + + [Fact] + public async Task Cli_StrictMode_FailsOnWarnings() + { + const string yaml = """ + version: "1.0" + rules: + - name: Quiet Warning + sources: ["", "NVD"] + action: ignore + """; + + var path = Path.Combine(Path.GetTempPath(), $"policy-{Guid.NewGuid():N}.yaml"); + await File.WriteAllTextAsync(path, yaml); + + try + { + using var output = new StringWriter(); + using var error = new StringWriter(); + var cli = new PolicyValidationCli(output, error); + var options = new PolicyValidationCliOptions + { + Inputs = new[] { path }, + Strict = true, + }; + + var exitCode = await cli.RunAsync(options, CancellationToken.None); + + Assert.Equal(2, exitCode); + Assert.Contains("WARNING", output.ToString()); + } + finally + { + if (File.Exists(path)) + { + File.Delete(path); + } + } + } +} diff --git a/src/StellaOps.Policy.Tests/PolicyPreviewServiceTests.cs b/src/StellaOps.Policy.Tests/PolicyPreviewServiceTests.cs new file mode 100644 index 00000000..7d4cdb44 --- /dev/null +++ b/src/StellaOps.Policy.Tests/PolicyPreviewServiceTests.cs @@ -0,0 +1,166 @@ +using System.Collections.Immutable; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using 
Microsoft.Extensions.Logging.Abstractions; +using Microsoft.Extensions.Time.Testing; +using Xunit; + +namespace StellaOps.Policy.Tests; + +public sealed class PolicyPreviewServiceTests +{ + [Fact] + public async Task PreviewAsync_ComputesDiffs_ForBlockingRule() + { + const string yaml = """ +version: "1.0" +rules: + - name: Block Critical + severity: [Critical] + action: block +"""; + + var snapshotRepo = new InMemoryPolicySnapshotRepository(); + var auditRepo = new InMemoryPolicyAuditRepository(); + var timeProvider = new FakeTimeProvider(); + var store = new PolicySnapshotStore(snapshotRepo, auditRepo, timeProvider, NullLogger.Instance); + + await store.SaveAsync(new PolicySnapshotContent(yaml, PolicyDocumentFormat.Yaml, "tester", null, null), CancellationToken.None); + + var service = new PolicyPreviewService(store, NullLogger.Instance); + + var findings = ImmutableArray.Create( + PolicyFinding.Create("finding-1", PolicySeverity.Critical, environment: "prod", source: "NVD"), + PolicyFinding.Create("finding-2", PolicySeverity.Low)); + + var baseline = ImmutableArray.Create( + new PolicyVerdict("finding-1", PolicyVerdictStatus.Pass), + new PolicyVerdict("finding-2", PolicyVerdictStatus.Pass)); + + var response = await service.PreviewAsync(new PolicyPreviewRequest( + "sha256:abc", + findings, + baseline), + CancellationToken.None); + + Assert.True(response.Success); + Assert.Equal(1, response.ChangedCount); + var diff1 = Assert.Single(response.Diffs.Where(diff => diff.Projected.FindingId == "finding-1")); + Assert.Equal(PolicyVerdictStatus.Pass, diff1.Baseline.Status); + Assert.Equal(PolicyVerdictStatus.Blocked, diff1.Projected.Status); + Assert.Equal("Block Critical", diff1.Projected.RuleName); + Assert.True(diff1.Projected.Score > 0); + Assert.Equal(PolicyScoringConfig.Default.Version, diff1.Projected.ConfigVersion); + Assert.Equal(PolicyVerdictStatus.Pass, response.Diffs.First(diff => diff.Projected.FindingId == "finding-2").Projected.Status); + } + + [Fact] + 
public async Task PreviewAsync_UsesProposedPolicy_WhenProvided() + { + const string yaml = """ +version: "1.0" +rules: + - name: Ignore Dev + environments: [dev] + action: + type: ignore + justification: dev waiver +"""; + + var snapshotRepo = new InMemoryPolicySnapshotRepository(); + var auditRepo = new InMemoryPolicyAuditRepository(); + var store = new PolicySnapshotStore(snapshotRepo, auditRepo, TimeProvider.System, NullLogger.Instance); + var service = new PolicyPreviewService(store, NullLogger.Instance); + + var findings = ImmutableArray.Create( + PolicyFinding.Create("finding-1", PolicySeverity.Medium, environment: "dev")); + + var baseline = ImmutableArray.Create(new PolicyVerdict("finding-1", PolicyVerdictStatus.Blocked)); + + var response = await service.PreviewAsync(new PolicyPreviewRequest( + "sha256:def", + findings, + baseline, + SnapshotOverride: null, + ProposedPolicy: new PolicySnapshotContent(yaml, PolicyDocumentFormat.Yaml, "tester", null, "dev override")), + CancellationToken.None); + + Assert.True(response.Success); + var diff = Assert.Single(response.Diffs); + Assert.Equal(PolicyVerdictStatus.Blocked, diff.Baseline.Status); + Assert.Equal(PolicyVerdictStatus.Ignored, diff.Projected.Status); + Assert.Equal("Ignore Dev", diff.Projected.RuleName); + Assert.True(diff.Projected.Score >= 0); + Assert.Equal(1, response.ChangedCount); + } + + [Fact] + public async Task PreviewAsync_ReturnsIssues_WhenPolicyInvalid() + { + var snapshotRepo = new InMemoryPolicySnapshotRepository(); + var auditRepo = new InMemoryPolicyAuditRepository(); + var store = new PolicySnapshotStore(snapshotRepo, auditRepo, TimeProvider.System, NullLogger.Instance); + var service = new PolicyPreviewService(store, NullLogger.Instance); + + const string invalid = "version: 1.0"; + var request = new PolicyPreviewRequest( + "sha256:ghi", + ImmutableArray.Empty, + ImmutableArray.Empty, + SnapshotOverride: null, + ProposedPolicy: new PolicySnapshotContent(invalid, 
PolicyDocumentFormat.Yaml, null, null, null)); + + var response = await service.PreviewAsync(request, CancellationToken.None); + + Assert.False(response.Success); + Assert.NotEmpty(response.Issues); + } + + [Fact] + public async Task PreviewAsync_QuietWithoutVexDowngradesToWarn() + { + const string yaml = """ +version: "1.0" +rules: + - name: Quiet Without VEX + severity: [Low] + quiet: true + action: + type: ignore +"""; + + var binding = PolicyBinder.Bind(yaml, PolicyDocumentFormat.Yaml); + Assert.True(binding.Success); + Assert.Empty(binding.Issues); + Assert.False(binding.Document.Rules[0].Metadata.ContainsKey("quiet")); + Assert.True(binding.Document.Rules[0].Action.Quiet); + + var store = new PolicySnapshotStore(new InMemoryPolicySnapshotRepository(), new InMemoryPolicyAuditRepository(), TimeProvider.System, NullLogger.Instance); + await store.SaveAsync(new PolicySnapshotContent(yaml, PolicyDocumentFormat.Yaml, "tester", null, "quiet test"), CancellationToken.None); + var snapshot = await store.GetLatestAsync(); + Assert.NotNull(snapshot); + Assert.True(snapshot!.Document.Rules[0].Action.Quiet); + Assert.Null(snapshot.Document.Rules[0].Action.RequireVex); + Assert.Equal(PolicyActionType.Ignore, snapshot.Document.Rules[0].Action.Type); + var manualVerdict = PolicyEvaluation.EvaluateFinding(snapshot.Document, snapshot.ScoringConfig, PolicyFinding.Create("finding-quiet", PolicySeverity.Low)); + Assert.Equal(PolicyVerdictStatus.Warned, manualVerdict.Status); + + var service = new PolicyPreviewService(store, NullLogger.Instance); + + var findings = ImmutableArray.Create(PolicyFinding.Create("finding-quiet", PolicySeverity.Low)); + var baseline = ImmutableArray.Empty; + + var response = await service.PreviewAsync(new PolicyPreviewRequest( + "sha256:quiet", + findings, + baseline), + CancellationToken.None); + + Assert.True(response.Success); + var verdict = Assert.Single(response.Diffs).Projected; + Assert.Equal(PolicyVerdictStatus.Warned, verdict.Status); + 
Assert.Contains("requireVex", verdict.Notes, System.StringComparison.OrdinalIgnoreCase); + Assert.True(verdict.Score >= 0); + } +} diff --git a/src/StellaOps.Policy.Tests/PolicyScoringConfigTests.cs b/src/StellaOps.Policy.Tests/PolicyScoringConfigTests.cs new file mode 100644 index 00000000..fae02586 --- /dev/null +++ b/src/StellaOps.Policy.Tests/PolicyScoringConfigTests.cs @@ -0,0 +1,26 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StellaOps.Policy.Tests; + +public sealed class PolicyScoringConfigTests +{ + [Fact] + public void LoadDefaultReturnsConfig() + { + var config = PolicyScoringConfigBinder.LoadDefault(); + Assert.NotNull(config); + Assert.Equal("1.0", config.Version); + Assert.NotEmpty(config.SeverityWeights); + Assert.True(config.SeverityWeights.ContainsKey(PolicySeverity.Critical)); + Assert.True(config.QuietPenalty > 0); + } + + [Fact] + public void BindRejectsEmptyContent() + { + var result = PolicyScoringConfigBinder.Bind(string.Empty, PolicyDocumentFormat.Json); + Assert.False(result.Success); + Assert.NotEmpty(result.Issues); + } +} diff --git a/src/StellaOps.Policy.Tests/PolicySnapshotStoreTests.cs b/src/StellaOps.Policy.Tests/PolicySnapshotStoreTests.cs new file mode 100644 index 00000000..e109af83 --- /dev/null +++ b/src/StellaOps.Policy.Tests/PolicySnapshotStoreTests.cs @@ -0,0 +1,94 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging.Abstractions; +using Microsoft.Extensions.Time.Testing; +using Xunit; + +namespace StellaOps.Policy.Tests; + +public sealed class PolicySnapshotStoreTests +{ + private const string BasePolicyYaml = """ +version: "1.0" +rules: + - name: Block Critical + severity: [Critical] + action: block +"""; + + [Fact] + public async Task SaveAsync_CreatesNewSnapshotAndAuditEntry() + { + var snapshotRepo = new InMemoryPolicySnapshotRepository(); + var auditRepo = new InMemoryPolicyAuditRepository(); + var timeProvider = new FakeTimeProvider(new 
DateTimeOffset(2025, 10, 18, 10, 0, 0, TimeSpan.Zero)); + var store = new PolicySnapshotStore(snapshotRepo, auditRepo, timeProvider, NullLogger.Instance); + + var content = new PolicySnapshotContent(BasePolicyYaml, PolicyDocumentFormat.Yaml, "cli", "test", null); + + var result = await store.SaveAsync(content, CancellationToken.None); + + Assert.True(result.Success); + Assert.True(result.Created); + Assert.NotNull(result.Snapshot); + Assert.Equal("rev-1", result.Snapshot!.RevisionId); + Assert.Equal(result.Digest, result.Snapshot.Digest); + Assert.Equal(timeProvider.GetUtcNow(), result.Snapshot.CreatedAt); + Assert.Equal(PolicyScoringConfig.Default.Version, result.Snapshot.ScoringConfig.Version); + + var latest = await store.GetLatestAsync(); + Assert.Equal(result.Snapshot, latest); + + var audits = await auditRepo.ListAsync(10); + Assert.Single(audits); + Assert.Equal(result.Digest, audits[0].Digest); + Assert.Equal("snapshot.created", audits[0].Action); + Assert.Equal("rev-1", audits[0].RevisionId); + } + + [Fact] + public async Task SaveAsync_DoesNotCreateNewRevisionWhenDigestUnchanged() + { + var snapshotRepo = new InMemoryPolicySnapshotRepository(); + var auditRepo = new InMemoryPolicyAuditRepository(); + var timeProvider = new FakeTimeProvider(new DateTimeOffset(2025, 10, 18, 10, 0, 0, TimeSpan.Zero)); + var store = new PolicySnapshotStore(snapshotRepo, auditRepo, timeProvider, NullLogger.Instance); + + var content = new PolicySnapshotContent(BasePolicyYaml, PolicyDocumentFormat.Yaml, "cli", "test", null); + var first = await store.SaveAsync(content, CancellationToken.None); + Assert.True(first.Created); + + timeProvider.Advance(TimeSpan.FromHours(1)); + var second = await store.SaveAsync(content, CancellationToken.None); + + Assert.True(second.Success); + Assert.False(second.Created); + Assert.Equal(first.Digest, second.Digest); + Assert.Equal("rev-1", second.Snapshot!.RevisionId); + Assert.Equal(PolicyScoringConfig.Default.Version, 
second.Snapshot.ScoringConfig.Version); + + var audits = await auditRepo.ListAsync(10); + Assert.Single(audits); + } + + [Fact] + public async Task SaveAsync_ReturnsFailureWhenValidationFails() + { + var snapshotRepo = new InMemoryPolicySnapshotRepository(); + var auditRepo = new InMemoryPolicyAuditRepository(); + var store = new PolicySnapshotStore(snapshotRepo, auditRepo, TimeProvider.System, NullLogger.Instance); + + const string invalidYaml = "version: '1.0'\nrules: []"; + var content = new PolicySnapshotContent(invalidYaml, PolicyDocumentFormat.Yaml, null, null, null); + + var result = await store.SaveAsync(content, CancellationToken.None); + + Assert.False(result.Success); + Assert.False(result.Created); + Assert.Null(result.Snapshot); + + var audits = await auditRepo.ListAsync(5); + Assert.Empty(audits); + } +} diff --git a/src/StellaOps.Policy.Tests/StellaOps.Policy.Tests.csproj b/src/StellaOps.Policy.Tests/StellaOps.Policy.Tests.csproj new file mode 100644 index 00000000..e36ed5ce --- /dev/null +++ b/src/StellaOps.Policy.Tests/StellaOps.Policy.Tests.csproj @@ -0,0 +1,13 @@ + + + net10.0 + preview + enable + enable + true + + + + + + diff --git a/src/StellaOps.Policy/Audit/IPolicyAuditRepository.cs b/src/StellaOps.Policy/Audit/IPolicyAuditRepository.cs new file mode 100644 index 00000000..fdc8913c --- /dev/null +++ b/src/StellaOps.Policy/Audit/IPolicyAuditRepository.cs @@ -0,0 +1,12 @@ +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Policy; + +public interface IPolicyAuditRepository +{ + Task AddAsync(PolicyAuditEntry entry, CancellationToken cancellationToken = default); + + Task> ListAsync(int limit, CancellationToken cancellationToken = default); +} diff --git a/src/StellaOps.Policy/Audit/InMemoryPolicyAuditRepository.cs b/src/StellaOps.Policy/Audit/InMemoryPolicyAuditRepository.cs new file mode 100644 index 00000000..b85e97bb --- /dev/null +++ 
b/src/StellaOps.Policy/Audit/InMemoryPolicyAuditRepository.cs @@ -0,0 +1,52 @@ +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Policy; + +public sealed class InMemoryPolicyAuditRepository : IPolicyAuditRepository +{ + private readonly List _entries = new(); + private readonly SemaphoreSlim _mutex = new(1, 1); + + public async Task AddAsync(PolicyAuditEntry entry, CancellationToken cancellationToken = default) + { + if (entry is null) + { + throw new ArgumentNullException(nameof(entry)); + } + + await _mutex.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + _entries.Add(entry); + _entries.Sort(static (left, right) => left.CreatedAt.CompareTo(right.CreatedAt)); + } + finally + { + _mutex.Release(); + } + } + + public async Task> ListAsync(int limit, CancellationToken cancellationToken = default) + { + await _mutex.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + IEnumerable query = _entries; + if (limit > 0) + { + query = query.TakeLast(limit); + } + + return query.ToImmutableArray(); + } + finally + { + _mutex.Release(); + } + } +} diff --git a/src/StellaOps.Policy/PolicyAuditEntry.cs b/src/StellaOps.Policy/PolicyAuditEntry.cs new file mode 100644 index 00000000..7543bc13 --- /dev/null +++ b/src/StellaOps.Policy/PolicyAuditEntry.cs @@ -0,0 +1,12 @@ +using System; + +namespace StellaOps.Policy; + +public sealed record PolicyAuditEntry( + Guid Id, + DateTimeOffset CreatedAt, + string Action, + string RevisionId, + string Digest, + string? 
Actor,
    string Message);
diff --git a/src/StellaOps.Policy/PolicyBinder.cs b/src/StellaOps.Policy/PolicyBinder.cs
new file mode 100644
index 00000000..2cc30b47
--- /dev/null
+++ b/src/StellaOps.Policy/PolicyBinder.cs
@@ -0,0 +1,913 @@
using System;
using System.Collections;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Text;
using System.Text.Json;
using System.Text.Json.Nodes;
using System.Text.Json.Serialization;
using YamlDotNet.Serialization;
using YamlDotNet.Serialization.NamingConventions;

namespace StellaOps.Policy;

/// <summary>Supported on-disk encodings for a policy document.</summary>
public enum PolicyDocumentFormat
{
    Json,
    Yaml,
}

/// <summary>Outcome of binding raw policy text into the canonical model, with collected issues.</summary>
public sealed record PolicyBindingResult(
    bool Success,
    PolicyDocument Document,
    ImmutableArray<PolicyIssue> Issues,
    PolicyDocumentFormat Format);

/// <summary>
/// Parses JSON or YAML policy text and normalizes it into a <see cref="PolicyDocument"/>,
/// reporting problems as <see cref="PolicyIssue"/> entries instead of throwing.
/// </summary>
public static class PolicyBinder
{
    private static readonly JsonSerializerOptions SerializerOptions = new()
    {
        PropertyNameCaseInsensitive = true,
        ReadCommentHandling = JsonCommentHandling.Skip,
        AllowTrailingCommas = true,
        NumberHandling = JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString,
        Converters =
        {
            new JsonStringEnumConverter()
        },
    };

    private static readonly IDeserializer YamlDeserializer = new DeserializerBuilder()
        .WithNamingConvention(CamelCaseNamingConvention.Instance)
        .IgnoreUnmatchedProperties()
        .Build();

    /// <summary>Binds policy text in the given format; malformed input yields error issues, never an exception.</summary>
    public static PolicyBindingResult Bind(string content, PolicyDocumentFormat format)
    {
        if (string.IsNullOrWhiteSpace(content))
        {
            var issues = ImmutableArray.Create(
                PolicyIssue.Error("policy.empty", "Policy document is empty.", "$"));
            return new PolicyBindingResult(false, PolicyDocument.Empty, issues, format);
        }

        try
        {
            var node = ParseToNode(content, format);
            if (node is not JsonObject obj)
            {
                var issues = ImmutableArray.Create(
                    PolicyIssue.Error("policy.document.invalid", "Policy document must be an object.", "$"));
                return new PolicyBindingResult(false, PolicyDocument.Empty, issues, format);
            }

            var model = obj.Deserialize<PolicyDocumentModel>(SerializerOptions) ?? new PolicyDocumentModel();
            var normalization = PolicyNormalizer.Normalize(model);
            // Success means "no error-severity issues"; warnings alone do not fail the bind.
            var success = normalization.Issues.All(static issue => issue.Severity != PolicyIssueSeverity.Error);
            return new PolicyBindingResult(success, normalization.Document, normalization.Issues, format);
        }
        catch (JsonException ex)
        {
            var issues = ImmutableArray.Create(
                PolicyIssue.Error("policy.parse.json", $"Failed to parse policy JSON: {ex.Message}", "$"));
            return new PolicyBindingResult(false, PolicyDocument.Empty, issues, format);
        }
        catch (YamlDotNet.Core.YamlException ex)
        {
            var issues = ImmutableArray.Create(
                PolicyIssue.Error("policy.parse.yaml", $"Failed to parse policy YAML: {ex.Message}", "$"));
            return new PolicyBindingResult(false, PolicyDocument.Empty, issues, format);
        }
    }

    /// <summary>Reads the stream to the end (BOM-aware, stream left open) and binds its text.</summary>
    public static PolicyBindingResult Bind(Stream stream, PolicyDocumentFormat format, Encoding? encoding = null)
    {
        if (stream is null)
        {
            throw new ArgumentNullException(nameof(stream));
        }

        encoding ??= Encoding.UTF8;
        using var reader = new StreamReader(stream, encoding, detectEncodingFromByteOrderMarks: true, leaveOpen: true);
        var content = reader.ReadToEnd();
        return Bind(content, format);
    }

    private static JsonNode? ParseToNode(string content, PolicyDocumentFormat format)
    {
        return format switch
        {
            PolicyDocumentFormat.Json => JsonNode.Parse(content, documentOptions: new JsonDocumentOptions
            {
                AllowTrailingCommas = true,
                CommentHandling = JsonCommentHandling.Skip,
            }),
            PolicyDocumentFormat.Yaml => ConvertYamlToJsonNode(content),
            _ => throw new ArgumentOutOfRangeException(nameof(format), format, "Unsupported policy document format."),
        };
    }

    // YAML is funneled through JsonNode so both formats share a single normalization path.
    private static JsonNode? ConvertYamlToJsonNode(string content)
    {
        var yamlObject = YamlDeserializer.Deserialize(content);
        return ConvertYamlObject(yamlObject);
    }

    private static JsonNode? ConvertYamlObject(object? value)
    {
        switch (value)
        {
            case null:
                return null;
            case string s:
                return JsonValue.Create(s);
            case bool b:
                return JsonValue.Create(b);
            case sbyte or byte or short or ushort or int or uint or long or ulong or float or double or decimal:
                // Decimal holds both integer and fractional YAML scalars without precision surprises.
                return JsonValue.Create(Convert.ToDecimal(value, CultureInfo.InvariantCulture));
            case DateTime dt:
                return JsonValue.Create(dt.ToString("O", CultureInfo.InvariantCulture));
            case DateTimeOffset dto:
                return JsonValue.Create(dto.ToString("O", CultureInfo.InvariantCulture));
            case Enum e:
                return JsonValue.Create(e.ToString());
            case IDictionary dictionary:
            {
                var obj = new JsonObject();
                foreach (DictionaryEntry entry in dictionary)
                {
                    if (entry.Key is null)
                    {
                        continue;
                    }

                    var key = Convert.ToString(entry.Key, CultureInfo.InvariantCulture);
                    if (string.IsNullOrWhiteSpace(key))
                    {
                        continue;
                    }

                    obj[key!] = ConvertYamlObject(entry.Value);
                }

                return obj;
            }
            case IEnumerable enumerable:
            {
                var array = new JsonArray();
                foreach (var item in enumerable)
                {
                    array.Add(ConvertYamlObject(item));
                }

                return array;
            }
            default:
                return JsonValue.Create(value.ToString());
        }
    }

    // Raw deserialization target; all validation/normalization happens in PolicyNormalizer.
    private sealed record PolicyDocumentModel
    {
        [JsonPropertyName("version")]
        public JsonNode? Version { get; init; }

        [JsonPropertyName("description")]
        public string? Description { get; init; }

        [JsonPropertyName("metadata")]
        public Dictionary<string, JsonNode?>? Metadata { get; init; }

        [JsonPropertyName("rules")]
        public List<PolicyRuleModel>? Rules { get; init; }

        // Captures unknown top-level properties so they can be surfaced as warnings.
        [JsonExtensionData]
        public Dictionary<string, JsonElement>? Extensions { get; init; }
    }

    private sealed record PolicyRuleModel
    {
        [JsonPropertyName("id")]
        public string? Identifier { get; init; }

        [JsonPropertyName("name")]
        public string? Name { get; init; }

        [JsonPropertyName("description")]
        public string? Description { get; init; }

        [JsonPropertyName("severity")]
        public List<string?>? Severity { get; init; }

        [JsonPropertyName("sources")]
        public List<string?>? Sources { get; init; }

        [JsonPropertyName("vendors")]
        public List<string?>? Vendors { get; init; }

        [JsonPropertyName("licenses")]
        public List<string?>? Licenses { get; init; }

        [JsonPropertyName("tags")]
        public List<string?>? Tags { get; init; }

        [JsonPropertyName("environments")]
        public List<string?>? Environments { get; init; }

        [JsonPropertyName("images")]
        public List<string?>? Images { get; init; }

        [JsonPropertyName("repositories")]
        public List<string?>? Repositories { get; init; }

        [JsonPropertyName("packages")]
        public List<string?>? Packages { get; init; }

        [JsonPropertyName("purls")]
        public List<string?>? Purls { get; init; }

        [JsonPropertyName("cves")]
        public List<string?>? Cves { get; init; }

        [JsonPropertyName("paths")]
        public List<string?>? Paths { get; init; }

        [JsonPropertyName("layerDigests")]
        public List<string?>? LayerDigests { get; init; }

        [JsonPropertyName("usedByEntrypoint")]
        public List<string?>? UsedByEntrypoint { get; init; }

        [JsonPropertyName("action")]
        public JsonNode? Action { get; init; }

        [JsonPropertyName("expires")]
        public JsonNode? Expires { get; init; }

        [JsonPropertyName("until")]
        public JsonNode? Until { get; init; }

        [JsonPropertyName("justification")]
        public string? Justification { get; init; }

        [JsonPropertyName("quiet")]
        public bool? Quiet { get; init; }

        [JsonPropertyName("metadata")]
        public Dictionary<string, JsonNode?>? Metadata { get; init; }

        [JsonExtensionData]
        public Dictionary<string, JsonElement>?
Extensions { get; init; }
    }

    // Validates the raw models and produces the canonical, deterministically ordered document.
    private sealed class PolicyNormalizer
    {
        private static readonly ImmutableDictionary<string, PolicySeverity> SeverityMap =
            new Dictionary<string, PolicySeverity>(StringComparer.OrdinalIgnoreCase)
            {
                ["critical"] = PolicySeverity.Critical,
                ["high"] = PolicySeverity.High,
                ["medium"] = PolicySeverity.Medium,
                ["moderate"] = PolicySeverity.Medium,
                ["low"] = PolicySeverity.Low,
                ["informational"] = PolicySeverity.Informational,
                ["info"] = PolicySeverity.Informational,
                ["none"] = PolicySeverity.None,
                ["unknown"] = PolicySeverity.Unknown,
            }.ToImmutableDictionary(StringComparer.OrdinalIgnoreCase);

        public static (PolicyDocument Document, ImmutableArray<PolicyIssue> Issues) Normalize(PolicyDocumentModel model)
        {
            var issues = ImmutableArray.CreateBuilder<PolicyIssue>();

            var version = NormalizeVersion(model.Version, issues);
            var metadata = NormalizeMetadata(model.Metadata, "$.metadata", issues);
            var rules = NormalizeRules(model.Rules, issues);

            if (model.Extensions is { Count: > 0 })
            {
                foreach (var pair in model.Extensions)
                {
                    issues.Add(PolicyIssue.Warning(
                        "policy.document.extension",
                        $"Unrecognized document property '{pair.Key}' has been ignored.",
                        $"$.{pair.Key}"));
                }
            }

            var document = new PolicyDocument(
                version ?? PolicySchema.CurrentVersion,
                rules,
                metadata);

            var orderedIssues = SortIssues(issues);
            return (document, orderedIssues);
        }

        private static string? NormalizeVersion(JsonNode? versionNode, ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (versionNode is null)
            {
                issues.Add(PolicyIssue.Warning("policy.version.missing", "Policy version not specified; defaulting to 1.0.", "$.version"));
                return PolicySchema.CurrentVersion;
            }

            if (versionNode is JsonValue value)
            {
                if (value.TryGetValue(out string? versionText))
                {
                    versionText = versionText?.Trim();
                    if (string.IsNullOrEmpty(versionText))
                    {
                        issues.Add(PolicyIssue.Error("policy.version.empty", "Policy version is empty.", "$.version"));
                        return null;
                    }

                    if (IsSupportedVersion(versionText))
                    {
                        return CanonicalizeVersion(versionText);
                    }

                    issues.Add(PolicyIssue.Error("policy.version.unsupported", $"Unsupported policy version '{versionText}'. Expected '{PolicySchema.CurrentVersion}'.", "$.version"));
                    return null;
                }

                // YAML scalars may arrive numeric (e.g. `version: 1.0`); render and re-check.
                if (value.TryGetValue(out double numericVersion))
                {
                    var numericText = numericVersion.ToString("0.0###", CultureInfo.InvariantCulture);
                    if (IsSupportedVersion(numericText))
                    {
                        return CanonicalizeVersion(numericText);
                    }

                    issues.Add(PolicyIssue.Error("policy.version.unsupported", $"Unsupported policy version '{numericText}'.", "$.version"));
                    return null;
                }
            }

            var raw = versionNode.ToJsonString();
            issues.Add(PolicyIssue.Error("policy.version.invalid", $"Policy version must be a string. Received: {raw}", "$.version"));
            return null;
        }

        private static bool IsSupportedVersion(string versionText)
            => string.Equals(versionText, "1", StringComparison.OrdinalIgnoreCase)
               || string.Equals(versionText, "1.0", StringComparison.OrdinalIgnoreCase)
               || string.Equals(versionText, PolicySchema.CurrentVersion, StringComparison.OrdinalIgnoreCase);

        private static string CanonicalizeVersion(string versionText)
            => string.Equals(versionText, "1", StringComparison.OrdinalIgnoreCase)
                ? "1.0"
                : versionText;

        private static ImmutableDictionary<string, string> NormalizeMetadata(
            Dictionary<string, JsonNode?>? metadata,
            string path,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (metadata is null || metadata.Count == 0)
            {
                return ImmutableDictionary<string, string>.Empty;
            }

            var builder = ImmutableDictionary.CreateBuilder<string, string>(StringComparer.Ordinal);
            foreach (var pair in metadata)
            {
                var key = pair.Key?.Trim();
                if (string.IsNullOrEmpty(key))
                {
                    issues.Add(PolicyIssue.Warning("policy.metadata.key.empty", "Metadata keys must be non-empty strings.", path));
                    continue;
                }

                var value = ConvertNodeToString(pair.Value);
                builder[key] = value;
            }

            return builder.ToImmutable();
        }

        private static ImmutableArray<PolicyRule> NormalizeRules(
            List<PolicyRuleModel>? rules,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (rules is null || rules.Count == 0)
            {
                issues.Add(PolicyIssue.Error("policy.rules.empty", "At least one rule must be defined.", "$.rules"));
                return ImmutableArray<PolicyRule>.Empty;
            }

            var normalized = new List<(PolicyRule Rule, int Index)>(rules.Count);
            var seenNames = new HashSet<string>(StringComparer.OrdinalIgnoreCase);

            for (var index = 0; index < rules.Count; index++)
            {
                var model = rules[index];
                var normalizedRule = NormalizeRule(model, index, issues);
                if (normalizedRule is null)
                {
                    continue;
                }

                if (!seenNames.Add(normalizedRule.Name))
                {
                    issues.Add(PolicyIssue.Warning(
                        "policy.rules.duplicateName",
                        $"Duplicate rule name '{normalizedRule.Name}' detected; evaluation order may be ambiguous.",
                        $"$.rules[{index}].name"));
                }

                normalized.Add((normalizedRule, index));
            }

            // Deterministic ordering: name, then identifier, then original position as final tiebreaker.
            return normalized
                .OrderBy(static tuple => tuple.Rule.Name, StringComparer.OrdinalIgnoreCase)
                .ThenBy(static tuple => tuple.Rule.Identifier ?? string.Empty, StringComparer.OrdinalIgnoreCase)
                .ThenBy(static tuple => tuple.Index)
                .Select(static tuple => tuple.Rule)
                .ToImmutableArray();
        }

        private static PolicyRule? NormalizeRule(
            PolicyRuleModel model,
            int index,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            var basePath = $"$.rules[{index}]";

            var name = NormalizeRequiredString(model.Name, $"{basePath}.name", "Rule name", issues);
            if (name is null)
            {
                // A rule without a name cannot be referenced; drop it after reporting the error.
                return null;
            }

            var identifier = NormalizeOptionalString(model.Identifier);
            var description = NormalizeOptionalString(model.Description);
            var metadata = NormalizeMetadata(model.Metadata, $"{basePath}.metadata", issues);

            var severities = NormalizeSeverityList(model.Severity, $"{basePath}.severity", issues);
            var environments = NormalizeStringList(model.Environments, $"{basePath}.environments", issues);
            var sources = NormalizeStringList(model.Sources, $"{basePath}.sources", issues);
            var vendors = NormalizeStringList(model.Vendors, $"{basePath}.vendors", issues);
            var licenses = NormalizeStringList(model.Licenses, $"{basePath}.licenses", issues);
            var tags = NormalizeStringList(model.Tags, $"{basePath}.tags", issues);

            var match = new PolicyRuleMatchCriteria(
                NormalizeStringList(model.Images, $"{basePath}.images", issues),
                NormalizeStringList(model.Repositories, $"{basePath}.repositories", issues),
                NormalizeStringList(model.Packages, $"{basePath}.packages", issues),
                NormalizeStringList(model.Purls, $"{basePath}.purls", issues),
                NormalizeStringList(model.Cves, $"{basePath}.cves", issues),
                NormalizeStringList(model.Paths, $"{basePath}.paths", issues),
                NormalizeStringList(model.LayerDigests, $"{basePath}.layerDigests", issues),
                NormalizeStringList(model.UsedByEntrypoint, $"{basePath}.usedByEntrypoint", issues));

            var action = NormalizeAction(model, basePath, issues);
            var justification = NormalizeOptionalString(model.Justification);
            // 'expires' and 'until' are synonyms at rule level; 'expires' wins when both are set.
            var expires = NormalizeTemporal(model.Expires ?? model.Until, $"{basePath}.expires", issues);

            if (model.Extensions is { Count: > 0 })
            {
                foreach (var pair in model.Extensions)
                {
                    issues.Add(PolicyIssue.Warning(
                        "policy.rule.extension",
                        $"Unrecognized rule property '{pair.Key}' has been ignored.",
                        $"{basePath}.{pair.Key}"));
                }
            }

            return PolicyRule.Create(
                name,
                action,
                severities,
                environments,
                sources,
                vendors,
                licenses,
                tags,
                match,
                expires,
                justification,
                identifier,
                description,
                metadata);
        }

        private static PolicyAction NormalizeAction(
            PolicyRuleModel model,
            string basePath,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            var actionNode = model.Action;
            var quiet = model.Quiet ?? false;
            // 'quiet' may also arrive as an unmatched extension property; honor a literal true there.
            if (!quiet && model.Extensions is not null && model.Extensions.TryGetValue("quiet", out var quietExtension) && quietExtension.ValueKind == JsonValueKind.True)
            {
                quiet = true;
            }
            string? justification = NormalizeOptionalString(model.Justification);
            DateTimeOffset? until = NormalizeTemporal(model.Until, $"{basePath}.until", issues);
            DateTimeOffset? expires = NormalizeTemporal(model.Expires, $"{basePath}.expires", issues);

            var effectiveUntil = until ?? expires;

            if (actionNode is null)
            {
                issues.Add(PolicyIssue.Error("policy.action.missing", "Rule action is required.", $"{basePath}.action"));
                return new PolicyAction(PolicyActionType.Block, null, null, null, Quiet: false);
            }

            string? actionType = null;
            JsonObject? actionObject = null;

            switch (actionNode)
            {
                case JsonValue value when value.TryGetValue(out string? text):
                    actionType = text;
                    break;
                case JsonValue value when value.TryGetValue(out bool booleanValue):
                    // Boolean shorthand: true => block, false => ignore.
                    actionType = booleanValue ? "block" : "ignore";
                    break;
                case JsonObject obj:
                    actionObject = obj;
                    if (obj.TryGetPropertyValue("type", out var typeNode) && typeNode is JsonValue typeValue && typeValue.TryGetValue(out string?
typeText))
                    {
                        actionType = typeText;
                    }
                    else
                    {
                        issues.Add(PolicyIssue.Error("policy.action.type", "Action object must contain a 'type' property.", $"{basePath}.action.type"));
                    }

                    if (obj.TryGetPropertyValue("quiet", out var quietNode) && quietNode is JsonValue quietValue && quietValue.TryGetValue(out bool quietFlag))
                    {
                        quiet = quietFlag;
                    }

                    if (obj.TryGetPropertyValue("until", out var untilNode))
                    {
                        // Rule-level until/expires take precedence over the action-scoped value.
                        effectiveUntil ??= NormalizeTemporal(untilNode, $"{basePath}.action.until", issues);
                    }

                    if (obj.TryGetPropertyValue("justification", out var justificationNode) && justificationNode is JsonValue justificationValue && justificationValue.TryGetValue(out string? justificationText))
                    {
                        justification = NormalizeOptionalString(justificationText);
                    }

                    break;
                default:
                    actionType = actionNode.ToString();
                    break;
            }

            if (string.IsNullOrWhiteSpace(actionType))
            {
                issues.Add(PolicyIssue.Error("policy.action.type", "Action type is required.", $"{basePath}.action"));
                return new PolicyAction(PolicyActionType.Block, null, null, null, Quiet: quiet);
            }

            actionType = actionType.Trim();
            var (type, typeIssues) = MapActionType(actionType, $"{basePath}.action");
            foreach (var issue in typeIssues)
            {
                issues.Add(issue);
            }

            PolicyIgnoreOptions? ignoreOptions = null;
            PolicyEscalateOptions? escalateOptions = null;
            PolicyRequireVexOptions? requireVexOptions = null;

            if (type == PolicyActionType.Ignore)
            {
                ignoreOptions = new PolicyIgnoreOptions(effectiveUntil, justification);
            }
            else if (type == PolicyActionType.Escalate)
            {
                escalateOptions = NormalizeEscalateOptions(actionObject, $"{basePath}.action", issues);
            }
            else if (type == PolicyActionType.RequireVex)
            {
                requireVexOptions = NormalizeRequireVexOptions(actionObject, $"{basePath}.action", issues);
            }

            return new PolicyAction(type, ignoreOptions, escalateOptions, requireVexOptions, quiet);
        }

        // Maps loose user spellings onto the action enum; unknown values fall back to Block with a warning.
        private static (PolicyActionType Type, ImmutableArray<PolicyIssue> Issues) MapActionType(string value, string path)
        {
            var issues = ImmutableArray<PolicyIssue>.Empty;
            var lower = value.ToLowerInvariant();
            return lower switch
            {
                "block" or "fail" or "deny" => (PolicyActionType.Block, issues),
                "ignore" or "mute" => (PolicyActionType.Ignore, issues),
                "warn" or "warning" => (PolicyActionType.Warn, issues),
                "defer" => (PolicyActionType.Defer, issues),
                "escalate" => (PolicyActionType.Escalate, issues),
                "requirevex" or "require_vex" or "require-vex" => (PolicyActionType.RequireVex, issues),
                _ => (PolicyActionType.Block, ImmutableArray.Create(PolicyIssue.Warning(
                    "policy.action.unknown",
                    $"Unknown action '{value}' encountered. Defaulting to 'block'.",
                    path))),
            };
        }

        private static PolicyEscalateOptions? NormalizeEscalateOptions(
            JsonObject? actionObject,
            string path,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (actionObject is null)
            {
                return null;
            }

            PolicySeverity? minSeverity = null;
            bool requireKev = false;
            double? minEpss = null;

            if (actionObject.TryGetPropertyValue("severity", out var severityNode) && severityNode is JsonValue severityValue && severityValue.TryGetValue(out string? severityText))
            {
                if (SeverityMap.TryGetValue(severityText ?? string.Empty, out var mapped))
                {
                    minSeverity = mapped;
                }
                else
                {
                    issues.Add(PolicyIssue.Warning("policy.action.escalate.severity", $"Unknown escalate severity '{severityText}'.", $"{path}.severity"));
                }
            }

            if (actionObject.TryGetPropertyValue("kev", out var kevNode) && kevNode is JsonValue kevValue && kevValue.TryGetValue(out bool kevFlag))
            {
                requireKev = kevFlag;
            }

            if (actionObject.TryGetPropertyValue("epss", out var epssNode))
            {
                var parsed = ParseDouble(epssNode, $"{path}.epss", issues);
                if (parsed is { } epssValue)
                {
                    // EPSS is a probability; reject values outside [0, 1] with a warning.
                    if (epssValue < 0 || epssValue > 1)
                    {
                        issues.Add(PolicyIssue.Warning("policy.action.escalate.epssRange", "EPS score must be between 0 and 1.", $"{path}.epss"));
                    }
                    else
                    {
                        minEpss = epssValue;
                    }
                }
            }

            return new PolicyEscalateOptions(minSeverity, requireKev, minEpss);
        }

        private static PolicyRequireVexOptions? NormalizeRequireVexOptions(
            JsonObject? actionObject,
            string path,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (actionObject is null)
            {
                return null;
            }

            var vendors = ImmutableArray<string>.Empty;
            var justifications = ImmutableArray<string>.Empty;

            if (actionObject.TryGetPropertyValue("vendors", out var vendorsNode))
            {
                vendors = NormalizeJsonStringArray(vendorsNode, $"{path}.vendors", issues);
            }

            if (actionObject.TryGetPropertyValue("justifications", out var justificationsNode))
            {
                justifications = NormalizeJsonStringArray(justificationsNode, $"{path}.justifications", issues);
            }

            return new PolicyRequireVexOptions(vendors, justifications);
        }

        // De-duplicates (case-insensitively) and sorts entries for deterministic output.
        private static ImmutableArray<string> NormalizeStringList(
            List<string?>? values,
            string path,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (values is null || values.Count == 0)
            {
                return ImmutableArray<string>.Empty;
            }

            var builder = ImmutableHashSet.CreateBuilder<string>(StringComparer.OrdinalIgnoreCase);
            foreach (var value in values)
            {
                var normalized = NormalizeOptionalString(value);
                if (string.IsNullOrEmpty(normalized))
                {
                    issues.Add(PolicyIssue.Warning("policy.list.blank", $"Blank entry detected; ignoring value at {path}.", path));
                    continue;
                }

                builder.Add(normalized);
            }

            return builder.ToImmutable()
                .OrderBy(static item => item, StringComparer.OrdinalIgnoreCase)
                .ToImmutableArray();
        }

        private static ImmutableArray<PolicySeverity> NormalizeSeverityList(
            List<string?>? values,
            string path,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (values is null || values.Count == 0)
            {
                return ImmutableArray<PolicySeverity>.Empty;
            }

            var builder = ImmutableArray.CreateBuilder<PolicySeverity>();
            foreach (var value in values)
            {
                var normalized = NormalizeOptionalString(value);
                if (string.IsNullOrEmpty(normalized))
                {
                    issues.Add(PolicyIssue.Warning("policy.severity.blank", "Blank severity was ignored.", path));
                    continue;
                }

                if (SeverityMap.TryGetValue(normalized, out var severity))
                {
                    builder.Add(severity);
                }
                else
                {
                    issues.Add(PolicyIssue.Error("policy.severity.invalid", $"Unknown severity '{value}'.", path));
                }
            }

            return builder.Distinct().OrderBy(static sev => sev).ToImmutableArray();
        }

        private static ImmutableArray<string> NormalizeJsonStringArray(
            JsonNode? node,
            string path,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (node is null)
            {
                return ImmutableArray<string>.Empty;
            }

            if (node is JsonArray array)
            {
                var values = new List<string>(array.Count);
                foreach (var element in array)
                {
                    var text = ConvertNodeToString(element);
                    if (string.IsNullOrWhiteSpace(text))
                    {
                        issues.Add(PolicyIssue.Warning("policy.list.blank", $"Blank entry detected; ignoring value at {path}.", path));
                    }
                    else
                    {
                        values.Add(text);
                    }
                }

                return values
                    .Distinct(StringComparer.OrdinalIgnoreCase)
                    .OrderBy(static entry => entry, StringComparer.OrdinalIgnoreCase)
                    .ToImmutableArray();
            }

            // A scalar node is treated as a single-element list.
            var single = ConvertNodeToString(node);
            return ImmutableArray.Create(single);
        }

        private static double? ParseDouble(JsonNode? node, string path, ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (node is null)
            {
                return null;
            }

            if (node is JsonValue value)
            {
                if (value.TryGetValue(out double numeric))
                {
                    return numeric;
                }

                if (value.TryGetValue(out string? text) && double.TryParse(text, NumberStyles.Float, CultureInfo.InvariantCulture, out numeric))
                {
                    return numeric;
                }
            }

            issues.Add(PolicyIssue.Warning("policy.number.invalid", $"Value '{node.ToJsonString()}' is not a valid number.", path));
            return null;
        }

        private static DateTimeOffset? NormalizeTemporal(JsonNode? node, string path, ImmutableArray<PolicyIssue>.Builder issues)
        {
            if (node is null)
            {
                return null;
            }

            if (node is JsonValue value)
            {
                if (value.TryGetValue(out DateTimeOffset dto))
                {
                    return dto;
                }

                if (value.TryGetValue(out DateTime dt))
                {
                    // Bare DateTimes are pinned to UTC to keep evaluation deterministic.
                    return new DateTimeOffset(DateTime.SpecifyKind(dt, DateTimeKind.Utc));
                }

                if (value.TryGetValue(out string?
text))
                {
                    if (DateTimeOffset.TryParse(text, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal | DateTimeStyles.AdjustToUniversal, out var parsed))
                    {
                        return parsed;
                    }

                    if (DateTime.TryParse(text, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal | DateTimeStyles.AdjustToUniversal, out var parsedDate))
                    {
                        return new DateTimeOffset(parsedDate);
                    }
                }
            }

            issues.Add(PolicyIssue.Warning("policy.date.invalid", $"Value '{node.ToJsonString()}' is not a valid ISO-8601 timestamp.", path));
            return null;
        }

        private static string? NormalizeRequiredString(
            string? value,
            string path,
            string fieldDescription,
            ImmutableArray<PolicyIssue>.Builder issues)
        {
            var normalized = NormalizeOptionalString(value);
            if (!string.IsNullOrEmpty(normalized))
            {
                return normalized;
            }

            issues.Add(PolicyIssue.Error(
                "policy.required",
                $"{fieldDescription} is required.",
                path));
            return null;
        }

        private static string? NormalizeOptionalString(string? value)
        {
            if (string.IsNullOrWhiteSpace(value))
            {
                return null;
            }

            return value.Trim();
        }

        private static string ConvertNodeToString(JsonNode? node)
        {
            if (node is null)
            {
                return string.Empty;
            }

            return node switch
            {
                JsonValue value when value.TryGetValue(out string? text) => text ?? string.Empty,
                JsonValue value when value.TryGetValue(out bool boolean) => boolean ? "true" : "false",
                JsonValue value when value.TryGetValue(out double numeric) => numeric.ToString(CultureInfo.InvariantCulture),
                JsonObject obj => obj.ToJsonString(),
                JsonArray array => array.ToJsonString(),
                _ => node.ToJsonString(),
            };
        }

        // Errors first, then warnings, then everything else; ties broken by path and code for stable output.
        private static ImmutableArray<PolicyIssue> SortIssues(ImmutableArray<PolicyIssue>.Builder issues)
        {
            return issues.ToImmutable()
                .OrderBy(static issue => issue.Severity switch
                {
                    PolicyIssueSeverity.Error => 0,
                    PolicyIssueSeverity.Warning => 1,
                    _ => 2,
                })
                .ThenBy(static issue => issue.Path, StringComparer.Ordinal)
                .ThenBy(static issue => issue.Code, StringComparer.Ordinal)
                .ToImmutableArray();
        }
    }
}
diff --git a/src/StellaOps.Policy/PolicyDiagnostics.cs b/src/StellaOps.Policy/PolicyDiagnostics.cs
new file mode 100644
index 00000000..4425ec27
--- /dev/null
+++ b/src/StellaOps.Policy/PolicyDiagnostics.cs
@@ -0,0 +1,77 @@
using System;
using System.Collections.Immutable;
using System.Linq;

namespace StellaOps.Policy;

/// <summary>Summary of a policy bind: counts, issues, and operator-facing recommendations.</summary>
public sealed record PolicyDiagnosticsReport(
    string Version,
    int RuleCount,
    int ErrorCount,
    int WarningCount,
    DateTimeOffset GeneratedAt,
    ImmutableArray<PolicyIssue> Issues,
    ImmutableArray<string> Recommendations);

public static class PolicyDiagnostics
{
    /// <summary>Builds a diagnostics report; <paramref name="timeProvider"/> defaults to the system clock.</summary>
    public static PolicyDiagnosticsReport Create(PolicyBindingResult bindingResult, TimeProvider? timeProvider = null)
    {
        if (bindingResult is null)
        {
            throw new ArgumentNullException(nameof(bindingResult));
        }

        var time = (timeProvider ?? TimeProvider.System).GetUtcNow();
        var errorCount = bindingResult.Issues.Count(static issue => issue.Severity == PolicyIssueSeverity.Error);
        var warningCount = bindingResult.Issues.Count(static issue => issue.Severity == PolicyIssueSeverity.Warning);

        var recommendations = BuildRecommendations(bindingResult.Document, errorCount, warningCount);

        return new PolicyDiagnosticsReport(
            bindingResult.Document.Version,
            bindingResult.Document.Rules.Length,
            errorCount,
            warningCount,
            time,
            bindingResult.Issues,
            recommendations);
    }

    private static ImmutableArray<string> BuildRecommendations(PolicyDocument document, int errorCount, int warningCount)
    {
        var messages = ImmutableArray.CreateBuilder<string>();

        if (errorCount > 0)
        {
            messages.Add("Resolve policy errors before promoting the revision; fallback rules may be applied while errors remain.");
        }

        if (warningCount > 0)
        {
            messages.Add("Review policy warnings and ensure intentional overrides are documented.");
        }

        if (document.Rules.Length == 0)
        {
            messages.Add("Add at least one policy rule to enforce gating logic.");
        }

        var quietRules = document.Rules
            .Where(static rule => rule.Action.Quiet)
            .Select(static rule => rule.Name)
            .ToArray();

        if (quietRules.Length > 0)
        {
            messages.Add($"Quiet rules detected ({string.Join(", ", quietRules)}); verify scoring behaviour aligns with expectations.");
        }

        if (messages.Count == 0)
        {
            messages.Add("Policy validated successfully; no additional action required.");
        }

        return messages.ToImmutable();
    }
}
diff --git a/src/StellaOps.Policy/PolicyDigest.cs b/src/StellaOps.Policy/PolicyDigest.cs
new file mode 100644
index 00000000..997f2b67
--- /dev/null
+++ b/src/StellaOps.Policy/PolicyDigest.cs
@@ -0,0 +1,211 @@
using System;
using System.Buffers;
using System.Collections.Immutable;
using System.Linq;
using System.Security.Cryptography;
using System.Text.Json;

namespace StellaOps.Policy;

public static class
PolicyDigest
{
    /// <summary>Computes a lowercase hex SHA-256 digest over the canonical JSON form of the document.</summary>
    public static string Compute(PolicyDocument document)
    {
        if (document is null)
        {
            throw new ArgumentNullException(nameof(document));
        }

        var buffer = new ArrayBufferWriter<byte>();
        using (var writer = new Utf8JsonWriter(buffer, new JsonWriterOptions
        {
            SkipValidation = true,
        }))
        {
            WriteDocument(writer, document);
        }

        var hash = SHA256.HashData(buffer.WrittenSpan);
        return Convert.ToHexString(hash).ToLowerInvariant();
    }

    private static void WriteDocument(Utf8JsonWriter writer, PolicyDocument document)
    {
        writer.WriteStartObject();
        writer.WriteString("version", document.Version);

        if (!document.Metadata.IsEmpty)
        {
            writer.WritePropertyName("metadata");
            writer.WriteStartObject();
            // Ordinal key ordering keeps the digest deterministic across runs.
            foreach (var pair in document.Metadata.OrderBy(static kvp => kvp.Key, StringComparer.Ordinal))
            {
                writer.WriteString(pair.Key, pair.Value);
            }
            writer.WriteEndObject();
        }

        writer.WritePropertyName("rules");
        writer.WriteStartArray();
        foreach (var rule in document.Rules)
        {
            WriteRule(writer, rule);
        }
        writer.WriteEndArray();

        writer.WriteEndObject();
        writer.Flush();
    }

    private static void WriteRule(Utf8JsonWriter writer, PolicyRule rule)
    {
        writer.WriteStartObject();
        writer.WriteString("name", rule.Name);

        if (!string.IsNullOrWhiteSpace(rule.Identifier))
        {
            writer.WriteString("id", rule.Identifier);
        }

        if (!string.IsNullOrWhiteSpace(rule.Description))
        {
            writer.WriteString("description", rule.Description);
        }

        WriteMetadata(writer, rule.Metadata);
        WriteSeverities(writer, rule.Severities);
        WriteStringArray(writer, "environments", rule.Environments);
        WriteStringArray(writer, "sources", rule.Sources);
        WriteStringArray(writer, "vendors", rule.Vendors);
        WriteStringArray(writer, "licenses", rule.Licenses);
        WriteStringArray(writer, "tags", rule.Tags);

        if (!rule.Match.IsEmpty)
        {
            writer.WritePropertyName("match");
            writer.WriteStartObject();
            WriteStringArray(writer, "images", rule.Match.Images);
            WriteStringArray(writer, "repositories", rule.Match.Repositories);
            WriteStringArray(writer, "packages", rule.Match.Packages);
            WriteStringArray(writer, "purls", rule.Match.Purls);
            WriteStringArray(writer, "cves", rule.Match.Cves);
            WriteStringArray(writer, "paths", rule.Match.Paths);
            WriteStringArray(writer, "layerDigests", rule.Match.LayerDigests);
            WriteStringArray(writer, "usedByEntrypoint", rule.Match.UsedByEntrypoint);
            writer.WriteEndObject();
        }

        WriteAction(writer, rule.Action);

        if (rule.Expires is DateTimeOffset expires)
        {
            writer.WriteString("expires", expires.ToUniversalTime().ToString("O"));
        }

        if (!string.IsNullOrWhiteSpace(rule.Justification))
        {
            writer.WriteString("justification", rule.Justification);
        }

        writer.WriteEndObject();
    }

    private static void WriteAction(Utf8JsonWriter writer, PolicyAction action)
    {
        writer.WritePropertyName("action");
        writer.WriteStartObject();
        writer.WriteString("type", action.Type.ToString().ToLowerInvariant());

        if (action.Quiet)
        {
            writer.WriteBoolean("quiet", true);
        }

        if (action.Ignore is { } ignore)
        {
            if (ignore.Until is DateTimeOffset until)
            {
                writer.WriteString("until", until.ToUniversalTime().ToString("O"));
            }

            if (!string.IsNullOrWhiteSpace(ignore.Justification))
            {
                writer.WriteString("justification", ignore.Justification);
            }
        }

        if (action.Escalate is { } escalate)
        {
            if (escalate.MinimumSeverity is { } severity)
            {
                writer.WriteString("severity", severity.ToString());
            }

            if (escalate.RequireKev)
            {
                writer.WriteBoolean("kev", true);
            }

            if (escalate.MinimumEpss is double epss)
            {
                writer.WriteNumber("epss", epss);
            }
        }

        if (action.RequireVex is { } requireVex)
        {
            WriteStringArray(writer, "vendors", requireVex.Vendors);
            WriteStringArray(writer, "justifications", requireVex.Justifications);
        }

        writer.WriteEndObject();
    }

    private static void WriteMetadata(Utf8JsonWriter writer, ImmutableDictionary<string, string> metadata)
    {
        if (metadata.IsEmpty)
        {
            return;
        }

        writer.WritePropertyName("metadata");
        writer.WriteStartObject();
        foreach (var pair in metadata.OrderBy(static kvp => kvp.Key, StringComparer.Ordinal))
        {
            writer.WriteString(pair.Key, pair.Value);
        }
        writer.WriteEndObject();
    }

    private static void WriteSeverities(Utf8JsonWriter writer, ImmutableArray<PolicySeverity> severities)
    {
        if (severities.IsDefaultOrEmpty)
        {
            return;
        }

        writer.WritePropertyName("severity");
        writer.WriteStartArray();
        foreach (var severity in severities)
        {
            writer.WriteStringValue(severity.ToString());
        }
        writer.WriteEndArray();
    }

    private static void WriteStringArray(Utf8JsonWriter writer, string propertyName, ImmutableArray<string> values)
    {
        if (values.IsDefaultOrEmpty)
        {
            return;
        }

        writer.WritePropertyName(propertyName);
        writer.WriteStartArray();
        foreach (var value in values)
        {
            writer.WriteStringValue(value);
        }
        writer.WriteEndArray();
    }
}
diff --git a/src/StellaOps.Policy/PolicyDocument.cs b/src/StellaOps.Policy/PolicyDocument.cs
new file mode 100644
index 00000000..e2e4f790
--- /dev/null
+++ b/src/StellaOps.Policy/PolicyDocument.cs
@@ -0,0 +1,192 @@
using System;
using System.Collections.Immutable;

namespace StellaOps.Policy;

/// <summary>
/// Canonical representation of a StellaOps policy document.
+/// +public sealed record PolicyDocument( + string Version, + ImmutableArray Rules, + ImmutableDictionary Metadata) +{ + public static PolicyDocument Empty { get; } = new( + PolicySchema.CurrentVersion, + ImmutableArray.Empty, + ImmutableDictionary.Empty); +} + +public static class PolicySchema +{ + public const string SchemaId = "https://schemas.stella-ops.org/policy/policy-schema@1.json"; + public const string CurrentVersion = "1.0"; + + public static PolicyDocumentFormat DetectFormat(string fileName) + { + if (fileName is null) + { + throw new ArgumentNullException(nameof(fileName)); + } + + var lower = fileName.Trim().ToLowerInvariant(); + if (lower.EndsWith(".yaml", StringComparison.Ordinal) || lower.EndsWith(".yml", StringComparison.Ordinal)) + { + return PolicyDocumentFormat.Yaml; + } + + return PolicyDocumentFormat.Json; + } +} + +public sealed record PolicyRule( + string Name, + string? Identifier, + string? Description, + PolicyAction Action, + ImmutableArray Severities, + ImmutableArray Environments, + ImmutableArray Sources, + ImmutableArray Vendors, + ImmutableArray Licenses, + ImmutableArray Tags, + PolicyRuleMatchCriteria Match, + DateTimeOffset? Expires, + string? Justification, + ImmutableDictionary Metadata) +{ + public static PolicyRuleMatchCriteria EmptyMatch { get; } = new( + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty); + + public static PolicyRule Create( + string name, + PolicyAction action, + ImmutableArray severities, + ImmutableArray environments, + ImmutableArray sources, + ImmutableArray vendors, + ImmutableArray licenses, + ImmutableArray tags, + PolicyRuleMatchCriteria match, + DateTimeOffset? expires, + string? justification, + string? identifier = null, + string? description = null, + ImmutableDictionary? 
metadata = null) + { + metadata ??= ImmutableDictionary.Empty; + return new PolicyRule( + name, + identifier, + description, + action, + severities, + environments, + sources, + vendors, + licenses, + tags, + match, + expires, + justification, + metadata); + } + + public bool MatchesAnyEnvironment => Environments.IsDefaultOrEmpty; +} + +public sealed record PolicyRuleMatchCriteria( + ImmutableArray Images, + ImmutableArray Repositories, + ImmutableArray Packages, + ImmutableArray Purls, + ImmutableArray Cves, + ImmutableArray Paths, + ImmutableArray LayerDigests, + ImmutableArray UsedByEntrypoint) +{ + public static PolicyRuleMatchCriteria Create( + ImmutableArray images, + ImmutableArray repositories, + ImmutableArray packages, + ImmutableArray purls, + ImmutableArray cves, + ImmutableArray paths, + ImmutableArray layerDigests, + ImmutableArray usedByEntrypoint) + => new( + images, + repositories, + packages, + purls, + cves, + paths, + layerDigests, + usedByEntrypoint); + + public static PolicyRuleMatchCriteria Empty { get; } = new( + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty, + ImmutableArray.Empty); + + public bool IsEmpty => + Images.IsDefaultOrEmpty && + Repositories.IsDefaultOrEmpty && + Packages.IsDefaultOrEmpty && + Purls.IsDefaultOrEmpty && + Cves.IsDefaultOrEmpty && + Paths.IsDefaultOrEmpty && + LayerDigests.IsDefaultOrEmpty && + UsedByEntrypoint.IsDefaultOrEmpty; +} + +public sealed record PolicyAction( + PolicyActionType Type, + PolicyIgnoreOptions? Ignore, + PolicyEscalateOptions? Escalate, + PolicyRequireVexOptions? RequireVex, + bool Quiet); + +public enum PolicyActionType +{ + Block, + Ignore, + Warn, + Defer, + Escalate, + RequireVex, +} + +public sealed record PolicyIgnoreOptions(DateTimeOffset? Until, string? Justification); + +public sealed record PolicyEscalateOptions( + PolicySeverity? 
MinimumSeverity, + bool RequireKev, + double? MinimumEpss); + +public sealed record PolicyRequireVexOptions( + ImmutableArray Vendors, + ImmutableArray Justifications); + +public enum PolicySeverity +{ + Critical, + High, + Medium, + Low, + Informational, + None, + Unknown, +} diff --git a/src/StellaOps.Policy/PolicyEvaluation.cs b/src/StellaOps.Policy/PolicyEvaluation.cs new file mode 100644 index 00000000..42ff055e --- /dev/null +++ b/src/StellaOps.Policy/PolicyEvaluation.cs @@ -0,0 +1,270 @@ +using System; +using System.Collections.Immutable; + +namespace StellaOps.Policy; + +public static class PolicyEvaluation +{ + public static PolicyVerdict EvaluateFinding(PolicyDocument document, PolicyScoringConfig scoringConfig, PolicyFinding finding) + { + if (document is null) + { + throw new ArgumentNullException(nameof(document)); + } + + if (scoringConfig is null) + { + throw new ArgumentNullException(nameof(scoringConfig)); + } + + if (finding is null) + { + throw new ArgumentNullException(nameof(finding)); + } + + var severityWeight = scoringConfig.SeverityWeights.TryGetValue(finding.Severity, out var weight) + ? weight + : scoringConfig.SeverityWeights.GetValueOrDefault(PolicySeverity.Unknown, 0); + + foreach (var rule in document.Rules) + { + if (!RuleMatches(rule, finding)) + { + continue; + } + + return BuildVerdict(rule, finding, scoringConfig, severityWeight); + } + + return PolicyVerdict.CreateBaseline(finding.FindingId, scoringConfig); + } + + private static PolicyVerdict BuildVerdict( + PolicyRule rule, + PolicyFinding finding, + PolicyScoringConfig config, + double severityWeight) + { + var action = rule.Action; + var status = MapAction(action); + var notes = BuildNotes(action); + var inputs = ImmutableDictionary.CreateBuilder(StringComparer.OrdinalIgnoreCase); + inputs["severityWeight"] = severityWeight; + + double score = severityWeight; + string? 
quietedBy = null; + var quiet = false; + + switch (status) + { + case PolicyVerdictStatus.Ignored: + score = Math.Max(0, severityWeight - config.IgnorePenalty); + inputs["ignorePenalty"] = config.IgnorePenalty; + break; + case PolicyVerdictStatus.Warned: + score = Math.Max(0, severityWeight - config.WarnPenalty); + inputs["warnPenalty"] = config.WarnPenalty; + break; + case PolicyVerdictStatus.Deferred: + score = Math.Max(0, severityWeight - (config.WarnPenalty / 2)); + inputs["deferPenalty"] = config.WarnPenalty / 2; + break; + } + + if (action.Quiet) + { + var quietAllowed = action.RequireVex is not null || action.Type == PolicyActionType.RequireVex; + if (quietAllowed) + { + score = Math.Max(0, score - config.QuietPenalty); + inputs["quietPenalty"] = config.QuietPenalty; + quietedBy = rule.Name; + quiet = true; + } + else + { + inputs.Remove("ignorePenalty"); + var warnScore = Math.Max(0, severityWeight - config.WarnPenalty); + inputs["warnPenalty"] = config.WarnPenalty; + var warnNotes = AppendNote(notes, "Quiet flag ignored: rule must specify requireVex justifications."); + + return new PolicyVerdict( + finding.FindingId, + PolicyVerdictStatus.Warned, + rule.Name, + action.Type.ToString(), + warnNotes, + warnScore, + config.Version, + inputs.ToImmutable(), + QuietedBy: null, + Quiet: false); + } + } + + return new PolicyVerdict( + finding.FindingId, + status, + rule.Name, + action.Type.ToString(), + notes, + score, + config.Version, + inputs.ToImmutable(), + quietedBy, + quiet); + } + + private static bool RuleMatches(PolicyRule rule, PolicyFinding finding) + { + if (!rule.Severities.IsDefaultOrEmpty && !rule.Severities.Contains(finding.Severity)) + { + return false; + } + + if (!Matches(rule.Environments, finding.Environment)) + { + return false; + } + + if (!Matches(rule.Sources, finding.Source)) + { + return false; + } + + if (!Matches(rule.Vendors, finding.Vendor)) + { + return false; + } + + if (!Matches(rule.Licenses, finding.License)) + { + return 
false; + } + + if (!RuleMatchCriteria(rule.Match, finding)) + { + return false; + } + + return true; + } + + private static bool Matches(ImmutableArray ruleValues, string? candidate) + { + if (ruleValues.IsDefaultOrEmpty) + { + return true; + } + + if (string.IsNullOrWhiteSpace(candidate)) + { + return false; + } + + return ruleValues.Contains(candidate, StringComparer.OrdinalIgnoreCase); + } + + private static bool RuleMatchCriteria(PolicyRuleMatchCriteria criteria, PolicyFinding finding) + { + if (!criteria.Images.IsDefaultOrEmpty && !ContainsValue(criteria.Images, finding.Image, StringComparer.OrdinalIgnoreCase)) + { + return false; + } + + if (!criteria.Repositories.IsDefaultOrEmpty && !ContainsValue(criteria.Repositories, finding.Repository, StringComparer.OrdinalIgnoreCase)) + { + return false; + } + + if (!criteria.Packages.IsDefaultOrEmpty && !ContainsValue(criteria.Packages, finding.Package, StringComparer.OrdinalIgnoreCase)) + { + return false; + } + + if (!criteria.Purls.IsDefaultOrEmpty && !ContainsValue(criteria.Purls, finding.Purl, StringComparer.OrdinalIgnoreCase)) + { + return false; + } + + if (!criteria.Cves.IsDefaultOrEmpty && !ContainsValue(criteria.Cves, finding.Cve, StringComparer.OrdinalIgnoreCase)) + { + return false; + } + + if (!criteria.Paths.IsDefaultOrEmpty && !ContainsValue(criteria.Paths, finding.Path, StringComparer.Ordinal)) + { + return false; + } + + if (!criteria.LayerDigests.IsDefaultOrEmpty && !ContainsValue(criteria.LayerDigests, finding.LayerDigest, StringComparer.OrdinalIgnoreCase)) + { + return false; + } + + if (!criteria.UsedByEntrypoint.IsDefaultOrEmpty) + { + var match = false; + foreach (var tag in criteria.UsedByEntrypoint) + { + if (finding.Tags.Contains(tag, StringComparer.OrdinalIgnoreCase)) + { + match = true; + break; + } + } + + if (!match) + { + return false; + } + } + + return true; + } + + private static bool ContainsValue(ImmutableArray values, string? 
candidate, StringComparer comparer) + { + if (values.IsDefaultOrEmpty) + { + return true; + } + + if (string.IsNullOrWhiteSpace(candidate)) + { + return false; + } + + return values.Contains(candidate, comparer); + } + + private static PolicyVerdictStatus MapAction(PolicyAction action) + => action.Type switch + { + PolicyActionType.Block => PolicyVerdictStatus.Blocked, + PolicyActionType.Ignore => PolicyVerdictStatus.Ignored, + PolicyActionType.Warn => PolicyVerdictStatus.Warned, + PolicyActionType.Defer => PolicyVerdictStatus.Deferred, + PolicyActionType.Escalate => PolicyVerdictStatus.Escalated, + PolicyActionType.RequireVex => PolicyVerdictStatus.RequiresVex, + _ => PolicyVerdictStatus.Pass, + }; + + private static string? BuildNotes(PolicyAction action) + { + if (action.Ignore is { } ignore && !string.IsNullOrWhiteSpace(ignore.Justification)) + { + return ignore.Justification; + } + + if (action.Escalate is { } escalate && escalate.MinimumSeverity is { } severity) + { + return $"Escalate >= {severity}"; + } + + return null; + } + + private static string? AppendNote(string? existing, string addition) + => string.IsNullOrWhiteSpace(existing) ? addition : string.Concat(existing, " | ", addition); +} diff --git a/src/StellaOps.Policy/PolicyFinding.cs b/src/StellaOps.Policy/PolicyFinding.cs new file mode 100644 index 00000000..83114219 --- /dev/null +++ b/src/StellaOps.Policy/PolicyFinding.cs @@ -0,0 +1,51 @@ +using System.Collections.Immutable; + +namespace StellaOps.Policy; + +public sealed record PolicyFinding( + string FindingId, + PolicySeverity Severity, + string? Environment, + string? Source, + string? Vendor, + string? License, + string? Image, + string? Repository, + string? Package, + string? Purl, + string? Cve, + string? Path, + string? LayerDigest, + ImmutableArray Tags) +{ + public static PolicyFinding Create( + string findingId, + PolicySeverity severity, + string? environment = null, + string? source = null, + string? vendor = null, + string? 
license = null, + string? image = null, + string? repository = null, + string? package = null, + string? purl = null, + string? cve = null, + string? path = null, + string? layerDigest = null, + ImmutableArray? tags = null) + => new( + findingId, + severity, + environment, + source, + vendor, + license, + image, + repository, + package, + purl, + cve, + path, + layerDigest, + tags ?? ImmutableArray.Empty); +} diff --git a/src/StellaOps.Policy/PolicyIssue.cs b/src/StellaOps.Policy/PolicyIssue.cs new file mode 100644 index 00000000..2fd61d03 --- /dev/null +++ b/src/StellaOps.Policy/PolicyIssue.cs @@ -0,0 +1,28 @@ +using System; + +namespace StellaOps.Policy; + +/// +/// Represents a validation or normalization issue discovered while processing a policy document. +/// +public sealed record PolicyIssue(string Code, string Message, PolicyIssueSeverity Severity, string Path) +{ + public static PolicyIssue Error(string code, string message, string path) + => new(code, message, PolicyIssueSeverity.Error, path); + + public static PolicyIssue Warning(string code, string message, string path) + => new(code, message, PolicyIssueSeverity.Warning, path); + + public static PolicyIssue Info(string code, string message, string path) + => new(code, message, PolicyIssueSeverity.Info, path); + + public PolicyIssue EnsurePath(string fallbackPath) + => string.IsNullOrWhiteSpace(Path) ? this with { Path = fallbackPath } : this; +} + +public enum PolicyIssueSeverity +{ + Error, + Warning, + Info, +} diff --git a/src/StellaOps.Policy/PolicyPreviewModels.cs b/src/StellaOps.Policy/PolicyPreviewModels.cs new file mode 100644 index 00000000..2e84f591 --- /dev/null +++ b/src/StellaOps.Policy/PolicyPreviewModels.cs @@ -0,0 +1,18 @@ +using System.Collections.Immutable; + +namespace StellaOps.Policy; + +public sealed record PolicyPreviewRequest( + string ImageDigest, + ImmutableArray Findings, + ImmutableArray BaselineVerdicts, + PolicySnapshot? SnapshotOverride = null, + PolicySnapshotContent? 
ProposedPolicy = null); + +public sealed record PolicyPreviewResponse( + bool Success, + string PolicyDigest, + string? RevisionId, + ImmutableArray Issues, + ImmutableArray Diffs, + int ChangedCount); diff --git a/src/StellaOps.Policy/PolicyPreviewService.cs b/src/StellaOps.Policy/PolicyPreviewService.cs new file mode 100644 index 00000000..3fc1ed04 --- /dev/null +++ b/src/StellaOps.Policy/PolicyPreviewService.cs @@ -0,0 +1,142 @@ +using System; +using System.Collections.Generic; +using System; +using System.Collections.Immutable; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; + +namespace StellaOps.Policy; + +public sealed class PolicyPreviewService +{ + private readonly PolicySnapshotStore _snapshotStore; + private readonly ILogger _logger; + + public PolicyPreviewService(PolicySnapshotStore snapshotStore, ILogger logger) + { + _snapshotStore = snapshotStore ?? throw new ArgumentNullException(nameof(snapshotStore)); + _logger = logger ?? throw new ArgumentNullException(nameof(logger)); + } + + public async Task PreviewAsync(PolicyPreviewRequest request, CancellationToken cancellationToken = default) + { + if (request is null) + { + throw new ArgumentNullException(nameof(request)); + } + + var (snapshot, bindingIssues) = await ResolveSnapshotAsync(request, cancellationToken).ConfigureAwait(false); + if (snapshot is null) + { + _logger.LogWarning("Policy preview failed: snapshot unavailable or validation errors. 
Issues={Count}", bindingIssues.Length); + return new PolicyPreviewResponse(false, string.Empty, null, bindingIssues, ImmutableArray.Empty, 0); + } + + var projected = Evaluate(snapshot.Document, snapshot.ScoringConfig, request.Findings); + var baseline = BuildBaseline(request.BaselineVerdicts, projected, snapshot.ScoringConfig); + var diffs = BuildDiffs(baseline, projected); + var changed = diffs.Count(static diff => diff.Changed); + + _logger.LogDebug("Policy preview computed for {ImageDigest}. Changed={Changed}", request.ImageDigest, changed); + + return new PolicyPreviewResponse(true, snapshot.Digest, snapshot.RevisionId, bindingIssues, diffs, changed); + } + + private async Task<(PolicySnapshot? Snapshot, ImmutableArray Issues)> ResolveSnapshotAsync(PolicyPreviewRequest request, CancellationToken cancellationToken) + { + if (request.ProposedPolicy is not null) + { + var binding = PolicyBinder.Bind(request.ProposedPolicy.Content, request.ProposedPolicy.Format); + if (!binding.Success) + { + return (null, binding.Issues); + } + + var digest = PolicyDigest.Compute(binding.Document); + var snapshot = new PolicySnapshot( + request.SnapshotOverride?.RevisionNumber + 1 ?? 0, + request.SnapshotOverride?.RevisionId ?? 
"preview", + digest, + DateTimeOffset.UtcNow, + request.ProposedPolicy.Actor, + request.ProposedPolicy.Format, + binding.Document, + binding.Issues, + PolicyScoringConfig.Default); + + return (snapshot, binding.Issues); + } + + if (request.SnapshotOverride is not null) + { + return (request.SnapshotOverride, ImmutableArray.Empty); + } + + var latest = await _snapshotStore.GetLatestAsync(cancellationToken).ConfigureAwait(false); + if (latest is not null) + { + return (latest, ImmutableArray.Empty); + } + + return (null, ImmutableArray.Create(PolicyIssue.Error("policy.preview.snapshot_missing", "No policy snapshot is available for preview.", "$"))); + } + + private static ImmutableArray Evaluate(PolicyDocument document, PolicyScoringConfig scoringConfig, ImmutableArray findings) + { + if (findings.IsDefaultOrEmpty) + { + return ImmutableArray.Empty; + } + + var results = ImmutableArray.CreateBuilder(findings.Length); + foreach (var finding in findings) + { + var verdict = PolicyEvaluation.EvaluateFinding(document, scoringConfig, finding); + results.Add(verdict); + } + + return results.ToImmutable(); + } + + private static ImmutableDictionary BuildBaseline(ImmutableArray baseline, ImmutableArray projected, PolicyScoringConfig scoringConfig) + { + var builder = ImmutableDictionary.CreateBuilder(StringComparer.Ordinal); + if (!baseline.IsDefaultOrEmpty) + { + foreach (var verdict in baseline) + { + if (!string.IsNullOrEmpty(verdict.FindingId) && !builder.ContainsKey(verdict.FindingId)) + { + builder.Add(verdict.FindingId, verdict); + } + } + } + + foreach (var verdict in projected) + { + if (!builder.ContainsKey(verdict.FindingId)) + { + builder.Add(verdict.FindingId, PolicyVerdict.CreateBaseline(verdict.FindingId, scoringConfig)); + } + } + + return builder.ToImmutable(); + } + + private static ImmutableArray BuildDiffs(ImmutableDictionary baseline, ImmutableArray projected) + { + var diffs = ImmutableArray.CreateBuilder(projected.Length); + foreach (var verdict in 
projected.OrderBy(static v => v.FindingId, StringComparer.Ordinal)) + { + var baseVerdict = baseline.TryGetValue(verdict.FindingId, out var existing) + ? existing + : new PolicyVerdict(verdict.FindingId, PolicyVerdictStatus.Pass); + + diffs.Add(new PolicyVerdictDiff(baseVerdict, verdict)); + } + + return diffs.ToImmutable(); + } +} diff --git a/src/StellaOps.Policy/PolicySchemaResource.cs b/src/StellaOps.Policy/PolicySchemaResource.cs new file mode 100644 index 00000000..db086456 --- /dev/null +++ b/src/StellaOps.Policy/PolicySchemaResource.cs @@ -0,0 +1,30 @@ +using System; +using System.IO; +using System.Reflection; +using System.Text; + +namespace StellaOps.Policy; + +public static class PolicySchemaResource +{ + private const string SchemaResourceName = "StellaOps.Policy.Schemas.policy-schema@1.json"; + + public static Stream OpenSchemaStream() + { + var assembly = Assembly.GetExecutingAssembly(); + var stream = assembly.GetManifestResourceStream(SchemaResourceName); + if (stream is null) + { + throw new InvalidOperationException($"Unable to locate embedded schema resource '{SchemaResourceName}'."); + } + + return stream; + } + + public static string ReadSchemaJson() + { + using var stream = OpenSchemaStream(); + using var reader = new StreamReader(stream, Encoding.UTF8, detectEncodingFromByteOrderMarks: true); + return reader.ReadToEnd(); + } +} diff --git a/src/StellaOps.Policy/PolicyScoringConfig.cs b/src/StellaOps.Policy/PolicyScoringConfig.cs new file mode 100644 index 00000000..c3ca3795 --- /dev/null +++ b/src/StellaOps.Policy/PolicyScoringConfig.cs @@ -0,0 +1,16 @@ +using System.Collections.Immutable; + +namespace StellaOps.Policy; + +public sealed record PolicyScoringConfig( + string Version, + ImmutableDictionary SeverityWeights, + double QuietPenalty, + double WarnPenalty, + double IgnorePenalty, + ImmutableDictionary TrustOverrides) +{ + public static string BaselineVersion => "1.0"; + + public static PolicyScoringConfig Default { get; } = 
PolicyScoringConfigBinder.LoadDefault(); +} diff --git a/src/StellaOps.Policy/PolicyScoringConfigBinder.cs b/src/StellaOps.Policy/PolicyScoringConfigBinder.cs new file mode 100644 index 00000000..3ce37b69 --- /dev/null +++ b/src/StellaOps.Policy/PolicyScoringConfigBinder.cs @@ -0,0 +1,266 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Globalization; +using System.IO; +using System.Linq; +using System.Reflection; +using System.Text; +using System.Text.Json; +using System.Text.Json.Nodes; +using YamlDotNet.Serialization; +using YamlDotNet.Serialization.NamingConventions; + +namespace StellaOps.Policy; + +public sealed record PolicyScoringBindingResult( + bool Success, + PolicyScoringConfig? Config, + ImmutableArray Issues); + +public static class PolicyScoringConfigBinder +{ + private const string DefaultResourceName = "StellaOps.Policy.Schemas.policy-scoring-default.json"; + + private static readonly JsonSerializerOptions SerializerOptions = new() + { + PropertyNameCaseInsensitive = true, + ReadCommentHandling = JsonCommentHandling.Skip, + AllowTrailingCommas = true, + }; + + private static readonly IDeserializer YamlDeserializer = new DeserializerBuilder() + .WithNamingConvention(CamelCaseNamingConvention.Instance) + .IgnoreUnmatchedProperties() + .Build(); + + public static PolicyScoringConfig LoadDefault() + { + var assembly = Assembly.GetExecutingAssembly(); + using var stream = assembly.GetManifestResourceStream(DefaultResourceName) + ?? 
throw new InvalidOperationException($"Embedded resource '{DefaultResourceName}' not found."); + using var reader = new StreamReader(stream, Encoding.UTF8, detectEncodingFromByteOrderMarks: true); + var json = reader.ReadToEnd(); + var binding = Bind(json, PolicyDocumentFormat.Json); + if (!binding.Success || binding.Config is null) + { + throw new InvalidOperationException("Failed to load default policy scoring configuration."); + } + + return binding.Config; + } + + public static PolicyScoringBindingResult Bind(string content, PolicyDocumentFormat format) + { + if (string.IsNullOrWhiteSpace(content)) + { + var issue = PolicyIssue.Error("scoring.empty", "Scoring configuration content is empty.", "$"); + return new PolicyScoringBindingResult(false, null, ImmutableArray.Create(issue)); + } + + try + { + var root = Parse(content, format); + if (root is not JsonObject obj) + { + var issue = PolicyIssue.Error("scoring.invalid", "Scoring configuration must be a JSON object.", "$"); + return new PolicyScoringBindingResult(false, null, ImmutableArray.Create(issue)); + } + + var issues = ImmutableArray.CreateBuilder(); + var config = BuildConfig(obj, issues); + var hasErrors = issues.Any(issue => issue.Severity == PolicyIssueSeverity.Error); + return new PolicyScoringBindingResult(!hasErrors, config, issues.ToImmutable()); + } + catch (JsonException ex) + { + var issue = PolicyIssue.Error("scoring.parse.json", $"Failed to parse scoring JSON: {ex.Message}", "$"); + return new PolicyScoringBindingResult(false, null, ImmutableArray.Create(issue)); + } + catch (YamlDotNet.Core.YamlException ex) + { + var issue = PolicyIssue.Error("scoring.parse.yaml", $"Failed to parse scoring YAML: {ex.Message}", "$"); + return new PolicyScoringBindingResult(false, null, ImmutableArray.Create(issue)); + } + } + + private static JsonNode? 
Parse(string content, PolicyDocumentFormat format) + { + return format switch + { + PolicyDocumentFormat.Json => JsonNode.Parse(content, new JsonNodeOptions { PropertyNameCaseInsensitive = true }), + PolicyDocumentFormat.Yaml => ConvertYamlToJsonNode(content), + _ => throw new ArgumentOutOfRangeException(nameof(format), format, "Unsupported scoring configuration format."), + }; + } + + private static JsonNode? ConvertYamlToJsonNode(string content) + { + var yamlObject = YamlDeserializer.Deserialize(content); + return PolicyBinderUtilities.ConvertYamlObject(yamlObject); + } + + private static PolicyScoringConfig BuildConfig(JsonObject obj, ImmutableArray.Builder issues) + { + var version = ReadString(obj, "version", issues, required: true) ?? PolicyScoringConfig.BaselineVersion; + + var severityWeights = ReadSeverityWeights(obj, issues); + var quietPenalty = ReadDouble(obj, "quietPenalty", issues, defaultValue: 45); + var warnPenalty = ReadDouble(obj, "warnPenalty", issues, defaultValue: 15); + var ignorePenalty = ReadDouble(obj, "ignorePenalty", issues, defaultValue: 35); + var trustOverrides = ReadTrustOverrides(obj, issues); + + return new PolicyScoringConfig( + version, + severityWeights, + quietPenalty, + warnPenalty, + ignorePenalty, + trustOverrides); + } + + private static ImmutableDictionary ReadSeverityWeights(JsonObject obj, ImmutableArray.Builder issues) + { + if (!obj.TryGetPropertyValue("severityWeights", out var node) || node is not JsonObject severityObj) + { + issues.Add(PolicyIssue.Error("scoring.severityWeights.missing", "severityWeights section is required.", "$.severityWeights")); + return ImmutableDictionary.Empty; + } + + var builder = ImmutableDictionary.CreateBuilder(); + foreach (var severity in Enum.GetValues()) + { + var key = severity.ToString(); + if (!severityObj.TryGetPropertyValue(key, out var valueNode)) + { + issues.Add(PolicyIssue.Warning("scoring.severityWeights.default", $"Severity '{key}' not specified; defaulting to 0.", 
$"$.severityWeights.{key}")); + builder[severity] = 0; + continue; + } + + var value = ExtractDouble(valueNode, issues, $"$.severityWeights.{key}"); + builder[severity] = value; + } + + return builder.ToImmutable(); + } + + private static double ReadDouble(JsonObject obj, string property, ImmutableArray.Builder issues, double defaultValue) + { + if (!obj.TryGetPropertyValue(property, out var node)) + { + issues.Add(PolicyIssue.Warning("scoring.numeric.default", $"{property} not specified; defaulting to {defaultValue:0.##}.", $"$.{property}")); + return defaultValue; + } + + return ExtractDouble(node, issues, $"$.{property}"); + } + + private static double ExtractDouble(JsonNode? node, ImmutableArray.Builder issues, string path) + { + if (node is null) + { + issues.Add(PolicyIssue.Warning("scoring.numeric.null", $"Value at {path} missing; defaulting to 0.", path)); + return 0; + } + + if (node is JsonValue value) + { + if (value.TryGetValue(out double number)) + { + return number; + } + + if (value.TryGetValue(out string? text) && double.TryParse(text, NumberStyles.Float, CultureInfo.InvariantCulture, out number)) + { + return number; + } + } + + issues.Add(PolicyIssue.Error("scoring.numeric.invalid", $"Value at {path} is not numeric.", path)); + return 0; + } + + private static ImmutableDictionary ReadTrustOverrides(JsonObject obj, ImmutableArray.Builder issues) + { + if (!obj.TryGetPropertyValue("trustOverrides", out var node) || node is not JsonObject trustObj) + { + return ImmutableDictionary.Empty; + } + + var builder = ImmutableDictionary.CreateBuilder(StringComparer.OrdinalIgnoreCase); + foreach (var pair in trustObj) + { + var value = ExtractDouble(pair.Value, issues, $"$.trustOverrides.{pair.Key}"); + builder[pair.Key] = value; + } + + return builder.ToImmutable(); + } + + private static string? 
ReadString(JsonObject obj, string property, ImmutableArray.Builder issues, bool required) + { + if (!obj.TryGetPropertyValue(property, out var node) || node is null) + { + if (required) + { + issues.Add(PolicyIssue.Error("scoring.string.missing", $"{property} is required.", $"$.{property}")); + } + return null; + } + + if (node is JsonValue value && value.TryGetValue(out string? text)) + { + return text?.Trim(); + } + + issues.Add(PolicyIssue.Error("scoring.string.invalid", $"{property} must be a string.", $"$.{property}")); + return null; + } +} + +internal static class PolicyBinderUtilities +{ + public static JsonNode? ConvertYamlObject(object? value) + { + switch (value) + { + case null: + return null; + case string s: + return JsonValue.Create(s); + case bool b: + return JsonValue.Create(b); + case sbyte or byte or short or ushort or int or uint or long or ulong or float or double or decimal: + return JsonValue.Create(Convert.ToDouble(value, CultureInfo.InvariantCulture)); + case IDictionary dictionary: + { + var obj = new JsonObject(); + foreach (DictionaryEntry entry in dictionary) + { + if (entry.Key is null) + { + continue; + } + + obj[entry.Key.ToString()!] = ConvertYamlObject(entry.Value); + } + + return obj; + } + case IEnumerable enumerable: + { + var array = new JsonArray(); + foreach (var item in enumerable) + { + array.Add(ConvertYamlObject(item)); + } + + return array; + } + default: + return JsonValue.Create(value.ToString()); + } + } +} diff --git a/src/StellaOps.Policy/PolicySnapshot.cs b/src/StellaOps.Policy/PolicySnapshot.cs new file mode 100644 index 00000000..6e22b26e --- /dev/null +++ b/src/StellaOps.Policy/PolicySnapshot.cs @@ -0,0 +1,29 @@ +using System; +using System.Collections.Immutable; + +namespace StellaOps.Policy; + +public sealed record PolicySnapshot( + long RevisionNumber, + string RevisionId, + string Digest, + DateTimeOffset CreatedAt, + string? 
CreatedBy, + PolicyDocumentFormat Format, + PolicyDocument Document, + ImmutableArray Issues, + PolicyScoringConfig ScoringConfig); + +public sealed record PolicySnapshotContent( + string Content, + PolicyDocumentFormat Format, + string? Actor, + string? Source, + string? Description); + +public sealed record PolicySnapshotSaveResult( + bool Success, + bool Created, + string Digest, + PolicySnapshot? Snapshot, + PolicyBindingResult BindingResult); diff --git a/src/StellaOps.Policy/PolicySnapshotStore.cs b/src/StellaOps.Policy/PolicySnapshotStore.cs new file mode 100644 index 00000000..7ff8008e --- /dev/null +++ b/src/StellaOps.Policy/PolicySnapshotStore.cs @@ -0,0 +1,101 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; + +namespace StellaOps.Policy; + +public sealed class PolicySnapshotStore +{ + private readonly IPolicySnapshotRepository _snapshotRepository; + private readonly IPolicyAuditRepository _auditRepository; + private readonly TimeProvider _timeProvider; + private readonly ILogger _logger; + private readonly SemaphoreSlim _mutex = new(1, 1); + + public PolicySnapshotStore( + IPolicySnapshotRepository snapshotRepository, + IPolicyAuditRepository auditRepository, + TimeProvider? timeProvider, + ILogger logger) + { + _snapshotRepository = snapshotRepository ?? throw new ArgumentNullException(nameof(snapshotRepository)); + _auditRepository = auditRepository ?? throw new ArgumentNullException(nameof(auditRepository)); + _timeProvider = timeProvider ?? TimeProvider.System; + _logger = logger ?? 
throw new ArgumentNullException(nameof(logger)); + } + + public async Task SaveAsync(PolicySnapshotContent content, CancellationToken cancellationToken = default) + { + if (content is null) + { + throw new ArgumentNullException(nameof(content)); + } + + var bindingResult = PolicyBinder.Bind(content.Content, content.Format); + if (!bindingResult.Success) + { + _logger.LogWarning("Policy snapshot rejected due to validation errors (Format: {Format})", content.Format); + return new PolicySnapshotSaveResult(false, false, string.Empty, null, bindingResult); + } + + var digest = PolicyDigest.Compute(bindingResult.Document); + + await _mutex.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + var latest = await _snapshotRepository.GetLatestAsync(cancellationToken).ConfigureAwait(false); + if (latest is not null && string.Equals(latest.Digest, digest, StringComparison.Ordinal)) + { + _logger.LogInformation("Policy snapshot unchanged; digest {Digest} matches revision {RevisionId}", digest, latest.RevisionId); + return new PolicySnapshotSaveResult(true, false, digest, latest, bindingResult); + } + + var revisionNumber = (latest?.RevisionNumber ?? 0) + 1; + var revisionId = $"rev-{revisionNumber}"; + var createdAt = _timeProvider.GetUtcNow(); + + var scoringConfig = PolicyScoringConfig.Default; + + var snapshot = new PolicySnapshot( + revisionNumber, + revisionId, + digest, + createdAt, + content.Actor, + content.Format, + bindingResult.Document, + bindingResult.Issues, + scoringConfig); + + await _snapshotRepository.AddAsync(snapshot, cancellationToken).ConfigureAwait(false); + + var auditMessage = content.Description ?? "Policy snapshot created"; + var auditEntry = new PolicyAuditEntry( + Guid.NewGuid(), + createdAt, + "snapshot.created", + revisionId, + digest, + content.Actor, + auditMessage); + + await _auditRepository.AddAsync(auditEntry, cancellationToken).ConfigureAwait(false); + + _logger.LogInformation( + "Policy snapshot saved. 
Revision {RevisionId}, digest {Digest}, issues {IssueCount}", + revisionId, + digest, + bindingResult.Issues.Length); + + return new PolicySnapshotSaveResult(true, true, digest, snapshot, bindingResult); + } + finally + { + _mutex.Release(); + } + } + + public Task GetLatestAsync(CancellationToken cancellationToken = default) + => _snapshotRepository.GetLatestAsync(cancellationToken); +} diff --git a/src/StellaOps.Policy/PolicyValidationCli.cs b/src/StellaOps.Policy/PolicyValidationCli.cs new file mode 100644 index 00000000..78eb1b03 --- /dev/null +++ b/src/StellaOps.Policy/PolicyValidationCli.cs @@ -0,0 +1,241 @@ +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.IO; +using System.Linq; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Policy; + +public sealed record PolicyValidationCliOptions +{ + public IReadOnlyList Inputs { get; init; } = Array.Empty(); + + /// + /// Writes machine-readable JSON instead of human-formatted text. + /// + public bool OutputJson { get; init; } + + /// + /// When enabled, warnings cause a non-zero exit code. + /// + public bool Strict { get; init; } +} + +public sealed record PolicyValidationFileResult( + string Path, + PolicyBindingResult BindingResult, + PolicyDiagnosticsReport Diagnostics); + +public sealed class PolicyValidationCli +{ + private readonly TextWriter _output; + private readonly TextWriter _error; + + public PolicyValidationCli(TextWriter? output = null, TextWriter? error = null) + { + _output = output ?? Console.Out; + _error = error ?? Console.Error; + } + + public async Task RunAsync(PolicyValidationCliOptions options, CancellationToken cancellationToken = default) + { + if (options is null) + { + throw new ArgumentNullException(nameof(options)); + } + + if (options.Inputs.Count == 0) + { + await _error.WriteLineAsync("No input files provided. 
Supply one or more policy file paths."); + return 64; // EX_USAGE + } + + var results = new List(); + foreach (var input in options.Inputs) + { + cancellationToken.ThrowIfCancellationRequested(); + + var resolvedPaths = ResolveInput(input); + if (resolvedPaths.Count == 0) + { + await _error.WriteLineAsync($"No files matched '{input}'."); + continue; + } + + foreach (var path in resolvedPaths) + { + cancellationToken.ThrowIfCancellationRequested(); + + var format = PolicySchema.DetectFormat(path); + var content = await File.ReadAllTextAsync(path, cancellationToken); + var bindingResult = PolicyBinder.Bind(content, format); + var diagnostics = PolicyDiagnostics.Create(bindingResult); + + results.Add(new PolicyValidationFileResult(path, bindingResult, diagnostics)); + } + } + + if (results.Count == 0) + { + await _error.WriteLineAsync("No files were processed."); + return 65; // EX_DATAERR + } + + if (options.OutputJson) + { + WriteJson(results); + } + else + { + await WriteTextAsync(results, cancellationToken); + } + + var hasErrors = results.Any(static result => !result.BindingResult.Success); + var hasWarnings = results.Any(static result => result.BindingResult.Issues.Any(static issue => issue.Severity == PolicyIssueSeverity.Warning)); + + if (hasErrors) + { + return 1; + } + + if (options.Strict && hasWarnings) + { + return 2; + } + + return 0; + } + + private async Task WriteTextAsync(IReadOnlyList results, CancellationToken cancellationToken) + { + foreach (var result in results) + { + cancellationToken.ThrowIfCancellationRequested(); + + var relativePath = MakeRelative(result.Path); + await _output.WriteLineAsync($"{relativePath} [{result.BindingResult.Format}]"); + + if (result.BindingResult.Issues.Length == 0) + { + await _output.WriteLineAsync(" OK"); + continue; + } + + foreach (var issue in result.BindingResult.Issues) + { + var severity = issue.Severity.ToString().ToUpperInvariant().PadRight(7); + await _output.WriteLineAsync($" {severity} {issue.Path} :: 
{issue.Message} ({issue.Code})"); + } + } + } + + private void WriteJson(IReadOnlyList results) + { + var payload = results.Select(static result => new + { + path = result.Path, + format = result.BindingResult.Format.ToString().ToLowerInvariant(), + success = result.BindingResult.Success, + issues = result.BindingResult.Issues.Select(static issue => new + { + code = issue.Code, + message = issue.Message, + severity = issue.Severity.ToString().ToLowerInvariant(), + path = issue.Path, + }), + diagnostics = new + { + version = result.Diagnostics.Version, + ruleCount = result.Diagnostics.RuleCount, + errorCount = result.Diagnostics.ErrorCount, + warningCount = result.Diagnostics.WarningCount, + generatedAt = result.Diagnostics.GeneratedAt, + recommendations = result.Diagnostics.Recommendations, + }, + }) + .ToArray(); + + var json = JsonSerializer.Serialize(payload, new JsonSerializerOptions + { + WriteIndented = true, + }); + _output.WriteLine(json); + } + + private static IReadOnlyList ResolveInput(string input) + { + if (string.IsNullOrWhiteSpace(input)) + { + return Array.Empty(); + } + + var expanded = Environment.ExpandEnvironmentVariables(input.Trim()); + if (File.Exists(expanded)) + { + return new[] { Path.GetFullPath(expanded) }; + } + + if (Directory.Exists(expanded)) + { + return Directory.EnumerateFiles(expanded, "*.*", SearchOption.TopDirectoryOnly) + .Where(static path => MatchesPolicyExtension(path)) + .OrderBy(static path => path, StringComparer.OrdinalIgnoreCase) + .Select(Path.GetFullPath) + .ToArray(); + } + + var directory = Path.GetDirectoryName(expanded); + var searchPattern = Path.GetFileName(expanded); + + if (string.IsNullOrEmpty(searchPattern)) + { + return Array.Empty(); + } + + if (string.IsNullOrEmpty(directory)) + { + directory = "."; + } + + if (!Directory.Exists(directory)) + { + return Array.Empty(); + } + + return Directory.EnumerateFiles(directory, searchPattern, SearchOption.TopDirectoryOnly) + .Where(static path => 
MatchesPolicyExtension(path)) + .OrderBy(static path => path, StringComparer.OrdinalIgnoreCase) + .Select(Path.GetFullPath) + .ToArray(); + } + + private static bool MatchesPolicyExtension(string path) + { + var extension = Path.GetExtension(path); + return extension.Equals(".yaml", StringComparison.OrdinalIgnoreCase) + || extension.Equals(".yml", StringComparison.OrdinalIgnoreCase) + || extension.Equals(".json", StringComparison.OrdinalIgnoreCase); + } + + private static string MakeRelative(string path) + { + try + { + var fullPath = Path.GetFullPath(path); + var current = Directory.GetCurrentDirectory(); + if (fullPath.StartsWith(current, StringComparison.OrdinalIgnoreCase)) + { + return fullPath[current.Length..].TrimStart(Path.DirectorySeparatorChar, Path.AltDirectorySeparatorChar); + } + + return fullPath; + } + catch + { + return path; + } + } +} diff --git a/src/StellaOps.Policy/PolicyVerdict.cs b/src/StellaOps.Policy/PolicyVerdict.cs new file mode 100644 index 00000000..9eb758ef --- /dev/null +++ b/src/StellaOps.Policy/PolicyVerdict.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Immutable; + +namespace StellaOps.Policy; + +public enum PolicyVerdictStatus +{ + Pass, + Blocked, + Ignored, + Warned, + Deferred, + Escalated, + RequiresVex, +} + +public sealed record PolicyVerdict( + string FindingId, + PolicyVerdictStatus Status, + string? RuleName = null, + string? RuleAction = null, + string? Notes = null, + double Score = 0, + string ConfigVersion = "1.0", + ImmutableDictionary? Inputs = null, + string? 
QuietedBy = null, + bool Quiet = false) +{ + public static PolicyVerdict CreateBaseline(string findingId, PolicyScoringConfig scoringConfig) + { + var inputs = ImmutableDictionary.Empty; + return new PolicyVerdict( + findingId, + PolicyVerdictStatus.Pass, + RuleName: null, + RuleAction: null, + Notes: null, + Score: 0, + ConfigVersion: scoringConfig.Version, + Inputs: inputs, + QuietedBy: null, + Quiet: false); + } + + public ImmutableDictionary GetInputs() + => Inputs ?? ImmutableDictionary.Empty; +} + +public sealed record PolicyVerdictDiff( + PolicyVerdict Baseline, + PolicyVerdict Projected) +{ + public bool Changed + { + get + { + if (Baseline.Status != Projected.Status) + { + return true; + } + + if (!string.Equals(Baseline.RuleName, Projected.RuleName, StringComparison.Ordinal)) + { + return true; + } + + if (Math.Abs(Baseline.Score - Projected.Score) > 0.0001) + { + return true; + } + + if (!string.Equals(Baseline.QuietedBy, Projected.QuietedBy, StringComparison.Ordinal)) + { + return true; + } + + return false; + } + } +} diff --git a/src/StellaOps.Policy/Schemas/policy-schema@1.json b/src/StellaOps.Policy/Schemas/policy-schema@1.json new file mode 100644 index 00000000..c11f2696 --- /dev/null +++ b/src/StellaOps.Policy/Schemas/policy-schema@1.json @@ -0,0 +1,176 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://schemas.stella-ops.org/policy/policy-schema@1.json", + "title": "StellaOps Policy Schema v1", + "type": "object", + "required": ["version", "rules"], + "properties": { + "version": { + "type": ["string", "number"], + "enum": ["1", "1.0", 1, 1.0] + }, + "description": { + "type": "string" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "type": ["string", "number", "boolean"] + } + }, + "rules": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/rule" + } + } + }, + "additionalProperties": true, + "$defs": { + "identifier": { + "type": "string", + "minLength": 1 + }, + 
"severity": { + "type": "string", + "enum": ["Critical", "High", "Medium", "Low", "Informational", "None", "Unknown"] + }, + "stringArray": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + }, + "uniqueItems": true + }, + "rule": { + "type": "object", + "required": ["name", "action"], + "properties": { + "id": { + "$ref": "#/$defs/identifier" + }, + "name": { + "type": "string", + "minLength": 1 + }, + "description": { + "type": "string" + }, + "severity": { + "type": "array", + "items": { + "$ref": "#/$defs/severity" + }, + "uniqueItems": true + }, + "sources": { + "$ref": "#/$defs/stringArray" + }, + "vendors": { + "$ref": "#/$defs/stringArray" + }, + "licenses": { + "$ref": "#/$defs/stringArray" + }, + "tags": { + "$ref": "#/$defs/stringArray" + }, + "environments": { + "$ref": "#/$defs/stringArray" + }, + "images": { + "$ref": "#/$defs/stringArray" + }, + "repositories": { + "$ref": "#/$defs/stringArray" + }, + "packages": { + "$ref": "#/$defs/stringArray" + }, + "purls": { + "$ref": "#/$defs/stringArray" + }, + "cves": { + "$ref": "#/$defs/stringArray" + }, + "paths": { + "$ref": "#/$defs/stringArray" + }, + "layerDigests": { + "$ref": "#/$defs/stringArray" + }, + "usedByEntrypoint": { + "$ref": "#/$defs/stringArray" + }, + "justification": { + "type": "string" + }, + "quiet": { + "type": "boolean" + }, + "action": { + "oneOf": [ + { + "type": "string", + "enum": ["block", "fail", "deny", "ignore", "warn", "defer", "escalate", "requireVex"] + }, + { + "type": "object", + "required": ["type"], + "properties": { + "type": { + "type": "string" + }, + "quiet": { + "type": "boolean" + }, + "until": { + "type": "string", + "format": "date-time" + }, + "justification": { + "type": "string" + }, + "severity": { + "$ref": "#/$defs/severity" + }, + "vendors": { + "$ref": "#/$defs/stringArray" + }, + "justifications": { + "$ref": "#/$defs/stringArray" + }, + "epss": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "kev": { + "type": 
"boolean" + } + }, + "additionalProperties": true + } + ] + }, + "expires": { + "type": "string", + "format": "date-time" + }, + "until": { + "type": "string", + "format": "date-time" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "type": ["string", "number", "boolean"] + } + } + }, + "additionalProperties": true + } + } +} diff --git a/src/StellaOps.Policy/Schemas/policy-scoring-default.json b/src/StellaOps.Policy/Schemas/policy-scoring-default.json new file mode 100644 index 00000000..eaa41270 --- /dev/null +++ b/src/StellaOps.Policy/Schemas/policy-scoring-default.json @@ -0,0 +1,21 @@ +{ + "version": "1.0", + "severityWeights": { + "Critical": 90.0, + "High": 75.0, + "Medium": 50.0, + "Low": 25.0, + "Informational": 10.0, + "None": 0.0, + "Unknown": 60.0 + }, + "quietPenalty": 45.0, + "warnPenalty": 15.0, + "ignorePenalty": 35.0, + "trustOverrides": { + "vendor": 1.0, + "distro": 0.85, + "platform": 0.75, + "community": 0.65 + } +} diff --git a/src/StellaOps.Policy/StellaOps.Policy.csproj b/src/StellaOps.Policy/StellaOps.Policy.csproj index 6c3a8871..c04fee44 100644 --- a/src/StellaOps.Policy/StellaOps.Policy.csproj +++ b/src/StellaOps.Policy/StellaOps.Policy.csproj @@ -3,5 +3,18 @@ net10.0 enable enable + preview + true + + + + + + + + + + + diff --git a/src/StellaOps.Policy/Storage/IPolicySnapshotRepository.cs b/src/StellaOps.Policy/Storage/IPolicySnapshotRepository.cs new file mode 100644 index 00000000..111ece46 --- /dev/null +++ b/src/StellaOps.Policy/Storage/IPolicySnapshotRepository.cs @@ -0,0 +1,14 @@ +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Policy; + +public interface IPolicySnapshotRepository +{ + Task GetLatestAsync(CancellationToken cancellationToken = default); + + Task> ListAsync(int limit, CancellationToken cancellationToken = default); + + Task AddAsync(PolicySnapshot snapshot, CancellationToken cancellationToken = default); +} diff --git 
a/src/StellaOps.Policy/Storage/InMemoryPolicySnapshotRepository.cs b/src/StellaOps.Policy/Storage/InMemoryPolicySnapshotRepository.cs new file mode 100644 index 00000000..cb78d33b --- /dev/null +++ b/src/StellaOps.Policy/Storage/InMemoryPolicySnapshotRepository.cs @@ -0,0 +1,65 @@ +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Policy; + +public sealed class InMemoryPolicySnapshotRepository : IPolicySnapshotRepository +{ + private readonly List _snapshots = new(); + private readonly SemaphoreSlim _mutex = new(1, 1); + + public async Task AddAsync(PolicySnapshot snapshot, CancellationToken cancellationToken = default) + { + if (snapshot is null) + { + throw new ArgumentNullException(nameof(snapshot)); + } + + await _mutex.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + _snapshots.Add(snapshot); + _snapshots.Sort(static (left, right) => left.RevisionNumber.CompareTo(right.RevisionNumber)); + } + finally + { + _mutex.Release(); + } + } + + public async Task GetLatestAsync(CancellationToken cancellationToken = default) + { + await _mutex.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + return _snapshots.Count == 0 ? 
null : _snapshots[^1]; + } + finally + { + _mutex.Release(); + } + } + + public async Task> ListAsync(int limit, CancellationToken cancellationToken = default) + { + await _mutex.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + IEnumerable query = _snapshots; + if (limit > 0) + { + query = query.TakeLast(limit); + } + + return query.ToImmutableArray(); + } + finally + { + _mutex.Release(); + } + } +} diff --git a/src/StellaOps.Policy/TASKS.md b/src/StellaOps.Policy/TASKS.md index 22f396a8..968388f9 100644 --- a/src/StellaOps.Policy/TASKS.md +++ b/src/StellaOps.Policy/TASKS.md @@ -2,12 +2,17 @@ | ID | Status | Owner(s) | Depends on | Description | Exit Criteria | |----|--------|----------|------------|-------------|---------------| -| POLICY-CORE-09-001 | TODO | Policy Guild | SCANNER-WEB-09-101 | Define YAML schema/binder, diagnostics, CLI validation for policy files. | Schema doc published; binder loads sample policy; validation errors actionable. | -| POLICY-CORE-09-002 | TODO | Policy Guild | POLICY-CORE-09-001 | Implement policy snapshot store + revision digests + audit logging. | Snapshots persisted with digest; tests compare revisions; audit entries created. | -| POLICY-CORE-09-003 | TODO | Policy Guild | POLICY-CORE-09-002 | `/policy/preview` API (image digest → projected verdict delta). | Preview returns diff JSON; integration tests with mocked report; docs updated. | +| POLICY-CORE-09-001 | DONE | Policy Guild | SCANNER-WEB-09-101 | Define YAML schema/binder, diagnostics, CLI validation for policy files. | Schema doc published; binder loads sample policy; validation errors actionable. | +| POLICY-CORE-09-002 | DONE | Policy Guild | POLICY-CORE-09-001 | Implement policy snapshot store + revision digests + audit logging. | Snapshots persisted with digest; tests compare revisions; audit entries created. | +| POLICY-CORE-09-003 | DONE | Policy Guild | POLICY-CORE-09-002 | `/policy/preview` API (image digest → projected verdict delta). 
| Preview returns diff JSON; integration tests with mocked report; docs updated. | | POLICY-CORE-09-004 | TODO | Policy Guild | POLICY-CORE-09-001 | Versioned scoring config with schema validation, trust table, and golden fixtures. | Scoring config documented; fixtures stored; validation CLI passes. | | POLICY-CORE-09-005 | TODO | Policy Guild | POLICY-CORE-09-004 | Scoring/quiet engine – compute score, enforce VEX-only quiet rules, emit inputs and provenance. | Engine unit tests cover severity weighting; outputs include provenance data. | | POLICY-CORE-09-006 | TODO | Policy Guild | POLICY-CORE-09-005 | Unknown state & confidence decay – deterministic bands surfaced in policy outputs. | Confidence decay tests pass; docs updated; preview endpoint displays banding. | | POLICY-CORE-09-004 | TODO | Policy Guild | POLICY-CORE-09-001 | Versioned scoring config (weights, trust table, reachability buckets) with schema validation, binder, and golden fixtures. | Config serialized with semantic version, binder loads defaults, fixtures assert deterministic hash. | | POLICY-CORE-09-005 | TODO | Policy Guild | POLICY-CORE-09-004, POLICY-CORE-09-002 | Implement scoring/quiet engine: compute score from config, enforce VEX-only quiet rules, emit inputs + `quietedBy` metadata in policy verdicts. | `/reports` policy result includes score, inputs, configVersion, quiet provenance; unit/integration tests prove reproducibility. | | POLICY-CORE-09-006 | TODO | Policy Guild | POLICY-CORE-09-005, FEEDCORE-ENGINE-07-003 | Track unknown states with deterministic confidence bands that decay over time; expose state in policy outputs and docs. | Unknown flags + confidence band persisted, decay job deterministic, preview/report APIs show state with tests covering decay math. | + +## Notes +- 2025-10-18: POLICY-CORE-09-001 completed. 
Binder + diagnostics + CLI scaffolding landed with tests; schema embedded at `src/StellaOps.Policy/Schemas/policy-schema@1.json` and referenced by docs/11_DATA_SCHEMAS.md. +- 2025-10-18: POLICY-CORE-09-002 completed. Snapshot store + audit trail implemented with deterministic digest hashing and tests covering revision increments and dedupe. +- 2025-10-18: POLICY-CORE-09-003 delivered. Preview service evaluates policy projections vs. baseline, returns verdict diffs, and ships with unit coverage. diff --git a/src/StellaOps.Scanner.Core.Tests/Contracts/ScanJobTests.cs b/src/StellaOps.Scanner.Core.Tests/Contracts/ScanJobTests.cs new file mode 100644 index 00000000..72b29c6f --- /dev/null +++ b/src/StellaOps.Scanner.Core.Tests/Contracts/ScanJobTests.cs @@ -0,0 +1,81 @@ +using System.Text.Json; +using StellaOps.Scanner.Core.Contracts; +using StellaOps.Scanner.Core.Serialization; +using StellaOps.Scanner.Core.Utility; +using Xunit; + +namespace StellaOps.Scanner.Core.Tests.Contracts; + +public sealed class ScanJobTests +{ + [Fact] + public void SerializeAndDeserialize_RoundTripsDeterministically() + { + var createdAt = new DateTimeOffset(2025, 10, 18, 14, 30, 15, TimeSpan.Zero); + var jobId = ScannerIdentifiers.CreateJobId("registry.example.com/stellaops/scanner:1.2.3", "sha256:ABCDEF", "tenant-a", "request-1"); + var correlationId = ScannerIdentifiers.CreateCorrelationId(jobId, "enqueue"); + var error = new ScannerError( + ScannerErrorCode.AnalyzerFailure, + ScannerErrorSeverity.Error, + "Analyzer crashed for layer sha256:abc", + createdAt, + retryable: false, + details: new Dictionary + { + ["stage"] = "analyze-os", + ["layer"] = "sha256:abc" + }); + + var job = new ScanJob( + jobId, + ScanJobStatus.Running, + "registry.example.com/stellaops/scanner:1.2.3", + "SHA256:ABCDEF", + createdAt, + createdAt, + correlationId, + "tenant-a", + new Dictionary + { + ["requestId"] = "request-1" + }, + error); + + var json = JsonSerializer.Serialize(job, 
ScannerJsonOptions.CreateDefault()); + var deserialized = JsonSerializer.Deserialize(json, ScannerJsonOptions.CreateDefault()); + + Assert.NotNull(deserialized); + Assert.Equal(job.Id, deserialized!.Id); + Assert.Equal(job.ImageDigest, deserialized.ImageDigest); + Assert.Equal(job.CorrelationId, deserialized.CorrelationId); + Assert.Equal(job.Metadata["requestId"], deserialized.Metadata["requestId"]); + + var secondJson = JsonSerializer.Serialize(deserialized, ScannerJsonOptions.CreateDefault()); + Assert.Equal(json, secondJson); + } + + [Fact] + public void WithStatus_UpdatesTimestampDeterministically() + { + var createdAt = new DateTimeOffset(2025, 10, 18, 14, 30, 15, 123, TimeSpan.Zero); + var jobId = ScannerIdentifiers.CreateJobId("example/scanner:latest", "sha256:def", null, null); + var correlationId = ScannerIdentifiers.CreateCorrelationId(jobId, "enqueue"); + + var job = new ScanJob( + jobId, + ScanJobStatus.Pending, + "example/scanner:latest", + "sha256:def", + createdAt, + null, + correlationId, + null, + null, + null); + + var updated = job.WithStatus(ScanJobStatus.Running, createdAt.AddSeconds(5)); + + Assert.Equal(ScanJobStatus.Running, updated.Status); + Assert.Equal(ScannerTimestamps.Normalize(createdAt.AddSeconds(5)), updated.UpdatedAt); + } +} diff --git a/src/StellaOps.Scanner.Core.Tests/Observability/ScannerLogExtensionsTests.cs b/src/StellaOps.Scanner.Core.Tests/Observability/ScannerLogExtensionsTests.cs new file mode 100644 index 00000000..79b218bd --- /dev/null +++ b/src/StellaOps.Scanner.Core.Tests/Observability/ScannerLogExtensionsTests.cs @@ -0,0 +1,39 @@ +using Microsoft.Extensions.Logging; +using StellaOps.Scanner.Core.Contracts; +using StellaOps.Scanner.Core.Observability; +using StellaOps.Scanner.Core.Utility; +using Xunit; + +namespace StellaOps.Scanner.Core.Tests.Observability; + +public sealed class ScannerLogExtensionsTests +{ + [Fact] + public void BeginScanScope_PopulatesCorrelationContext() + { + using var factory = 
LoggerFactory.Create(builder => builder.AddFilter(_ => true)); + var logger = factory.CreateLogger("test"); + + var jobId = ScannerIdentifiers.CreateJobId("example/scanner:1.0", "sha256:abc", null, null); + var correlationId = ScannerIdentifiers.CreateCorrelationId(jobId, "enqueue"); + var job = new ScanJob( + jobId, + ScanJobStatus.Pending, + "example/scanner:1.0", + "sha256:abc", + DateTimeOffset.UtcNow, + null, + correlationId, + null, + null, + null); + + using (logger.BeginScanScope(job, "enqueue")) + { + Assert.True(ScannerCorrelationContextAccessor.TryGetCorrelationId(out var current)); + Assert.Equal(correlationId, current); + } + + Assert.False(ScannerCorrelationContextAccessor.TryGetCorrelationId(out _)); + } +} diff --git a/src/StellaOps.Scanner.Core.Tests/Security/AuthorityTokenSourceTests.cs b/src/StellaOps.Scanner.Core.Tests/Security/AuthorityTokenSourceTests.cs new file mode 100644 index 00000000..c5adcf21 --- /dev/null +++ b/src/StellaOps.Scanner.Core.Tests/Security/AuthorityTokenSourceTests.cs @@ -0,0 +1,89 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging.Abstractions; +using Microsoft.Extensions.Time.Testing; +using Microsoft.IdentityModel.Tokens; +using StellaOps.Auth.Client; +using StellaOps.Scanner.Core.Security; +using Xunit; + +namespace StellaOps.Scanner.Core.Tests.Security; + +public sealed class AuthorityTokenSourceTests +{ + [Fact] + public async Task GetAsync_ReusesCachedTokenUntilRefreshSkew() + { + var timeProvider = new FakeTimeProvider(new DateTimeOffset(2025, 10, 18, 12, 0, 0, TimeSpan.Zero)); + var client = new FakeTokenClient(timeProvider); + var source = new AuthorityTokenSource(client, TimeSpan.FromSeconds(30), timeProvider, NullLogger.Instance); + + var token1 = await source.GetAsync("scanner", new[] { "scanner.read" }); + Assert.Equal(1, client.RequestCount); + + var token2 = await source.GetAsync("scanner", new[] { "scanner.read" }); + Assert.Equal(1, 
client.RequestCount); + Assert.Equal(token1.AccessToken, token2.AccessToken); + + timeProvider.Advance(TimeSpan.FromMinutes(3)); + var token3 = await source.GetAsync("scanner", new[] { "scanner.read" }); + Assert.Equal(2, client.RequestCount); + Assert.NotEqual(token1.AccessToken, token3.AccessToken); + } + + [Fact] + public async Task InvalidateAsync_RemovesCachedToken() + { + var timeProvider = new FakeTimeProvider(new DateTimeOffset(2025, 10, 18, 12, 0, 0, TimeSpan.Zero)); + var client = new FakeTokenClient(timeProvider); + var source = new AuthorityTokenSource(client, TimeSpan.FromSeconds(30), timeProvider, NullLogger.Instance); + + _ = await source.GetAsync("scanner", new[] { "scanner.read" }); + Assert.Equal(1, client.RequestCount); + + await source.InvalidateAsync("scanner", new[] { "scanner.read" }); + _ = await source.GetAsync("scanner", new[] { "scanner.read" }); + + Assert.Equal(2, client.RequestCount); + } + + private sealed class FakeTokenClient : IStellaOpsTokenClient + { + private readonly FakeTimeProvider timeProvider; + private int counter; + + public FakeTokenClient(FakeTimeProvider timeProvider) + { + this.timeProvider = timeProvider; + } + + public int RequestCount => counter; + + public Task RequestClientCredentialsTokenAsync(string? scope = null, CancellationToken cancellationToken = default) + { + var access = $"token-{Interlocked.Increment(ref counter)}"; + var expires = timeProvider.GetUtcNow().AddMinutes(2); + var scopes = scope is null + ? Array.Empty() + : scope.Split(' ', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries); + + return Task.FromResult(new StellaOpsTokenResult(access, "Bearer", expires, scopes)); + } + + public Task RequestPasswordTokenAsync(string username, string password, string? 
scope = null, CancellationToken cancellationToken = default) + => throw new NotSupportedException(); + + public Task GetJsonWebKeySetAsync(CancellationToken cancellationToken = default) + => throw new NotSupportedException(); + + public ValueTask GetCachedTokenAsync(string key, CancellationToken cancellationToken = default) + => ValueTask.FromResult(null); + + public ValueTask CacheTokenAsync(string key, StellaOpsTokenCacheEntry entry, CancellationToken cancellationToken = default) + => ValueTask.CompletedTask; + + public ValueTask ClearCachedTokenAsync(string key, CancellationToken cancellationToken = default) + => ValueTask.CompletedTask; + } +} diff --git a/src/StellaOps.Scanner.Core.Tests/Security/DpopProofValidatorTests.cs b/src/StellaOps.Scanner.Core.Tests/Security/DpopProofValidatorTests.cs new file mode 100644 index 00000000..1bc72b99 --- /dev/null +++ b/src/StellaOps.Scanner.Core.Tests/Security/DpopProofValidatorTests.cs @@ -0,0 +1,117 @@ +using System.Collections.Generic; +using System.IdentityModel.Tokens.Jwt; +using System.Security.Cryptography; +using Microsoft.Extensions.Time.Testing; +using Microsoft.Extensions.Options; +using Microsoft.IdentityModel.Tokens; +using StellaOps.Scanner.Core.Security; +using Xunit; + +namespace StellaOps.Scanner.Core.Tests.Security; + +public sealed class DpopProofValidatorTests +{ + [Fact] + public async Task ValidateAsync_ReturnsSuccess_ForValidProof() + { + var timeProvider = new FakeTimeProvider(new DateTimeOffset(2025, 10, 18, 12, 0, 0, TimeSpan.Zero)); + var validator = new DpopProofValidator(Options.Create(new DpopValidationOptions()), new InMemoryDpopReplayCache(timeProvider), timeProvider); + using var key = ECDsa.Create(ECCurve.NamedCurves.nistP256); + var securityKey = new ECDsaSecurityKey(key) { KeyId = Guid.NewGuid().ToString("N") }; + + var proof = CreateProof(timeProvider, securityKey, "GET", new Uri("https://scanner.example.com/api/v1/scans")); + var result = await validator.ValidateAsync(proof, "GET", 
new Uri("https://scanner.example.com/api/v1/scans")); + + Assert.True(result.IsValid); + Assert.NotNull(result.PublicKey); + Assert.NotNull(result.JwtId); + } + + [Fact] + public async Task ValidateAsync_Fails_OnNonceMismatch() + { + var timeProvider = new FakeTimeProvider(new DateTimeOffset(2025, 10, 18, 12, 0, 0, TimeSpan.Zero)); + var validator = new DpopProofValidator(Options.Create(new DpopValidationOptions()), new InMemoryDpopReplayCache(timeProvider), timeProvider); + using var key = ECDsa.Create(ECCurve.NamedCurves.nistP256); + var securityKey = new ECDsaSecurityKey(key) { KeyId = Guid.NewGuid().ToString("N") }; + + var proof = CreateProof(timeProvider, securityKey, "POST", new Uri("https://scanner.example.com/api/v1/scans"), nonce: "expected"); + var result = await validator.ValidateAsync(proof, "POST", new Uri("https://scanner.example.com/api/v1/scans"), nonce: "different"); + + Assert.False(result.IsValid); + Assert.Equal("invalid_token", result.ErrorCode); + } + + [Fact] + public async Task ValidateAsync_Fails_OnReplay() + { + var timeProvider = new FakeTimeProvider(new DateTimeOffset(2025, 10, 18, 12, 0, 0, TimeSpan.Zero)); + var cache = new InMemoryDpopReplayCache(timeProvider); + var validator = new DpopProofValidator(Options.Create(new DpopValidationOptions()), cache, timeProvider); + using var key = ECDsa.Create(ECCurve.NamedCurves.nistP256); + var securityKey = new ECDsaSecurityKey(key) { KeyId = Guid.NewGuid().ToString("N") }; + var jti = Guid.NewGuid().ToString(); + + var proof = CreateProof(timeProvider, securityKey, "GET", new Uri("https://scanner.example.com/api/v1/scans"), jti: jti); + + var first = await validator.ValidateAsync(proof, "GET", new Uri("https://scanner.example.com/api/v1/scans")); + Assert.True(first.IsValid); + + var second = await validator.ValidateAsync(proof, "GET", new Uri("https://scanner.example.com/api/v1/scans")); + Assert.False(second.IsValid); + Assert.Equal("replay", second.ErrorCode); + } + + private static string 
CreateProof(FakeTimeProvider timeProvider, ECDsaSecurityKey key, string method, Uri uri, string? nonce = null, string? jti = null) + { + var handler = new JwtSecurityTokenHandler(); + var signingCredentials = new SigningCredentials(key, SecurityAlgorithms.EcdsaSha256); + var jwk = JsonWebKeyConverter.ConvertFromECDsaSecurityKey(key); + + var header = new JwtHeader(signingCredentials) + { + ["typ"] = "dpop+jwt", + ["jwk"] = new Dictionary + { + ["kty"] = jwk.Kty, + ["crv"] = jwk.Crv, + ["x"] = jwk.X, + ["y"] = jwk.Y + } + }; + + var payload = new JwtPayload + { + ["htm"] = method.ToUpperInvariant(), + ["htu"] = Normalize(uri), + ["iat"] = timeProvider.GetUtcNow().ToUnixTimeSeconds(), + ["jti"] = jti ?? Guid.NewGuid().ToString() + }; + + if (nonce is not null) + { + payload["nonce"] = nonce; + } + + var token = new JwtSecurityToken(header, payload); + return handler.WriteToken(token); + } + + private static string Normalize(Uri uri) + { + var builder = new UriBuilder(uri) + { + Fragment = string.Empty + }; + + builder.Host = builder.Host.ToLowerInvariant(); + builder.Scheme = builder.Scheme.ToLowerInvariant(); + + if ((builder.Scheme == "http" && builder.Port == 80) || (builder.Scheme == "https" && builder.Port == 443)) + { + builder.Port = -1; + } + + return builder.Uri.GetComponents(UriComponents.SchemeAndServer | UriComponents.PathAndQuery, UriFormat.UriEscaped); + } +} diff --git a/src/StellaOps.Scanner.Core.Tests/Security/RestartOnlyPluginGuardTests.cs b/src/StellaOps.Scanner.Core.Tests/Security/RestartOnlyPluginGuardTests.cs new file mode 100644 index 00000000..032ad1e2 --- /dev/null +++ b/src/StellaOps.Scanner.Core.Tests/Security/RestartOnlyPluginGuardTests.cs @@ -0,0 +1,26 @@ +using System; +using StellaOps.Scanner.Core.Security; +using Xunit; + +namespace StellaOps.Scanner.Core.Tests.Security; + +public sealed class RestartOnlyPluginGuardTests +{ + [Fact] + public void EnsureRegistrationAllowed_AllowsNewPluginsBeforeSeal() + { + var guard = new 
RestartOnlyPluginGuard(); + guard.EnsureRegistrationAllowed("./plugins/analyzer.dll"); + + Assert.Contains(guard.KnownPlugins, path => path.EndsWith("analyzer.dll", StringComparison.OrdinalIgnoreCase)); + } + + [Fact] + public void EnsureRegistrationAllowed_ThrowsAfterSeal() + { + var guard = new RestartOnlyPluginGuard(new[] { "./plugins/a.dll" }); + guard.Seal(); + + Assert.Throws(() => guard.EnsureRegistrationAllowed("./plugins/new.dll")); + } +} diff --git a/src/StellaOps.Scanner.Core.Tests/StellaOps.Scanner.Core.Tests.csproj b/src/StellaOps.Scanner.Core.Tests/StellaOps.Scanner.Core.Tests.csproj new file mode 100644 index 00000000..6875005c --- /dev/null +++ b/src/StellaOps.Scanner.Core.Tests/StellaOps.Scanner.Core.Tests.csproj @@ -0,0 +1,12 @@ + + + net10.0 + enable + enable + + + + + + + diff --git a/src/StellaOps.Scanner.Core.Tests/Utility/ScannerIdentifiersTests.cs b/src/StellaOps.Scanner.Core.Tests/Utility/ScannerIdentifiersTests.cs new file mode 100644 index 00000000..5c2601e7 --- /dev/null +++ b/src/StellaOps.Scanner.Core.Tests/Utility/ScannerIdentifiersTests.cs @@ -0,0 +1,33 @@ +using StellaOps.Scanner.Core.Utility; +using Xunit; + +namespace StellaOps.Scanner.Core.Tests.Utility; + +public sealed class ScannerIdentifiersTests +{ + [Fact] + public void CreateJobId_IsDeterministicAndCaseInsensitive() + { + var first = ScannerIdentifiers.CreateJobId("registry.example.com/repo:latest", "SHA256:ABC", "Tenant-A", "salt"); + var second = ScannerIdentifiers.CreateJobId("REGISTRY.EXAMPLE.COM/REPO:latest", "sha256:abc", "tenant-a", "salt"); + + Assert.Equal(first, second); + } + + [Fact] + public void CreateDeterministicHash_ProducesLowercaseHex() + { + var hash = ScannerIdentifiers.CreateDeterministicHash("scan", "abc", "123"); + + Assert.Matches("^[0-9a-f]{64}$", hash); + Assert.Equal(hash, hash.ToLowerInvariant()); + } + + [Fact] + public void NormalizeImageReference_LowercasesRegistryAndRepository() + { + var normalized = 
ScannerIdentifiers.NormalizeImageReference("Registry.Example.com/StellaOps/Scanner:1.0"); + + Assert.Equal("registry.example.com/stellaops/scanner:1.0", normalized); + } +} diff --git a/src/StellaOps.Scanner.Core.Tests/Utility/ScannerTimestampsTests.cs b/src/StellaOps.Scanner.Core.Tests/Utility/ScannerTimestampsTests.cs new file mode 100644 index 00000000..7fb6401f --- /dev/null +++ b/src/StellaOps.Scanner.Core.Tests/Utility/ScannerTimestampsTests.cs @@ -0,0 +1,26 @@ +using StellaOps.Scanner.Core.Utility; +using Xunit; + +namespace StellaOps.Scanner.Core.Tests.Utility; + +public sealed class ScannerTimestampsTests +{ + [Fact] + public void Normalize_TrimsToMicroseconds() + { + var value = new DateTimeOffset(2025, 10, 18, 14, 30, 15, TimeSpan.Zero).AddTicks(7); + var normalized = ScannerTimestamps.Normalize(value); + + var expectedTicks = value.UtcTicks - (value.UtcTicks % 10); + Assert.Equal(expectedTicks, normalized.UtcTicks); + } + + [Fact] + public void ToIso8601_ProducesUtcString() + { + var value = new DateTimeOffset(2025, 10, 18, 14, 30, 15, TimeSpan.FromHours(-4)); + var iso = ScannerTimestamps.ToIso8601(value); + + Assert.Equal("2025-10-18T18:30:15.000000Z", iso); + } +} diff --git a/src/StellaOps.Scanner.Core/AGENTS.md b/src/StellaOps.Scanner.Core/AGENTS.md new file mode 100644 index 00000000..f1760432 --- /dev/null +++ b/src/StellaOps.Scanner.Core/AGENTS.md @@ -0,0 +1,29 @@ +# AGENTS +## Role +Provide shared scanner contracts, observability primitives, and security utilities consumed by the WebService, Worker, analyzers, and downstream tooling. +## Scope +- Canonical DTOs for scan jobs, progress, outcomes, and error taxonomy shared across scanner services. +- Deterministic ID and timestamp helpers to guarantee reproducible job identifiers and ISO-8601 rendering. +- Observability helpers (logging scopes, correlation IDs, metric naming, activity sources) with negligible overhead. 
+- Authority/OpTok integrations, DPoP validation helpers, and restart-time plug-in guardrails for scanner components. +## Participants +- Scanner.WebService and Scanner.Worker depend on these primitives for request handling, queue interactions, and diagnostics. +- Policy/Signer integrations rely on deterministic identifiers and timestamps emitted here. +- DevOps/Offline kits bundle plug-in manifests validated via the guardrails defined in this module. +## Interfaces & contracts +- DTOs must round-trip via System.Text.Json with `JsonSerializerDefaults.Web` and preserve ordering. +- Deterministic helpers must not depend on ambient time/randomness; they derive IDs from explicit inputs and normalize timestamps to microsecond precision in UTC. +- Observability scopes expose `scanId`, `jobId`, `correlationId`, and `imageDigest` fields with `stellaops scanner` metric prefixing. +- Security helpers expose `IAuthorityTokenSource`, `IDPoPProofValidator`, and `IPluginCatalogGuard` abstractions with DI-friendly implementations. +## In/Out of scope +In: shared contracts, telemetry primitives, security utilities, plug-in manifest checks. +Out: queue implementations, analyzer logic, storage adapters, HTTP endpoints, UI wiring. +## Observability & security expectations +- No network calls except via registered Authority clients. +- Avoid allocations in hot paths; prefer struct enumerables/`ValueTask`. +- All logs structured, correlation IDs propagated, no secrets persisted. +- DPoP validation enforces algorithm allowlist (ES256/ES384) and ensures replay cache hooks. +## Tests +- `../StellaOps.Scanner.Core.Tests` owns unit coverage with deterministic fixtures. +- Golden JSON for DTO round-trips stored under `Fixtures/`. +- Security and observability helpers must include tests proving deterministic outputs and rejecting malformed proofs. 
diff --git a/src/StellaOps.Scanner.Core/Contracts/ScanJob.cs b/src/StellaOps.Scanner.Core/Contracts/ScanJob.cs new file mode 100644 index 00000000..78ebab15 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Contracts/ScanJob.cs @@ -0,0 +1,173 @@ +using System.Collections.ObjectModel; +using System.Globalization; +using System.Text.Json.Serialization; +using StellaOps.Scanner.Core.Utility; + +namespace StellaOps.Scanner.Core.Contracts; + +[JsonConverter(typeof(ScanJobIdJsonConverter))] +public readonly record struct ScanJobId(Guid Value) +{ + public static readonly ScanJobId Empty = new(Guid.Empty); + + public override string ToString() + => Value.ToString("n", CultureInfo.InvariantCulture); + + public static ScanJobId From(Guid value) + => new(value); + + public static bool TryParse(string? text, out ScanJobId id) + { + if (Guid.TryParse(text, out var guid)) + { + id = new ScanJobId(guid); + return true; + } + + id = Empty; + return false; + } +} + +[JsonConverter(typeof(JsonStringEnumConverter))] +public enum ScanJobStatus +{ + Unknown = 0, + Pending, + Queued, + Running, + Succeeded, + Failed, + Cancelled +} + +public sealed class ScanJob +{ + private static readonly IReadOnlyDictionary EmptyMetadata = + new ReadOnlyDictionary(new Dictionary(0, StringComparer.Ordinal)); + + [JsonConstructor] + public ScanJob( + ScanJobId id, + ScanJobStatus status, + string imageReference, + string? imageDigest, + DateTimeOffset createdAt, + DateTimeOffset? updatedAt, + string correlationId, + string? tenantId, + IReadOnlyDictionary? metadata = null, + ScannerError? 
failure = null) + { + if (string.IsNullOrWhiteSpace(imageReference)) + { + throw new ArgumentException("Image reference cannot be null or whitespace.", nameof(imageReference)); + } + + if (string.IsNullOrWhiteSpace(correlationId)) + { + throw new ArgumentException("Correlation identifier cannot be null or whitespace.", nameof(correlationId)); + } + + Id = id; + Status = status; + ImageReference = imageReference.Trim(); + ImageDigest = NormalizeDigest(imageDigest); + CreatedAt = ScannerTimestamps.Normalize(createdAt); + UpdatedAt = updatedAt is null ? null : ScannerTimestamps.Normalize(updatedAt.Value); + CorrelationId = correlationId; + TenantId = string.IsNullOrWhiteSpace(tenantId) ? null : tenantId.Trim(); + Metadata = metadata is null or { Count: 0 } + ? EmptyMetadata + : new ReadOnlyDictionary(new Dictionary(metadata, StringComparer.Ordinal)); + Failure = failure; + } + + [JsonPropertyName("id")] + [JsonPropertyOrder(0)] + public ScanJobId Id { get; } + + [JsonPropertyName("status")] + [JsonPropertyOrder(1)] + public ScanJobStatus Status { get; init; } + + [JsonPropertyName("imageReference")] + [JsonPropertyOrder(2)] + public string ImageReference { get; } + + [JsonPropertyName("imageDigest")] + [JsonPropertyOrder(3)] + public string? ImageDigest { get; } + + [JsonPropertyName("createdAt")] + [JsonPropertyOrder(4)] + public DateTimeOffset CreatedAt { get; } + + [JsonPropertyName("updatedAt")] + [JsonPropertyOrder(5)] + public DateTimeOffset? UpdatedAt { get; init; } + + [JsonPropertyName("correlationId")] + [JsonPropertyOrder(6)] + public string CorrelationId { get; } + + [JsonPropertyName("tenantId")] + [JsonPropertyOrder(7)] + public string? TenantId { get; } + + [JsonPropertyName("metadata")] + [JsonPropertyOrder(8)] + public IReadOnlyDictionary Metadata { get; } + + [JsonPropertyName("failure")] + [JsonPropertyOrder(9)] + public ScannerError? Failure { get; init; } + + public ScanJob WithStatus(ScanJobStatus status, DateTimeOffset? 
updatedAt = null) + => new( + Id, + status, + ImageReference, + ImageDigest, + CreatedAt, + updatedAt ?? UpdatedAt ?? CreatedAt, + CorrelationId, + TenantId, + Metadata, + Failure); + + public ScanJob WithFailure(ScannerError failure, DateTimeOffset? updatedAt = null, TimeProvider? timeProvider = null) + => new( + Id, + ScanJobStatus.Failed, + ImageReference, + ImageDigest, + CreatedAt, + updatedAt ?? ScannerTimestamps.UtcNow(timeProvider), + CorrelationId, + TenantId, + Metadata, + failure); + + private static string? NormalizeDigest(string? digest) + { + if (string.IsNullOrWhiteSpace(digest)) + { + return null; + } + + var trimmed = digest.Trim(); + if (!trimmed.StartsWith("sha", StringComparison.OrdinalIgnoreCase)) + { + return trimmed; + } + + var parts = trimmed.Split(':', 2, StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries); + if (parts.Length != 2) + { + return trimmed.ToLowerInvariant(); + } + + return $"{parts[0].ToLowerInvariant()}:{parts[1].ToLowerInvariant()}"; + } +} diff --git a/src/StellaOps.Scanner.Core/Contracts/ScanJobIdJsonConverter.cs b/src/StellaOps.Scanner.Core/Contracts/ScanJobIdJsonConverter.cs new file mode 100644 index 00000000..149f6ee1 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Contracts/ScanJobIdJsonConverter.cs @@ -0,0 +1,26 @@ +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Core.Contracts; + +internal sealed class ScanJobIdJsonConverter : JsonConverter +{ + public override ScanJobId Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) + { + if (reader.TokenType != JsonTokenType.String) + { + throw new JsonException("Expected scan job identifier to be a string."); + } + + var value = reader.GetString(); + if (!ScanJobId.TryParse(value, out var id)) + { + throw new JsonException("Invalid scan job identifier."); + } + + return id; + } + + public override void Write(Utf8JsonWriter writer, ScanJobId value, JsonSerializerOptions options) + 
=> writer.WriteStringValue(value.ToString()); +} diff --git a/src/StellaOps.Scanner.Core/Contracts/ScanProgressEvent.cs b/src/StellaOps.Scanner.Core/Contracts/ScanProgressEvent.cs new file mode 100644 index 00000000..1d081891 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Contracts/ScanProgressEvent.cs @@ -0,0 +1,121 @@ +using System.Collections.ObjectModel; +using System.Text.Json.Serialization; +using StellaOps.Scanner.Core.Utility; + +namespace StellaOps.Scanner.Core.Contracts; + +[JsonConverter(typeof(JsonStringEnumConverter))] +public enum ScanStage +{ + Unknown = 0, + ResolveImage, + FetchLayers, + MountLayers, + AnalyzeOperatingSystem, + AnalyzeLanguageEcosystems, + AnalyzeNativeArtifacts, + ComposeSbom, + BuildDiffs, + EmitArtifacts, + SignArtifacts, + Complete +} + +[JsonConverter(typeof(JsonStringEnumConverter))] +public enum ScanProgressEventKind +{ + Progress = 0, + StageStarted, + StageCompleted, + Warning, + Error +} + +public sealed class ScanProgressEvent +{ + private static readonly IReadOnlyDictionary EmptyAttributes = + new ReadOnlyDictionary(new Dictionary(0, StringComparer.Ordinal)); + + [JsonConstructor] + public ScanProgressEvent( + ScanJobId jobId, + ScanStage stage, + ScanProgressEventKind kind, + int sequence, + DateTimeOffset timestamp, + double? percentComplete = null, + string? message = null, + IReadOnlyDictionary? attributes = null, + ScannerError? error = null) + { + if (sequence < 0) + { + throw new ArgumentOutOfRangeException(nameof(sequence), sequence, "Sequence cannot be negative."); + } + + JobId = jobId; + Stage = stage; + Kind = kind; + Sequence = sequence; + Timestamp = ScannerTimestamps.Normalize(timestamp); + PercentComplete = percentComplete is < 0 or > 100 ? null : percentComplete; + Message = message is { Length: > 0 } ? message.Trim() : null; + Attributes = attributes is null or { Count: 0 } + ? 
EmptyAttributes + : new ReadOnlyDictionary(new Dictionary(attributes, StringComparer.Ordinal)); + Error = error; + } + + [JsonPropertyName("jobId")] + [JsonPropertyOrder(0)] + public ScanJobId JobId { get; } + + [JsonPropertyName("stage")] + [JsonPropertyOrder(1)] + public ScanStage Stage { get; } + + [JsonPropertyName("kind")] + [JsonPropertyOrder(2)] + public ScanProgressEventKind Kind { get; } + + [JsonPropertyName("sequence")] + [JsonPropertyOrder(3)] + public int Sequence { get; } + + [JsonPropertyName("timestamp")] + [JsonPropertyOrder(4)] + public DateTimeOffset Timestamp { get; } + + [JsonPropertyName("percentComplete")] + [JsonPropertyOrder(5)] + public double? PercentComplete { get; } + + [JsonPropertyName("message")] + [JsonPropertyOrder(6)] + public string? Message { get; } + + [JsonPropertyName("attributes")] + [JsonPropertyOrder(7)] + public IReadOnlyDictionary Attributes { get; } + + [JsonPropertyName("error")] + [JsonPropertyOrder(8)] + public ScannerError? Error { get; } + + public ScanProgressEvent With( + ScanProgressEventKind? kind = null, + double? percentComplete = null, + string? message = null, + IReadOnlyDictionary? attributes = null, + ScannerError? error = null) + => new( + JobId, + Stage, + kind ?? Kind, + Sequence, + Timestamp, + percentComplete ?? PercentComplete, + message ?? Message, + attributes ?? Attributes, + error ?? 
Error); +} diff --git a/src/StellaOps.Scanner.Core/Contracts/ScannerError.cs b/src/StellaOps.Scanner.Core/Contracts/ScannerError.cs new file mode 100644 index 00000000..0cd99be9 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Contracts/ScannerError.cs @@ -0,0 +1,110 @@ +using System.Collections.ObjectModel; +using System.Text.Json.Serialization; +using StellaOps.Scanner.Core.Utility; + +namespace StellaOps.Scanner.Core.Contracts; + +[JsonConverter(typeof(JsonStringEnumConverter))] +public enum ScannerErrorCode +{ + Unknown = 0, + InvalidImageReference, + ImageNotFound, + AuthorizationFailed, + QueueUnavailable, + StorageUnavailable, + AnalyzerFailure, + ExportFailure, + SigningFailure, + RuntimeFailure, + Timeout, + Cancelled, + PluginViolation +} + +[JsonConverter(typeof(JsonStringEnumConverter))] +public enum ScannerErrorSeverity +{ + Warning = 0, + Error, + Fatal +} + +public sealed class ScannerError +{ + private static readonly IReadOnlyDictionary EmptyDetails = + new ReadOnlyDictionary(new Dictionary(0, StringComparer.Ordinal)); + + [JsonConstructor] + public ScannerError( + ScannerErrorCode code, + ScannerErrorSeverity severity, + string message, + DateTimeOffset timestamp, + bool retryable, + IReadOnlyDictionary? details = null, + string? stage = null, + string? component = null) + { + if (string.IsNullOrWhiteSpace(message)) + { + throw new ArgumentException("Error message cannot be null or whitespace.", nameof(message)); + } + + Code = code; + Severity = severity; + Message = message.Trim(); + Timestamp = ScannerTimestamps.Normalize(timestamp); + Retryable = retryable; + Stage = stage; + Component = component; + Details = details is null or { Count: 0 } + ? 
EmptyDetails + : new ReadOnlyDictionary(new Dictionary(details, StringComparer.Ordinal)); + } + + [JsonPropertyName("code")] + [JsonPropertyOrder(0)] + public ScannerErrorCode Code { get; } + + [JsonPropertyName("severity")] + [JsonPropertyOrder(1)] + public ScannerErrorSeverity Severity { get; } + + [JsonPropertyName("message")] + [JsonPropertyOrder(2)] + public string Message { get; } + + [JsonPropertyName("timestamp")] + [JsonPropertyOrder(3)] + public DateTimeOffset Timestamp { get; } + + [JsonPropertyName("retryable")] + [JsonPropertyOrder(4)] + public bool Retryable { get; } + + [JsonPropertyName("stage")] + [JsonPropertyOrder(5)] + public string? Stage { get; } + + [JsonPropertyName("component")] + [JsonPropertyOrder(6)] + public string? Component { get; } + + [JsonPropertyName("details")] + [JsonPropertyOrder(7)] + public IReadOnlyDictionary Details { get; } + + public ScannerError WithDetail(string key, string value) + { + ArgumentException.ThrowIfNullOrWhiteSpace(key); + ArgumentException.ThrowIfNullOrWhiteSpace(value); + + var mutable = new Dictionary(Details, StringComparer.Ordinal) + { + [key] = value + }; + + return new ScannerError(Code, Severity, Message, Timestamp, Retryable, mutable, Stage, Component); + } +} diff --git a/src/StellaOps.Scanner.Core/Observability/ScannerCorrelationContext.cs b/src/StellaOps.Scanner.Core/Observability/ScannerCorrelationContext.cs new file mode 100644 index 00000000..f95e7029 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Observability/ScannerCorrelationContext.cs @@ -0,0 +1,80 @@ +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using StellaOps.Scanner.Core.Contracts; +using StellaOps.Scanner.Core.Utility; + +namespace StellaOps.Scanner.Core.Observability; + +public readonly record struct ScannerCorrelationContext( + ScanJobId JobId, + string CorrelationId, + string? Stage, + string? Component, + string? 
Audience = null) +{ + public static ScannerCorrelationContext Create( + ScanJobId jobId, + string? stage = null, + string? component = null, + string? audience = null) + { + var correlationId = ScannerIdentifiers.CreateCorrelationId(jobId, stage, component); + return new ScannerCorrelationContext(jobId, correlationId, stage, component, audience); + } + + public string DeterministicHash() + => ScannerIdentifiers.CreateDeterministicHash( + JobId.ToString(), + Stage ?? string.Empty, + Component ?? string.Empty, + Audience ?? string.Empty); +} + +public static class ScannerCorrelationContextAccessor +{ + private static readonly AsyncLocal CurrentContext = new(); + + public static ScannerCorrelationContext? Current => CurrentContext.Value; + + public static IDisposable Push(in ScannerCorrelationContext context) + { + var previous = CurrentContext.Value; + CurrentContext.Value = context; + return new DisposableScope(() => CurrentContext.Value = previous); + } + + public static bool TryGetCorrelationId([NotNullWhen(true)] out string? correlationId) + { + var context = CurrentContext.Value; + if (context.HasValue) + { + correlationId = context.Value.CorrelationId; + return true; + } + + correlationId = null; + return false; + } + + private sealed class DisposableScope : IDisposable + { + private readonly Action release; + private bool disposed; + + public DisposableScope(Action release) + { + this.release = release ?? 
throw new ArgumentNullException(nameof(release)); + } + + public void Dispose() + { + if (disposed) + { + return; + } + + disposed = true; + release(); + } + } +} diff --git a/src/StellaOps.Scanner.Core/Observability/ScannerDiagnostics.cs b/src/StellaOps.Scanner.Core/Observability/ScannerDiagnostics.cs new file mode 100644 index 00000000..090e3bad --- /dev/null +++ b/src/StellaOps.Scanner.Core/Observability/ScannerDiagnostics.cs @@ -0,0 +1,55 @@ +using System.Diagnostics; +using System.Diagnostics.Metrics; +using StellaOps.Scanner.Core.Contracts; +using StellaOps.Scanner.Core.Utility; + +namespace StellaOps.Scanner.Core.Observability; + +public static class ScannerDiagnostics +{ + public const string ActivitySourceName = "StellaOps.Scanner"; + public const string ActivityVersion = "1.0.0"; + public const string MeterName = "stellaops.scanner"; + public const string MeterVersion = "1.0.0"; + + public static ActivitySource ActivitySource { get; } = new(ActivitySourceName, ActivityVersion); + public static Meter Meter { get; } = new(MeterName, MeterVersion); + + public static Activity? StartActivity( + string name, + ScanJobId jobId, + string? stage = null, + string? component = null, + ActivityKind kind = ActivityKind.Internal, + IEnumerable>? 
tags = null) + { + var activity = ActivitySource.StartActivity(name, kind); + if (activity is null) + { + return null; + } + + activity.SetTag("stellaops.scanner.job_id", jobId.ToString()); + activity.SetTag("stellaops.scanner.correlation_id", ScannerIdentifiers.CreateCorrelationId(jobId, stage, component)); + + if (!string.IsNullOrWhiteSpace(stage)) + { + activity.SetTag("stellaops.scanner.stage", stage); + } + + if (!string.IsNullOrWhiteSpace(component)) + { + activity.SetTag("stellaops.scanner.component", component); + } + + if (tags is not null) + { + foreach (var tag in tags) + { + activity?.SetTag(tag.Key, tag.Value); + } + } + + return activity; + } +} diff --git a/src/StellaOps.Scanner.Core/Observability/ScannerLogExtensions.cs b/src/StellaOps.Scanner.Core/Observability/ScannerLogExtensions.cs new file mode 100644 index 00000000..623d2dc0 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Observability/ScannerLogExtensions.cs @@ -0,0 +1,115 @@ +using Microsoft.Extensions.Logging; +using StellaOps.Scanner.Core.Contracts; +using StellaOps.Scanner.Core.Utility; + +namespace StellaOps.Scanner.Core.Observability; + +public static class ScannerLogExtensions +{ + private sealed class NoopScope : IDisposable + { + public static NoopScope Instance { get; } = new(); + + public void Dispose() + { + } + } + + private sealed class CompositeScope : IDisposable + { + private readonly IDisposable first; + private readonly IDisposable second; + private bool disposed; + + public CompositeScope(IDisposable first, IDisposable second) + { + this.first = first; + this.second = second; + } + + public void Dispose() + { + if (disposed) + { + return; + } + + disposed = true; + second.Dispose(); + first.Dispose(); + } + } + + public static IDisposable BeginScanScope(this ILogger? logger, ScanJob job, string? stage = null, string? component = null) + { + var correlation = ScannerCorrelationContext.Create(job.Id, stage, component); + var logScope = logger is null + ? 
NoopScope.Instance + : logger.BeginScope(CreateScopeState( + job.Id, + job.CorrelationId, + stage, + component, + job.TenantId, + job.ImageDigest)) ?? NoopScope.Instance; + + var correlationScope = ScannerCorrelationContextAccessor.Push(correlation); + return new CompositeScope(logScope, correlationScope); + } + + public static IDisposable BeginProgressScope(this ILogger? logger, ScanProgressEvent progress, string? component = null) + { + var correlationId = ScannerIdentifiers.CreateCorrelationId(progress.JobId, progress.Stage.ToString(), component); + var correlation = new ScannerCorrelationContext(progress.JobId, correlationId, progress.Stage.ToString(), component); + + var logScope = logger is null + ? NoopScope.Instance + : logger.BeginScope(new Dictionary(6, StringComparer.Ordinal) + { + ["scanId"] = progress.JobId.ToString(), + ["stage"] = progress.Stage.ToString(), + ["sequence"] = progress.Sequence, + ["kind"] = progress.Kind.ToString(), + ["correlationId"] = correlationId, + ["component"] = component ?? string.Empty + }) ?? NoopScope.Instance; + + var correlationScope = ScannerCorrelationContextAccessor.Push(correlation); + return new CompositeScope(logScope, correlationScope); + } + + public static IDisposable BeginCorrelationScope(this ILogger? logger, ScannerCorrelationContext context) + { + var scope = logger is null + ? NoopScope.Instance + : logger.BeginScope(CreateScopeState(context.JobId, context.CorrelationId, context.Stage, context.Component, null, null)) ?? NoopScope.Instance; + + var correlationScope = ScannerCorrelationContextAccessor.Push(context); + return new CompositeScope(scope, correlationScope); + } + + private static Dictionary CreateScopeState( + ScanJobId jobId, + string correlationId, + string? stage, + string? component, + string? tenantId, + string? imageDigest) + { + var state = new Dictionary(6, StringComparer.Ordinal) + { + ["scanId"] = jobId.ToString(), + ["correlationId"] = correlationId, + ["stage"] = stage ?? 
string.Empty, + ["component"] = component ?? string.Empty, + ["tenantId"] = tenantId ?? string.Empty + }; + + if (!string.IsNullOrEmpty(imageDigest)) + { + state["imageDigest"] = imageDigest; + } + + return state; + } +} diff --git a/src/StellaOps.Scanner.Core/Observability/ScannerMetricNames.cs b/src/StellaOps.Scanner.Core/Observability/ScannerMetricNames.cs new file mode 100644 index 00000000..9ea721ce --- /dev/null +++ b/src/StellaOps.Scanner.Core/Observability/ScannerMetricNames.cs @@ -0,0 +1,55 @@ +using System.Collections.Frozen; +using StellaOps.Scanner.Core.Contracts; +using StellaOps.Scanner.Core.Utility; + +namespace StellaOps.Scanner.Core.Observability; + +public static class ScannerMetricNames +{ + public const string Prefix = "stellaops.scanner"; + public const string QueueLatency = $"{Prefix}.queue.latency"; + public const string QueueDepth = $"{Prefix}.queue.depth"; + public const string StageDuration = $"{Prefix}.stage.duration"; + public const string StageProgress = $"{Prefix}.stage.progress"; + public const string JobCount = $"{Prefix}.jobs.count"; + public const string JobFailures = $"{Prefix}.jobs.failures"; + public const string ArtifactBytes = $"{Prefix}.artifacts.bytes"; + + public static FrozenDictionary BuildJobTags(ScanJob job, string? stage = null, string? component = null) + { + ArgumentNullException.ThrowIfNull(job); + + var builder = new Dictionary(6, StringComparer.Ordinal) + { + ["jobId"] = job.Id.ToString(), + ["stage"] = stage ?? string.Empty, + ["component"] = component ?? string.Empty, + ["tenantId"] = job.TenantId ?? 
string.Empty, + ["correlationId"] = job.CorrelationId, + ["status"] = job.Status.ToString() + }; + + if (!string.IsNullOrEmpty(job.ImageDigest)) + { + builder["imageDigest"] = job.ImageDigest; + } + + return builder.ToFrozenDictionary(StringComparer.Ordinal); + } + + public static FrozenDictionary BuildEventTags(ScanProgressEvent progress) + { + ArgumentNullException.ThrowIfNull(progress); + + var builder = new Dictionary(5, StringComparer.Ordinal) + { + ["jobId"] = progress.JobId.ToString(), + ["stage"] = progress.Stage.ToString(), + ["kind"] = progress.Kind.ToString(), + ["sequence"] = progress.Sequence, + ["correlationId"] = ScannerIdentifiers.CreateCorrelationId(progress.JobId, progress.Stage.ToString()) + }; + + return builder.ToFrozenDictionary(StringComparer.Ordinal); + } +} diff --git a/src/StellaOps.Scanner.Core/Security/AuthorityTokenSource.cs b/src/StellaOps.Scanner.Core/Security/AuthorityTokenSource.cs new file mode 100644 index 00000000..41a6ee79 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/AuthorityTokenSource.cs @@ -0,0 +1,128 @@ +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using StellaOps.Auth.Client; +using StellaOps.Scanner.Core.Utility; + +namespace StellaOps.Scanner.Core.Security; + +public sealed class AuthorityTokenSource : IAuthorityTokenSource +{ + private readonly IStellaOpsTokenClient tokenClient; + private readonly TimeProvider timeProvider; + private readonly TimeSpan refreshSkew; + private readonly ILogger? logger; + private readonly ConcurrentDictionary cache = new(StringComparer.Ordinal); + private readonly ConcurrentDictionary locks = new(StringComparer.Ordinal); + + public AuthorityTokenSource( + IStellaOpsTokenClient tokenClient, + TimeSpan? refreshSkew = null, + TimeProvider? timeProvider = null, + ILogger? logger = null) + { + this.tokenClient = tokenClient ?? 
throw new ArgumentNullException(nameof(tokenClient)); + this.timeProvider = timeProvider ?? TimeProvider.System; + this.logger = logger; + this.refreshSkew = refreshSkew is { } value && value > TimeSpan.Zero ? value : TimeSpan.FromSeconds(30); + } + + public async ValueTask GetAsync(string audience, IEnumerable scopes, CancellationToken cancellationToken = default) + { + ArgumentException.ThrowIfNullOrWhiteSpace(audience); + + var normalizedAudience = NormalizeAudience(audience); + var normalizedScopes = NormalizeScopes(scopes, normalizedAudience); + var cacheKey = BuildCacheKey(normalizedAudience, normalizedScopes); + + if (cache.TryGetValue(cacheKey, out var cached) && !cached.Token.IsExpired(timeProvider, refreshSkew)) + { + return cached.Token; + } + + var mutex = locks.GetOrAdd(cacheKey, static _ => new SemaphoreSlim(1, 1)); + await mutex.WaitAsync(cancellationToken).ConfigureAwait(false); + + try + { + if (cache.TryGetValue(cacheKey, out cached) && !cached.Token.IsExpired(timeProvider, refreshSkew)) + { + return cached.Token; + } + + var scopeString = string.Join(' ', normalizedScopes); + var tokenResult = await tokenClient.RequestClientCredentialsTokenAsync(scopeString, cancellationToken).ConfigureAwait(false); + + var token = ScannerOperationalToken.FromResult( + tokenResult.AccessToken, + tokenResult.TokenType, + tokenResult.ExpiresAtUtc, + tokenResult.Scopes); + + cache[cacheKey] = new CacheEntry(token); + logger?.LogDebug( + "Issued new scanner OpTok for audience {Audience} with scopes {Scopes}; expires at {ExpiresAt}.", + normalizedAudience, + scopeString, + token.ExpiresAt); + + return token; + } + finally + { + mutex.Release(); + } + } + + public ValueTask InvalidateAsync(string audience, IEnumerable scopes, CancellationToken cancellationToken = default) + { + ArgumentException.ThrowIfNullOrWhiteSpace(audience); + + var normalizedAudience = NormalizeAudience(audience); + var normalizedScopes = NormalizeScopes(scopes, normalizedAudience); + var 
cacheKey = BuildCacheKey(normalizedAudience, normalizedScopes); + + cache.TryRemove(cacheKey, out _); + if (locks.TryRemove(cacheKey, out var mutex)) + { + mutex.Dispose(); + } + + logger?.LogDebug("Invalidated cached OpTok for {Audience} ({CacheKey}).", normalizedAudience, cacheKey); + return ValueTask.CompletedTask; + } + + private static string NormalizeAudience(string audience) + => audience.Trim().ToLowerInvariant(); + + private static IReadOnlyList NormalizeScopes(IEnumerable scopes, string audience) + { + var set = new SortedSet(StringComparer.Ordinal) + { + $"aud:{audience}" + }; + + if (scopes is not null) + { + foreach (var scope in scopes) + { + if (string.IsNullOrWhiteSpace(scope)) + { + continue; + } + + set.Add(scope.Trim()); + } + } + + return set.ToArray(); + } + + private static string BuildCacheKey(string audience, IReadOnlyList scopes) + => ScannerIdentifiers.CreateDeterministicHash(audience, string.Join(' ', scopes)); + + private readonly record struct CacheEntry(ScannerOperationalToken Token); +} diff --git a/src/StellaOps.Scanner.Core/Security/DpopProofValidator.cs b/src/StellaOps.Scanner.Core/Security/DpopProofValidator.cs new file mode 100644 index 00000000..6b236be7 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/DpopProofValidator.cs @@ -0,0 +1,248 @@ +using System.Linq; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using Microsoft.IdentityModel.Tokens; +using System.IdentityModel.Tokens.Jwt; + +namespace StellaOps.Scanner.Core.Security; + +public sealed class DpopProofValidator : IDpopProofValidator +{ + private static readonly string ProofType = "dpop+jwt"; + private readonly DpopValidationOptions options; + private readonly IDpopReplayCache replayCache; + private readonly TimeProvider timeProvider; + private readonly ILogger? 
logger; + private readonly JwtSecurityTokenHandler tokenHandler = new(); + + public DpopProofValidator( + IOptions options, + IDpopReplayCache? replayCache = null, + TimeProvider? timeProvider = null, + ILogger? logger = null) + { + if (options is null) + { + throw new ArgumentNullException(nameof(options)); + } + + var cloned = options.Value ?? throw new InvalidOperationException("DPoP options must be provided."); + cloned.Validate(); + + this.options = cloned; + this.replayCache = replayCache ?? NullReplayCache.Instance; + this.timeProvider = timeProvider ?? TimeProvider.System; + this.logger = logger; + } + + public async ValueTask ValidateAsync(string proof, string httpMethod, Uri httpUri, string? nonce = null, CancellationToken cancellationToken = default) + { + ArgumentException.ThrowIfNullOrWhiteSpace(proof); + ArgumentException.ThrowIfNullOrWhiteSpace(httpMethod); + ArgumentNullException.ThrowIfNull(httpUri); + + var now = timeProvider.GetUtcNow(); + + if (!TryDecodeSegment(proof, segmentIndex: 0, out var headerElement, out var headerError)) + { + logger?.LogWarning("DPoP header decode failure: {Error}", headerError); + return DpopValidationResult.Failure("invalid_header", headerError ?? 
"Unable to decode header."); + } + + if (!headerElement.TryGetProperty("typ", out var typElement) || !string.Equals(typElement.GetString(), ProofType, StringComparison.OrdinalIgnoreCase)) + { + return DpopValidationResult.Failure("invalid_header", "DPoP proof missing typ=dpop+jwt header."); + } + + if (!headerElement.TryGetProperty("alg", out var algElement)) + { + return DpopValidationResult.Failure("invalid_header", "DPoP proof missing alg header."); + } + + var algorithm = algElement.GetString()?.Trim().ToUpperInvariant(); + if (string.IsNullOrEmpty(algorithm) || !options.NormalizedAlgorithms.Contains(algorithm)) + { + return DpopValidationResult.Failure("invalid_header", "Unsupported DPoP algorithm."); + } + + if (!headerElement.TryGetProperty("jwk", out var jwkElement)) + { + return DpopValidationResult.Failure("invalid_header", "DPoP proof missing jwk header."); + } + + JsonWebKey jwk; + try + { + jwk = new JsonWebKey(jwkElement.GetRawText()); + } + catch (Exception ex) + { + logger?.LogWarning(ex, "Failed to parse DPoP jwk header."); + return DpopValidationResult.Failure("invalid_header", "DPoP proof jwk header is invalid."); + } + + if (!TryDecodeSegment(proof, segmentIndex: 1, out var payloadElement, out var payloadError)) + { + logger?.LogWarning("DPoP payload decode failure: {Error}", payloadError); + return DpopValidationResult.Failure("invalid_payload", payloadError ?? 
"Unable to decode payload."); + } + + if (!payloadElement.TryGetProperty("htm", out var htmElement)) + { + return DpopValidationResult.Failure("invalid_payload", "DPoP proof missing htm claim."); + } + + var method = httpMethod.Trim().ToUpperInvariant(); + if (!string.Equals(htmElement.GetString(), method, StringComparison.Ordinal)) + { + return DpopValidationResult.Failure("invalid_payload", "DPoP htm does not match request method."); + } + + if (!payloadElement.TryGetProperty("htu", out var htuElement)) + { + return DpopValidationResult.Failure("invalid_payload", "DPoP proof missing htu claim."); + } + + var normalizedHtu = NormalizeHtu(httpUri); + if (!string.Equals(htuElement.GetString(), normalizedHtu, StringComparison.Ordinal)) + { + return DpopValidationResult.Failure("invalid_payload", "DPoP htu does not match request URI."); + } + + if (!payloadElement.TryGetProperty("iat", out var iatElement) || iatElement.ValueKind is not JsonValueKind.Number) + { + return DpopValidationResult.Failure("invalid_payload", "DPoP proof missing iat claim."); + } + + if (!payloadElement.TryGetProperty("jti", out var jtiElement) || jtiElement.ValueKind != JsonValueKind.String) + { + return DpopValidationResult.Failure("invalid_payload", "DPoP proof missing jti claim."); + } + + long iatSeconds; + try + { + iatSeconds = iatElement.GetInt64(); + } + catch (Exception) + { + return DpopValidationResult.Failure("invalid_payload", "DPoP proof iat claim is not a valid number."); + } + + var issuedAt = DateTimeOffset.FromUnixTimeSeconds(iatSeconds).ToUniversalTime(); + if (issuedAt - options.AllowedClockSkew > now) + { + return DpopValidationResult.Failure("invalid_token", "DPoP proof issued in the future."); + } + + if (now - issuedAt > options.ProofLifetime + options.AllowedClockSkew) + { + return DpopValidationResult.Failure("invalid_token", "DPoP proof expired."); + } + + if (nonce is not null) + { + if (!payloadElement.TryGetProperty("nonce", out var nonceElement) || 
nonceElement.ValueKind != JsonValueKind.String) + { + return DpopValidationResult.Failure("invalid_token", "DPoP proof missing nonce claim."); + } + + if (!string.Equals(nonceElement.GetString(), nonce, StringComparison.Ordinal)) + { + return DpopValidationResult.Failure("invalid_token", "DPoP nonce mismatch."); + } + } + + var jwtId = jtiElement.GetString()!; + + try + { + var parameters = new TokenValidationParameters + { + ValidateAudience = false, + ValidateIssuer = false, + ValidateLifetime = false, + ValidateTokenReplay = false, + RequireSignedTokens = true, + ValidateIssuerSigningKey = true, + IssuerSigningKey = jwk, + ValidAlgorithms = options.NormalizedAlgorithms.ToArray() + }; + + tokenHandler.ValidateToken(proof, parameters, out _); + } + catch (Exception ex) + { + logger?.LogWarning(ex, "DPoP proof signature validation failed."); + return DpopValidationResult.Failure("invalid_signature", "DPoP proof signature validation failed."); + } + + if (!await replayCache.TryStoreAsync(jwtId, issuedAt + options.ReplayWindow, cancellationToken).ConfigureAwait(false)) + { + return DpopValidationResult.Failure("replay", "DPoP proof already used."); + } + + return DpopValidationResult.Success(jwk, jwtId, issuedAt); + } + + private static bool TryDecodeSegment(string token, int segmentIndex, out JsonElement element, out string? 
error) + { + element = default; + error = null; + + var segments = token.Split('.'); + if (segments.Length != 3) + { + error = "Token must contain three segments."; + return false; + } + + if (segmentIndex < 0 || segmentIndex > 1) + { + error = "Segment index must be 0 or 1."; + return false; + } + + try + { + var jsonBytes = Base64UrlEncoder.DecodeBytes(segments[segmentIndex]); + using var document = JsonDocument.Parse(jsonBytes); + element = document.RootElement.Clone(); + return true; + } + catch (Exception ex) + { + error = ex.Message; + return false; + } + } + + private static string NormalizeHtu(Uri uri) + { + var builder = new UriBuilder(uri) + { + Fragment = string.Empty + }; + + builder.Host = builder.Host.ToLowerInvariant(); + builder.Scheme = builder.Scheme.ToLowerInvariant(); + + if ((builder.Scheme == "http" && builder.Port == 80) || (builder.Scheme == "https" && builder.Port == 443)) + { + builder.Port = -1; + } + + return builder.Uri.GetComponents(UriComponents.SchemeAndServer | UriComponents.PathAndQuery, UriFormat.UriEscaped); + } + + private sealed class NullReplayCache : IDpopReplayCache + { + public static NullReplayCache Instance { get; } = new(); + + public ValueTask TryStoreAsync(string jwtId, DateTimeOffset expiresAt, CancellationToken cancellationToken = default) + => ValueTask.FromResult(true); + } +} diff --git a/src/StellaOps.Scanner.Core/Security/DpopValidationOptions.cs b/src/StellaOps.Scanner.Core/Security/DpopValidationOptions.cs new file mode 100644 index 00000000..347885f2 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/DpopValidationOptions.cs @@ -0,0 +1,58 @@ +using System.Collections.Immutable; +using System.Linq; + +namespace StellaOps.Scanner.Core.Security; + +public sealed class DpopValidationOptions +{ + private readonly HashSet allowedAlgorithms = new(StringComparer.Ordinal); + + public DpopValidationOptions() + { + allowedAlgorithms.Add("ES256"); + allowedAlgorithms.Add("ES384"); + } + + public TimeSpan 
ProofLifetime { get; set; } = TimeSpan.FromMinutes(2); + + public TimeSpan AllowedClockSkew { get; set; } = TimeSpan.FromSeconds(30); + + public TimeSpan ReplayWindow { get; set; } = TimeSpan.FromMinutes(5); + + public ISet AllowedAlgorithms => allowedAlgorithms; + + public IReadOnlySet NormalizedAlgorithms { get; private set; } = ImmutableHashSet.Empty; + + public void Validate() + { + if (ProofLifetime <= TimeSpan.Zero) + { + throw new InvalidOperationException("DPoP proof lifetime must be greater than zero."); + } + + if (AllowedClockSkew < TimeSpan.Zero || AllowedClockSkew > TimeSpan.FromMinutes(5)) + { + throw new InvalidOperationException("DPoP allowed clock skew must be between 0 seconds and 5 minutes."); + } + + if (ReplayWindow < TimeSpan.Zero) + { + throw new InvalidOperationException("DPoP replay window must be greater than or equal to zero."); + } + + if (allowedAlgorithms.Count == 0) + { + throw new InvalidOperationException("At least one allowed DPoP algorithm must be configured."); + } + + NormalizedAlgorithms = allowedAlgorithms + .Select(static algorithm => algorithm.Trim().ToUpperInvariant()) + .Where(static algorithm => algorithm.Length > 0) + .ToImmutableHashSet(StringComparer.Ordinal); + + if (NormalizedAlgorithms.Count == 0) + { + throw new InvalidOperationException("Allowed DPoP algorithms cannot be empty after normalization."); + } + } +} diff --git a/src/StellaOps.Scanner.Core/Security/DpopValidationResult.cs b/src/StellaOps.Scanner.Core/Security/DpopValidationResult.cs new file mode 100644 index 00000000..02ae27fb --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/DpopValidationResult.cs @@ -0,0 +1,34 @@ +using Microsoft.IdentityModel.Tokens; + +namespace StellaOps.Scanner.Core.Security; + +public sealed class DpopValidationResult +{ + private DpopValidationResult(bool success, string? errorCode, string? errorDescription, SecurityKey? key, string? jwtId, DateTimeOffset? 
issuedAt) + { + IsValid = success; + ErrorCode = errorCode; + ErrorDescription = errorDescription; + PublicKey = key; + JwtId = jwtId; + IssuedAt = issuedAt; + } + + public bool IsValid { get; } + + public string? ErrorCode { get; } + + public string? ErrorDescription { get; } + + public SecurityKey? PublicKey { get; } + + public string? JwtId { get; } + + public DateTimeOffset? IssuedAt { get; } + + public static DpopValidationResult Success(SecurityKey key, string jwtId, DateTimeOffset issuedAt) + => new(true, null, null, key, jwtId, issuedAt); + + public static DpopValidationResult Failure(string code, string description) + => new(false, code, description, null, null, null); +} diff --git a/src/StellaOps.Scanner.Core/Security/IAuthorityTokenSource.cs b/src/StellaOps.Scanner.Core/Security/IAuthorityTokenSource.cs new file mode 100644 index 00000000..831c9b6e --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/IAuthorityTokenSource.cs @@ -0,0 +1,11 @@ +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Core.Security; + +public interface IAuthorityTokenSource +{ + ValueTask GetAsync(string audience, IEnumerable scopes, CancellationToken cancellationToken = default); + + ValueTask InvalidateAsync(string audience, IEnumerable scopes, CancellationToken cancellationToken = default); +} diff --git a/src/StellaOps.Scanner.Core/Security/IDpopProofValidator.cs b/src/StellaOps.Scanner.Core/Security/IDpopProofValidator.cs new file mode 100644 index 00000000..d6c0a61e --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/IDpopProofValidator.cs @@ -0,0 +1,9 @@ +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Core.Security; + +public interface IDpopProofValidator +{ + ValueTask ValidateAsync(string proof, string httpMethod, Uri httpUri, string? 
nonce = null, CancellationToken cancellationToken = default); +} diff --git a/src/StellaOps.Scanner.Core/Security/IDpopReplayCache.cs b/src/StellaOps.Scanner.Core/Security/IDpopReplayCache.cs new file mode 100644 index 00000000..758d3933 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/IDpopReplayCache.cs @@ -0,0 +1,9 @@ +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Core.Security; + +public interface IDpopReplayCache +{ + ValueTask TryStoreAsync(string jwtId, DateTimeOffset expiresAt, CancellationToken cancellationToken = default); +} diff --git a/src/StellaOps.Scanner.Core/Security/IPluginCatalogGuard.cs b/src/StellaOps.Scanner.Core/Security/IPluginCatalogGuard.cs new file mode 100644 index 00000000..9eb1ba30 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/IPluginCatalogGuard.cs @@ -0,0 +1,12 @@ +namespace StellaOps.Scanner.Core.Security; + +public interface IPluginCatalogGuard +{ + IReadOnlyCollection KnownPlugins { get; } + + bool IsSealed { get; } + + void EnsureRegistrationAllowed(string pluginPath); + + void Seal(); +} diff --git a/src/StellaOps.Scanner.Core/Security/InMemoryDpopReplayCache.cs b/src/StellaOps.Scanner.Core/Security/InMemoryDpopReplayCache.cs new file mode 100644 index 00000000..e8cc7ff4 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/InMemoryDpopReplayCache.cs @@ -0,0 +1,65 @@ +using System.Collections.Concurrent; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Core.Security; + +public sealed class InMemoryDpopReplayCache : IDpopReplayCache +{ + private readonly ConcurrentDictionary entries = new(StringComparer.Ordinal); + private readonly TimeProvider timeProvider; + + public InMemoryDpopReplayCache(TimeProvider? timeProvider = null) + { + this.timeProvider = timeProvider ?? 
TimeProvider.System; + } + + public ValueTask TryStoreAsync(string jwtId, DateTimeOffset expiresAt, CancellationToken cancellationToken = default) + { + ArgumentException.ThrowIfNullOrWhiteSpace(jwtId); + + var now = timeProvider.GetUtcNow(); + RemoveExpired(now); + + if (entries.TryAdd(jwtId, expiresAt)) + { + return ValueTask.FromResult(true); + } + + while (!cancellationToken.IsCancellationRequested) + { + if (!entries.TryGetValue(jwtId, out var existing)) + { + if (entries.TryAdd(jwtId, expiresAt)) + { + return ValueTask.FromResult(true); + } + + continue; + } + + if (existing > now) + { + return ValueTask.FromResult(false); + } + + if (entries.TryUpdate(jwtId, expiresAt, existing)) + { + return ValueTask.FromResult(true); + } + } + + return ValueTask.FromResult(false); + } + + private void RemoveExpired(DateTimeOffset now) + { + foreach (var entry in entries) + { + if (entry.Value <= now) + { + entries.TryRemove(entry.Key, out _); + } + } + } +} diff --git a/src/StellaOps.Scanner.Core/Security/RestartOnlyPluginGuard.cs b/src/StellaOps.Scanner.Core/Security/RestartOnlyPluginGuard.cs new file mode 100644 index 00000000..32a636ab --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/RestartOnlyPluginGuard.cs @@ -0,0 +1,53 @@ +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; + +namespace StellaOps.Scanner.Core.Security; + +public sealed class RestartOnlyPluginGuard : IPluginCatalogGuard +{ + private readonly ConcurrentDictionary plugins = new(StringComparer.OrdinalIgnoreCase); + private bool sealedState; + + public RestartOnlyPluginGuard(IEnumerable? 
initialPlugins = null) + { + if (initialPlugins is not null) + { + foreach (var plugin in initialPlugins) + { + var normalized = Normalize(plugin); + plugins.TryAdd(normalized, 0); + } + } + } + + public IReadOnlyCollection KnownPlugins => plugins.Keys.ToArray(); + + public bool IsSealed => Volatile.Read(ref sealedState); + + public void EnsureRegistrationAllowed(string pluginPath) + { + ArgumentException.ThrowIfNullOrWhiteSpace(pluginPath); + + var normalized = Normalize(pluginPath); + if (IsSealed && !plugins.ContainsKey(normalized)) + { + throw new InvalidOperationException($"Plug-in '{pluginPath}' cannot be registered after startup. Restart required."); + } + + plugins.TryAdd(normalized, 0); + } + + public void Seal() + { + Volatile.Write(ref sealedState, true); + } + + private static string Normalize(string path) + { + var full = Path.GetFullPath(path); + return full.TrimEnd(Path.DirectorySeparatorChar, Path.AltDirectorySeparatorChar); + } +} diff --git a/src/StellaOps.Scanner.Core/Security/ScannerOperationalToken.cs b/src/StellaOps.Scanner.Core/Security/ScannerOperationalToken.cs new file mode 100644 index 00000000..c258ca63 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/ScannerOperationalToken.cs @@ -0,0 +1,66 @@ +using System.Collections.ObjectModel; +using System.Linq; + +namespace StellaOps.Scanner.Core.Security; + +public readonly record struct ScannerOperationalToken( + string AccessToken, + string TokenType, + DateTimeOffset ExpiresAt, + IReadOnlyList Scopes) +{ + public bool IsExpired(TimeProvider timeProvider, TimeSpan refreshSkew) + { + ArgumentNullException.ThrowIfNull(timeProvider); + + var now = timeProvider.GetUtcNow(); + return now >= ExpiresAt - refreshSkew; + } + + public static ScannerOperationalToken FromResult( + string accessToken, + string tokenType, + DateTimeOffset expiresAt, + IEnumerable scopes) + { + ArgumentException.ThrowIfNullOrWhiteSpace(accessToken); + ArgumentException.ThrowIfNullOrWhiteSpace(tokenType); + + 
IReadOnlyList normalized = scopes switch + { + null => Array.Empty(), + IReadOnlyList readOnly => readOnly.Count == 0 ? Array.Empty() : readOnly, + ICollection collection => NormalizeCollection(collection), + _ => NormalizeEnumerable(scopes) + }; + + return new ScannerOperationalToken( + accessToken, + tokenType, + expiresAt, + normalized); + } + + private static IReadOnlyList NormalizeCollection(ICollection collection) + { + if (collection.Count == 0) + { + return Array.Empty(); + } + + if (collection is IReadOnlyList readOnly) + { + return readOnly; + } + + var buffer = new string[collection.Count]; + collection.CopyTo(buffer, 0); + return new ReadOnlyCollection(buffer); + } + + private static IReadOnlyList NormalizeEnumerable(IEnumerable scopes) + { + var buffer = scopes.ToArray(); + return buffer.Length == 0 ? Array.Empty() : new ReadOnlyCollection(buffer); + } +} diff --git a/src/StellaOps.Scanner.Core/Security/ServiceCollectionExtensions.cs b/src/StellaOps.Scanner.Core/Security/ServiceCollectionExtensions.cs new file mode 100644 index 00000000..833c93ee --- /dev/null +++ b/src/StellaOps.Scanner.Core/Security/ServiceCollectionExtensions.cs @@ -0,0 +1,36 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Options; +using StellaOps.Auth.Client; + +namespace StellaOps.Scanner.Core.Security; + +public static class ServiceCollectionExtensions +{ + public static IServiceCollection AddScannerAuthorityCore( + this IServiceCollection services, + Action configureAuthority, + Action? 
configureDpop = null) + { + ArgumentNullException.ThrowIfNull(services); + ArgumentNullException.ThrowIfNull(configureAuthority); + + services.AddStellaOpsAuthClient(configureAuthority); + + if (configureDpop is not null) + { + services.AddOptions().Configure(configureDpop).PostConfigure(static options => options.Validate()); + } + else + { + services.AddOptions().PostConfigure(static options => options.Validate()); + } + + services.TryAddSingleton(provider => new InMemoryDpopReplayCache(provider.GetService())); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + + return services; + } +} diff --git a/src/StellaOps.Scanner.Core/Serialization/ScannerJsonOptions.cs b/src/StellaOps.Scanner.Core/Serialization/ScannerJsonOptions.cs new file mode 100644 index 00000000..504852af --- /dev/null +++ b/src/StellaOps.Scanner.Core/Serialization/ScannerJsonOptions.cs @@ -0,0 +1,21 @@ +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Core.Serialization; + +public static class ScannerJsonOptions +{ + public static JsonSerializerOptions Default { get; } = CreateDefault(); + + public static JsonSerializerOptions CreateDefault(bool indent = false) + { + var options = new JsonSerializerOptions(JsonSerializerDefaults.Web) + { + WriteIndented = indent + }; + + options.Converters.Add(new JsonStringEnumConverter(JsonNamingPolicy.CamelCase)); + + return options; + } +} diff --git a/src/StellaOps.Scanner.Core/StellaOps.Scanner.Core.csproj b/src/StellaOps.Scanner.Core/StellaOps.Scanner.Core.csproj new file mode 100644 index 00000000..ee9ecfa5 --- /dev/null +++ b/src/StellaOps.Scanner.Core/StellaOps.Scanner.Core.csproj @@ -0,0 +1,18 @@ + + + net10.0 + preview + enable + enable + true + + + + + + + + + + + diff --git a/src/StellaOps.Scanner.Core/TASKS.md b/src/StellaOps.Scanner.Core/TASKS.md new file mode 100644 index 00000000..55eeac1e --- /dev/null +++ b/src/StellaOps.Scanner.Core/TASKS.md @@ -0,0 +1,7 @@ +# 
Scanner Core Task Board + +| ID | Status | Owner(s) | Depends on | Description | Exit Criteria | +|----|--------|----------|------------|-------------|---------------| +| SCANNER-CORE-09-501 | DONE (2025-10-18) | Scanner Core Guild | — | Define shared DTOs (ScanJob, ProgressEvent), error taxonomy, and deterministic ID/timestamp helpers aligning with `ARCHITECTURE_SCANNER.md` §3–§4. | DTOs serialize deterministically, helpers produce reproducible IDs/timestamps, tests cover round-trips and hash derivation. | +| SCANNER-CORE-09-502 | DONE (2025-10-18) | Scanner Core Guild | SCANNER-CORE-09-501 | Observability helpers (correlation IDs, logging scopes, metric namespacing, deterministic hashes) consumed by WebService/Worker. | Logging/metrics helpers allocate minimally, correlation IDs stable, ActivitySource emitted; tests assert determinism. | +| SCANNER-CORE-09-503 | DONE (2025-10-18) | Scanner Core Guild | SCANNER-CORE-09-501, SCANNER-CORE-09-502 | Security utilities: Authority client factory, OpTok caching, DPoP verifier, restart-time plug-in guardrails for scanner components. | Authority helpers cache tokens, DPoP validator rejects invalid proofs, plug-in guard prevents runtime additions; tests cover happy/error paths. 
| diff --git a/src/StellaOps.Scanner.Core/Utility/ScannerIdentifiers.cs b/src/StellaOps.Scanner.Core/Utility/ScannerIdentifiers.cs new file mode 100644 index 00000000..52fbb512 --- /dev/null +++ b/src/StellaOps.Scanner.Core/Utility/ScannerIdentifiers.cs @@ -0,0 +1,136 @@ +using System.Globalization; +using System.Linq; +using System.Security.Cryptography; +using System.Text; +using StellaOps.Scanner.Core.Contracts; + +namespace StellaOps.Scanner.Core.Utility; + +public static class ScannerIdentifiers +{ + private static readonly Guid ScanJobNamespace = new("d985aa76-8c2b-4cba-bac0-c98c90674f04"); + private static readonly Guid CorrelationNamespace = new("7cde18f5-729e-4ea1-be3d-46fda4c55e38"); + + public static ScanJobId CreateJobId( + string imageReference, + string? imageDigest = null, + string? tenantId = null, + string? salt = null) + { + ArgumentException.ThrowIfNullOrWhiteSpace(imageReference); + + var normalizedReference = NormalizeImageReference(imageReference); + var normalizedDigest = NormalizeDigest(imageDigest) ?? "none"; + var normalizedTenant = string.IsNullOrWhiteSpace(tenantId) ? "global" : tenantId.Trim().ToLowerInvariant(); + var normalizedSalt = (salt?.Trim() ?? string.Empty).ToLowerInvariant(); + + using var sha256 = SHA256.Create(); + var payload = $"{normalizedReference}|{normalizedDigest}|{normalizedTenant}|{normalizedSalt}"; + var hashed = sha256.ComputeHash(Encoding.UTF8.GetBytes(payload)); + return new ScanJobId(CreateGuidFromHash(ScanJobNamespace, hashed)); + } + + public static string CreateCorrelationId(ScanJobId jobId, string? stage = null, string? suffix = null) + { + var normalizedStage = string.IsNullOrWhiteSpace(stage) + ? "scan" + : stage.Trim().ToLowerInvariant().Replace(' ', '-'); + + var normalizedSuffix = string.IsNullOrWhiteSpace(suffix) + ? 
string.Empty + : "-" + suffix.Trim().ToLowerInvariant().Replace(' ', '-'); + + return $"scan-{normalizedStage}-{jobId}{normalizedSuffix}"; + } + + public static string CreateDeterministicHash(params string[] segments) + { + if (segments is null || segments.Length == 0) + { + throw new ArgumentException("At least one segment must be provided.", nameof(segments)); + } + + using var sha256 = SHA256.Create(); + var joined = string.Join('|', segments.Select(static s => s?.Trim() ?? string.Empty)); + var hash = sha256.ComputeHash(Encoding.UTF8.GetBytes(joined)); + return Convert.ToHexString(hash).ToLowerInvariant(); + } + + public static Guid CreateDeterministicGuid(Guid namespaceId, ReadOnlySpan nameBytes) + { + Span namespaceBytes = stackalloc byte[16]; + namespaceId.TryWriteBytes(namespaceBytes); + + Span buffer = stackalloc byte[namespaceBytes.Length + nameBytes.Length]; + namespaceBytes.CopyTo(buffer); + nameBytes.CopyTo(buffer[namespaceBytes.Length..]); + + Span hash = stackalloc byte[32]; + SHA256.TryHashData(buffer, hash, out _); + + Span guidBytes = stackalloc byte[16]; + hash[..16].CopyTo(guidBytes); + + guidBytes[6] = (byte)((guidBytes[6] & 0x0F) | 0x50); + guidBytes[8] = (byte)((guidBytes[8] & 0x3F) | 0x80); + + return new Guid(guidBytes); + } + + public static string NormalizeImageReference(string reference) + { + ArgumentException.ThrowIfNullOrWhiteSpace(reference); + var trimmed = reference.Trim(); + var atIndex = trimmed.IndexOf('@'); + if (atIndex > 0) + { + var prefix = trimmed[..atIndex].ToLowerInvariant(); + return $"{prefix}{trimmed[atIndex..]}"; + } + + var colonIndex = trimmed.IndexOf(':'); + if (colonIndex > 0) + { + var name = trimmed[..colonIndex].ToLowerInvariant(); + var tag = trimmed[(colonIndex + 1)..]; + return $"{name}:{tag}"; + } + + return trimmed.ToLowerInvariant(); + } + + public static string? NormalizeDigest(string? 
digest) + { + if (string.IsNullOrWhiteSpace(digest)) + { + return null; + } + + var trimmed = digest.Trim(); + var parts = trimmed.Split(':', 2, StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries); + if (parts.Length != 2) + { + return trimmed.ToLowerInvariant(); + } + + return $"{parts[0].ToLowerInvariant()}:{parts[1].ToLowerInvariant()}"; + } + + public static string CreateDeterministicCorrelation(string audience, ScanJobId jobId, string? component = null) + { + using var sha256 = SHA256.Create(); + var payload = $"{audience.Trim().ToLowerInvariant()}|{jobId}|{component?.Trim().ToLowerInvariant() ?? string.Empty}"; + var hash = sha256.ComputeHash(Encoding.UTF8.GetBytes(payload)); + var guid = CreateGuidFromHash(CorrelationNamespace, hash); + return $"corr-{guid.ToString("n", CultureInfo.InvariantCulture)}"; + } + + private static Guid CreateGuidFromHash(Guid namespaceId, ReadOnlySpan hash) + { + Span guidBytes = stackalloc byte[16]; + hash[..16].CopyTo(guidBytes); + guidBytes[6] = (byte)((guidBytes[6] & 0x0F) | 0x50); + guidBytes[8] = (byte)((guidBytes[8] & 0x3F) | 0x80); + return new Guid(guidBytes); + } +} diff --git a/src/StellaOps.Scanner.Core/Utility/ScannerTimestamps.cs b/src/StellaOps.Scanner.Core/Utility/ScannerTimestamps.cs new file mode 100644 index 00000000..dfd8bf2e --- /dev/null +++ b/src/StellaOps.Scanner.Core/Utility/ScannerTimestamps.cs @@ -0,0 +1,43 @@ +using System.Globalization; + +namespace StellaOps.Scanner.Core.Utility; + +public static class ScannerTimestamps +{ + private const long TicksPerMicrosecond = TimeSpan.TicksPerMillisecond / 1000; + + public static DateTimeOffset Normalize(DateTimeOffset value) + { + var utc = value.ToUniversalTime(); + var ticks = utc.Ticks - (utc.Ticks % TicksPerMicrosecond); + return new DateTimeOffset(ticks, TimeSpan.Zero); + } + + public static DateTimeOffset UtcNow(TimeProvider? provider = null) + => Normalize((provider ?? 
TimeProvider.System).GetUtcNow()); + + public static string ToIso8601(DateTimeOffset value) + => Normalize(value).ToString("yyyy-MM-dd'T'HH:mm:ss.ffffff'Z'", CultureInfo.InvariantCulture); + + public static bool TryParseIso8601(string? value, out DateTimeOffset timestamp) + { + if (string.IsNullOrWhiteSpace(value)) + { + timestamp = default; + return false; + } + + if (DateTimeOffset.TryParse( + value, + CultureInfo.InvariantCulture, + DateTimeStyles.AssumeUniversal | DateTimeStyles.AdjustToUniversal, + out var parsed)) + { + timestamp = Normalize(parsed); + return true; + } + + timestamp = default; + return false; + } +} diff --git a/src/StellaOps.Scanner.Queue.Tests/QueueLeaseIntegrationTests.cs b/src/StellaOps.Scanner.Queue.Tests/QueueLeaseIntegrationTests.cs new file mode 100644 index 00000000..e04295e5 --- /dev/null +++ b/src/StellaOps.Scanner.Queue.Tests/QueueLeaseIntegrationTests.cs @@ -0,0 +1,353 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Collections.ObjectModel; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using Microsoft.Extensions.Time.Testing; +using StellaOps.Scanner.Queue; +using Xunit; + +namespace StellaOps.Scanner.Queue.Tests; + +public sealed class QueueLeaseIntegrationTests +{ + private readonly ScannerQueueOptions _options = new() + { + MaxDeliveryAttempts = 3, + RetryInitialBackoff = TimeSpan.FromMilliseconds(1), + RetryMaxBackoff = TimeSpan.FromMilliseconds(5), + DefaultLeaseDuration = TimeSpan.FromSeconds(5) + }; + + [Fact] + public async Task Enqueue_ShouldDeduplicate_ByIdempotencyKey() + { + var clock = new FakeTimeProvider(); + var queue = new InMemoryScanQueue(_options, clock); + + var payload = new byte[] { 1, 2, 3 }; + var message = new ScanQueueMessage("job-1", payload) + { + IdempotencyKey = "idem-1" + }; + + var first = await queue.EnqueueAsync(message); + first.Deduplicated.Should().BeFalse(); + + var second = await 
queue.EnqueueAsync(message); + second.Deduplicated.Should().BeTrue(); + } + + [Fact] + public async Task Lease_Acknowledge_ShouldRemoveFromQueue() + { + var clock = new FakeTimeProvider(); + var queue = new InMemoryScanQueue(_options, clock); + + var message = new ScanQueueMessage("job-ack", new byte[] { 42 }); + await queue.EnqueueAsync(message); + + var lease = await LeaseSingleAsync(queue, consumer: "worker-1"); + lease.Should().NotBeNull(); + + await lease!.AcknowledgeAsync(); + + var afterAck = await queue.LeaseAsync(new QueueLeaseRequest("worker-1", 1, TimeSpan.FromSeconds(1))); + afterAck.Should().BeEmpty(); + } + + [Fact] + public async Task Release_WithRetry_ShouldDeadLetterAfterMaxAttempts() + { + var clock = new FakeTimeProvider(); + var queue = new InMemoryScanQueue(_options, clock); + + var message = new ScanQueueMessage("job-retry", new byte[] { 5 }); + await queue.EnqueueAsync(message); + + for (var attempt = 1; attempt <= _options.MaxDeliveryAttempts; attempt++) + { + var lease = await LeaseSingleAsync(queue, consumer: $"worker-{attempt}"); + lease.Should().NotBeNull(); + + await lease!.ReleaseAsync(QueueReleaseDisposition.Retry); + } + + queue.DeadLetters.Should().ContainSingle(dead => dead.JobId == "job-retry"); + } + + [Fact] + public async Task Retry_ShouldIncreaseAttemptOnNextLease() + { + var clock = new FakeTimeProvider(); + var queue = new InMemoryScanQueue(_options, clock); + + await queue.EnqueueAsync(new ScanQueueMessage("job-retry-attempt", new byte[] { 77 })); + + var firstLease = await LeaseSingleAsync(queue, "worker-retry"); + firstLease.Should().NotBeNull(); + firstLease!.Attempt.Should().Be(1); + + await firstLease.ReleaseAsync(QueueReleaseDisposition.Retry); + + var secondLease = await LeaseSingleAsync(queue, "worker-retry"); + secondLease.Should().NotBeNull(); + secondLease!.Attempt.Should().Be(2); + } + + private static async Task LeaseSingleAsync(InMemoryScanQueue queue, string consumer) + { + var leases = await 
queue.LeaseAsync(new QueueLeaseRequest(consumer, 1, TimeSpan.FromSeconds(1))); + return leases.FirstOrDefault(); + } + + private sealed class InMemoryScanQueue : IScanQueue + { + private readonly ScannerQueueOptions _options; + private readonly TimeProvider _timeProvider; + private readonly ConcurrentQueue _ready = new(); + private readonly ConcurrentDictionary _idempotency = new(StringComparer.Ordinal); + private readonly ConcurrentDictionary _inFlight = new(StringComparer.Ordinal); + private readonly List _deadLetters = new(); + private long _sequence; + + public InMemoryScanQueue(ScannerQueueOptions options, TimeProvider timeProvider) + { + _options = options; + _timeProvider = timeProvider; + } + + public IReadOnlyList DeadLetters => _deadLetters; + + public ValueTask EnqueueAsync(ScanQueueMessage message, CancellationToken cancellationToken = default) + { + var token = message.IdempotencyKey ?? message.JobId; + if (_idempotency.TryGetValue(token, out var existing)) + { + return ValueTask.FromResult(new QueueEnqueueResult(existing.SequenceId, true)); + } + + var entry = new QueueEntry( + sequenceId: Interlocked.Increment(ref _sequence).ToString(), + jobId: message.JobId, + payload: message.Payload.ToArray(), + idempotencyKey: token, + attempt: 1, + enqueuedAt: _timeProvider.GetUtcNow()); + + _idempotency[token] = entry; + _ready.Enqueue(entry); + return ValueTask.FromResult(new QueueEnqueueResult(entry.SequenceId, false)); + } + + public ValueTask> LeaseAsync(QueueLeaseRequest request, CancellationToken cancellationToken = default) + { + var now = _timeProvider.GetUtcNow(); + var leases = new List(request.BatchSize); + + while (leases.Count < request.BatchSize && _ready.TryDequeue(out var entry)) + { + entry.Attempt = Math.Max(entry.Attempt, entry.Deliveries + 1); + entry.Deliveries = entry.Attempt; + entry.LastLeaseAt = now; + _inFlight[entry.SequenceId] = entry; + + var lease = new InMemoryLease( + this, + entry, + request.Consumer, + now, + 
request.LeaseDuration); + leases.Add(lease); + } + + return ValueTask.FromResult>(leases); + } + + public ValueTask> ClaimExpiredLeasesAsync(QueueClaimOptions options, CancellationToken cancellationToken = default) + { + var now = _timeProvider.GetUtcNow(); + var leases = _inFlight.Values + .Where(entry => now - entry.LastLeaseAt >= options.MinIdleTime) + .Take(options.BatchSize) + .Select(entry => new InMemoryLease(this, entry, options.ClaimantConsumer, now, _options.DefaultLeaseDuration)) + .Cast() + .ToList(); + + return ValueTask.FromResult>(leases); + } + + internal Task AcknowledgeAsync(QueueEntry entry) + { + _inFlight.TryRemove(entry.SequenceId, out _); + _idempotency.TryRemove(entry.IdempotencyKey, out _); + return Task.CompletedTask; + } + + internal Task RenewAsync(QueueEntry entry, TimeSpan leaseDuration) + { + var expires = _timeProvider.GetUtcNow().Add(leaseDuration); + entry.LeaseExpiresAt = expires; + return Task.FromResult(expires); + } + + internal Task ReleaseAsync(QueueEntry entry, QueueReleaseDisposition disposition) + { + if (disposition == QueueReleaseDisposition.Retry && entry.Attempt >= _options.MaxDeliveryAttempts) + { + return DeadLetterAsync(entry, $"max-delivery-attempts:{entry.Attempt}"); + } + + if (disposition == QueueReleaseDisposition.Retry) + { + entry.Attempt++; + _ready.Enqueue(entry); + } + else + { + _idempotency.TryRemove(entry.IdempotencyKey, out _); + } + + _inFlight.TryRemove(entry.SequenceId, out _); + return Task.CompletedTask; + } + + internal Task DeadLetterAsync(QueueEntry entry, string reason) + { + entry.DeadLetterReason = reason; + _inFlight.TryRemove(entry.SequenceId, out _); + _idempotency.TryRemove(entry.IdempotencyKey, out _); + _deadLetters.Add(entry); + return Task.CompletedTask; + } + + private sealed class InMemoryLease : IScanQueueLease + { + private readonly InMemoryScanQueue _owner; + private readonly QueueEntry _entry; + private int _completed; + + public InMemoryLease( + InMemoryScanQueue owner, + 
QueueEntry entry, + string consumer, + DateTimeOffset now, + TimeSpan leaseDuration) + { + _owner = owner; + _entry = entry; + Consumer = consumer; + MessageId = entry.SequenceId; + JobId = entry.JobId; + Payload = entry.Payload; + Attempt = entry.Attempt; + EnqueuedAt = entry.EnqueuedAt; + LeaseExpiresAt = now.Add(leaseDuration); + IdempotencyKey = entry.IdempotencyKey; + Attributes = entry.Attributes; + } + + public string MessageId { get; } + + public string JobId { get; } + + public ReadOnlyMemory Payload { get; } + + public int Attempt { get; } + + public DateTimeOffset EnqueuedAt { get; } + + public DateTimeOffset LeaseExpiresAt { get; private set; } + + public string Consumer { get; } + + public string? IdempotencyKey { get; } + + public IReadOnlyDictionary Attributes { get; } + + public Task AcknowledgeAsync(CancellationToken cancellationToken = default) + { + if (TryComplete()) + { + return _owner.AcknowledgeAsync(_entry); + } + + return Task.CompletedTask; + } + + public Task RenewAsync(TimeSpan leaseDuration, CancellationToken cancellationToken = default) + { + return RenewInternalAsync(leaseDuration); + } + + public Task ReleaseAsync(QueueReleaseDisposition disposition, CancellationToken cancellationToken = default) + { + if (TryComplete()) + { + return _owner.ReleaseAsync(_entry, disposition); + } + + return Task.CompletedTask; + } + + public Task DeadLetterAsync(string reason, CancellationToken cancellationToken = default) + { + if (TryComplete()) + { + return _owner.DeadLetterAsync(_entry, reason); + } + + return Task.CompletedTask; + } + + private async Task RenewInternalAsync(TimeSpan leaseDuration) + { + var expires = await _owner.RenewAsync(_entry, leaseDuration).ConfigureAwait(false); + LeaseExpiresAt = expires; + } + + private bool TryComplete() + => Interlocked.CompareExchange(ref _completed, 1, 0) == 0; + } + + internal sealed class QueueEntry + { + public QueueEntry(string sequenceId, string jobId, byte[] payload, string idempotencyKey, int 
attempt, DateTimeOffset enqueuedAt) + { + SequenceId = sequenceId; + JobId = jobId; + Payload = payload; + IdempotencyKey = idempotencyKey; + Attempt = attempt; + EnqueuedAt = enqueuedAt; + LastLeaseAt = enqueuedAt; + Attributes = new ReadOnlyDictionary(new Dictionary(StringComparer.Ordinal)); + } + + public string SequenceId { get; } + + public string JobId { get; } + + public byte[] Payload { get; } + + public string IdempotencyKey { get; } + + public int Attempt { get; set; } + + public int Deliveries { get; set; } + + public DateTimeOffset EnqueuedAt { get; } + + public DateTimeOffset LeaseExpiresAt { get; set; } + + public DateTimeOffset LastLeaseAt { get; set; } + + public IReadOnlyDictionary Attributes { get; } + + public string? DeadLetterReason { get; set; } + } + } +} diff --git a/src/StellaOps.Scanner.Queue.Tests/StellaOps.Scanner.Queue.Tests.csproj b/src/StellaOps.Scanner.Queue.Tests/StellaOps.Scanner.Queue.Tests.csproj new file mode 100644 index 00000000..1d9e7359 --- /dev/null +++ b/src/StellaOps.Scanner.Queue.Tests/StellaOps.Scanner.Queue.Tests.csproj @@ -0,0 +1,14 @@ + + + net10.0 + enable + enable + false + + + + + + + + diff --git a/src/StellaOps.Scanner.Queue/AGENTS.md b/src/StellaOps.Scanner.Queue/AGENTS.md new file mode 100644 index 00000000..82aeef5d --- /dev/null +++ b/src/StellaOps.Scanner.Queue/AGENTS.md @@ -0,0 +1,15 @@ +# StellaOps.Scanner.Queue — Agent Charter + +## Mission +Deliver the scanner job queue backbone defined in `docs/ARCHITECTURE_SCANNER.md`, providing deterministic, offline-friendly leasing semantics for WebService producers and Worker consumers. + +## Responsibilities +- Define queue abstractions with idempotent enqueue tokens, acknowledgement, lease renewal, and claim support. +- Ship first-party adapters for Redis Streams and NATS JetStream, respecting offline deployments and allow-listed hosts. +- Surface health probes, structured diagnostics, and metrics needed by Scanner WebService/Worker. 
+- Document operational expectations and configuration binding hooks. + +## Interfaces & Dependencies +- Consumes shared configuration primitives from `StellaOps.Configuration`. +- Exposes dependency injection extensions for `StellaOps.DependencyInjection`. +- Targets `net10.0` (preview) and aligns with scanner DTOs once `StellaOps.Scanner.Core` lands. diff --git a/src/StellaOps.Scanner.Queue/IScanQueue.cs b/src/StellaOps.Scanner.Queue/IScanQueue.cs new file mode 100644 index 00000000..6c437063 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/IScanQueue.cs @@ -0,0 +1,20 @@ +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Queue; + +public interface IScanQueue +{ + ValueTask EnqueueAsync( + ScanQueueMessage message, + CancellationToken cancellationToken = default); + + ValueTask> LeaseAsync( + QueueLeaseRequest request, + CancellationToken cancellationToken = default); + + ValueTask> ClaimExpiredLeasesAsync( + QueueClaimOptions options, + CancellationToken cancellationToken = default); +} diff --git a/src/StellaOps.Scanner.Queue/IScanQueueLease.cs b/src/StellaOps.Scanner.Queue/IScanQueueLease.cs new file mode 100644 index 00000000..ecf2be29 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/IScanQueueLease.cs @@ -0,0 +1,35 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Queue; + +public interface IScanQueueLease +{ + string MessageId { get; } + + string JobId { get; } + + ReadOnlyMemory Payload { get; } + + int Attempt { get; } + + DateTimeOffset EnqueuedAt { get; } + + DateTimeOffset LeaseExpiresAt { get; } + + string Consumer { get; } + + string? 
IdempotencyKey { get; } + + IReadOnlyDictionary Attributes { get; } + + Task AcknowledgeAsync(CancellationToken cancellationToken = default); + + Task RenewAsync(TimeSpan leaseDuration, CancellationToken cancellationToken = default); + + Task ReleaseAsync(QueueReleaseDisposition disposition, CancellationToken cancellationToken = default); + + Task DeadLetterAsync(string reason, CancellationToken cancellationToken = default); +} diff --git a/src/StellaOps.Scanner.Queue/Nats/NatsScanQueue.cs b/src/StellaOps.Scanner.Queue/Nats/NatsScanQueue.cs new file mode 100644 index 00000000..b236e03f --- /dev/null +++ b/src/StellaOps.Scanner.Queue/Nats/NatsScanQueue.cs @@ -0,0 +1,644 @@ +using System; +using System.Collections.Generic; +using System.Collections.ObjectModel; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using NATS.Client.Core; +using NATS.Client.JetStream; +using NATS.Client.JetStream.Models; + +namespace StellaOps.Scanner.Queue.Nats; + +internal sealed class NatsScanQueue : IScanQueue, IAsyncDisposable +{ + private const string TransportName = "nats"; + + private static readonly INatsSerializer PayloadSerializer = NatsRawSerializer.Default; + + private readonly ScannerQueueOptions _queueOptions; + private readonly NatsQueueOptions _options; + private readonly ILogger _logger; + private readonly TimeProvider _timeProvider; + private readonly SemaphoreSlim _connectionGate = new(1, 1); + private readonly Func> _connectionFactory; + + private NatsConnection? _connection; + private NatsJSContext? _jsContext; + private INatsJSConsumer? _consumer; + private bool _disposed; + + public NatsScanQueue( + ScannerQueueOptions queueOptions, + NatsQueueOptions options, + ILogger logger, + TimeProvider timeProvider, + Func>? connectionFactory = null) + { + _queueOptions = queueOptions ?? throw new ArgumentNullException(nameof(queueOptions)); + _options = options ?? 
throw new ArgumentNullException(nameof(options)); + _logger = logger ?? throw new ArgumentNullException(nameof(logger)); + _timeProvider = timeProvider ?? TimeProvider.System; + _connectionFactory = connectionFactory ?? ((opts, cancellationToken) => new ValueTask(new NatsConnection(opts))); + + if (string.IsNullOrWhiteSpace(_options.Url)) + { + throw new InvalidOperationException("NATS connection URL must be configured for the scanner queue."); + } + } + + public async ValueTask EnqueueAsync( + ScanQueueMessage message, + CancellationToken cancellationToken = default) + { + ArgumentNullException.ThrowIfNull(message); + + var js = await GetJetStreamAsync(cancellationToken).ConfigureAwait(false); + await EnsureStreamAndConsumerAsync(js, cancellationToken).ConfigureAwait(false); + + var idempotencyKey = message.IdempotencyKey ?? message.JobId; + var headers = BuildHeaders(message, idempotencyKey); + var publishOpts = new NatsJSPubOpts + { + MsgId = idempotencyKey, + RetryAttempts = 0 + }; + + var ack = await js.PublishAsync( + _options.Subject, + message.Payload.ToArray(), + PayloadSerializer, + publishOpts, + headers, + cancellationToken) + .ConfigureAwait(false); + + if (ack.Duplicate) + { + _logger.LogDebug( + "Duplicate NATS enqueue detected for job {JobId} (token {Token}).", + message.JobId, + idempotencyKey); + + QueueMetrics.RecordDeduplicated(TransportName); + return new QueueEnqueueResult(ack.Seq.ToString(), true); + } + + QueueMetrics.RecordEnqueued(TransportName); + _logger.LogDebug( + "Enqueued job {JobId} into NATS stream {Stream} with sequence {Sequence}.", + message.JobId, + ack.Stream, + ack.Seq); + + return new QueueEnqueueResult(ack.Seq.ToString(), false); + } + + public async ValueTask> LeaseAsync( + QueueLeaseRequest request, + CancellationToken cancellationToken = default) + { + ArgumentNullException.ThrowIfNull(request); + + var js = await GetJetStreamAsync(cancellationToken).ConfigureAwait(false); + var consumer = await 
EnsureStreamAndConsumerAsync(js, cancellationToken).ConfigureAwait(false); + + var fetchOpts = new NatsJSFetchOpts + { + MaxMsgs = request.BatchSize, + Expires = request.LeaseDuration, + IdleHeartbeat = _options.IdleHeartbeat + }; + + var now = _timeProvider.GetUtcNow(); + var leases = new List(capacity: request.BatchSize); + + await foreach (var msg in consumer.FetchAsync(PayloadSerializer, fetchOpts, cancellationToken).ConfigureAwait(false)) + { + var lease = CreateLease(msg, request.Consumer, now, request.LeaseDuration); + if (lease is not null) + { + leases.Add(lease); + } + } + + return leases; + } + + public async ValueTask> ClaimExpiredLeasesAsync( + QueueClaimOptions options, + CancellationToken cancellationToken = default) + { + ArgumentNullException.ThrowIfNull(options); + + var js = await GetJetStreamAsync(cancellationToken).ConfigureAwait(false); + var consumer = await EnsureStreamAndConsumerAsync(js, cancellationToken).ConfigureAwait(false); + + var fetchOpts = new NatsJSFetchOpts + { + MaxMsgs = options.BatchSize, + Expires = options.MinIdleTime, + IdleHeartbeat = _options.IdleHeartbeat + }; + + var now = _timeProvider.GetUtcNow(); + var leases = new List(options.BatchSize); + + await foreach (var msg in consumer.FetchAsync(PayloadSerializer, fetchOpts, cancellationToken).ConfigureAwait(false)) + { + var deliveries = (int)(msg.Metadata?.NumDelivered ?? 1); + if (deliveries <= 1) + { + // Fresh message; surface back to queue and continue. 
+ await msg.NakAsync(new AckOpts(), TimeSpan.Zero, cancellationToken).ConfigureAwait(false); + continue; + } + + var lease = CreateLease(msg, options.ClaimantConsumer, now, _queueOptions.DefaultLeaseDuration); + if (lease is not null) + { + leases.Add(lease); + } + } + + return leases; + } + + public async ValueTask DisposeAsync() + { + if (_disposed) + { + return; + } + + _disposed = true; + + if (_connection is not null) + { + await _connection.DisposeAsync().ConfigureAwait(false); + } + + _connectionGate.Dispose(); + GC.SuppressFinalize(this); + } + + internal async Task AcknowledgeAsync( + NatsScanQueueLease lease, + CancellationToken cancellationToken) + { + if (!lease.TryBeginCompletion()) + { + return; + } + + await lease.Message.AckAsync(new AckOpts(), cancellationToken).ConfigureAwait(false); + + QueueMetrics.RecordAck(TransportName); + _logger.LogDebug( + "Acknowledged job {JobId} (seq {Seq}).", + lease.JobId, + lease.MessageId); + } + + internal async Task RenewLeaseAsync( + NatsScanQueueLease lease, + TimeSpan leaseDuration, + CancellationToken cancellationToken) + { + await lease.Message.AckProgressAsync(new AckOpts(), cancellationToken).ConfigureAwait(false); + var expires = _timeProvider.GetUtcNow().Add(leaseDuration); + lease.RefreshLease(expires); + + _logger.LogDebug( + "Renewed NATS lease for job {JobId} until {Expires:u}.", + lease.JobId, + expires); + } + + internal async Task ReleaseAsync( + NatsScanQueueLease lease, + QueueReleaseDisposition disposition, + CancellationToken cancellationToken) + { + if (disposition == QueueReleaseDisposition.Retry + && lease.Attempt >= _queueOptions.MaxDeliveryAttempts) + { + _logger.LogWarning( + "Job {JobId} reached max delivery attempts ({Attempts}); shipping to dead-letter stream.", + lease.JobId, + lease.Attempt); + + await DeadLetterAsync( + lease, + $"max-delivery-attempts:{lease.Attempt}", + cancellationToken).ConfigureAwait(false); + return; + } + + if (!lease.TryBeginCompletion()) + { + return; + } + 
+ if (disposition == QueueReleaseDisposition.Retry) + { + QueueMetrics.RecordRetry(TransportName); + + var delay = CalculateBackoff(lease.Attempt); + await lease.Message.NakAsync(new AckOpts(), delay, cancellationToken).ConfigureAwait(false); + + _logger.LogWarning( + "Rescheduled job {JobId} via NATS NAK with delay {Delay} (attempt {Attempt}).", + lease.JobId, + delay, + lease.Attempt); + } + else + { + await lease.Message.AckTerminateAsync(new AckOpts(), cancellationToken).ConfigureAwait(false); + QueueMetrics.RecordAck(TransportName); + + _logger.LogInformation( + "Abandoned job {JobId} after {Attempt} attempt(s).", + lease.JobId, + lease.Attempt); + } + } + + internal async Task DeadLetterAsync( + NatsScanQueueLease lease, + string reason, + CancellationToken cancellationToken) + { + if (!lease.TryBeginCompletion()) + { + return; + } + + await lease.Message.AckAsync(new AckOpts(), cancellationToken).ConfigureAwait(false); + + var js = await GetJetStreamAsync(cancellationToken).ConfigureAwait(false); + await EnsureDeadLetterStreamAsync(js, cancellationToken).ConfigureAwait(false); + + var headers = BuildDeadLetterHeaders(lease, reason); + await js.PublishAsync( + _options.DeadLetterSubject, + lease.Payload.ToArray(), + PayloadSerializer, + new NatsJSPubOpts(), + headers, + cancellationToken) + .ConfigureAwait(false); + + QueueMetrics.RecordDeadLetter(TransportName); + _logger.LogError( + "Dead-lettered job {JobId} (attempt {Attempt}): {Reason}", + lease.JobId, + lease.Attempt, + reason); + } + + private async Task GetJetStreamAsync(CancellationToken cancellationToken) + { + if (_jsContext is not null) + { + return _jsContext; + } + + var connection = await EnsureConnectionAsync(cancellationToken).ConfigureAwait(false); + + await _connectionGate.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + _jsContext ??= new NatsJSContext(connection); + return _jsContext; + } + finally + { + _connectionGate.Release(); + } + } + + private async ValueTask 
EnsureStreamAndConsumerAsync( + NatsJSContext js, + CancellationToken cancellationToken) + { + if (_consumer is not null) + { + return _consumer; + } + + await _connectionGate.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + if (_consumer is not null) + { + return _consumer; + } + + await EnsureStreamAsync(js, cancellationToken).ConfigureAwait(false); + await EnsureDeadLetterStreamAsync(js, cancellationToken).ConfigureAwait(false); + + var consumerConfig = new ConsumerConfig + { + DurableName = _options.DurableConsumer, + AckPolicy = ConsumerConfigAckPolicy.Explicit, + ReplayPolicy = ConsumerConfigReplayPolicy.Instant, + DeliverPolicy = ConsumerConfigDeliverPolicy.All, + AckWait = ToNanoseconds(_options.AckWait), + MaxAckPending = _options.MaxInFlight, + MaxDeliver = Math.Max(1, _queueOptions.MaxDeliveryAttempts), + FilterSubjects = new[] { _options.Subject } + }; + + try + { + _consumer = await js.CreateConsumerAsync( + _options.Stream, + consumerConfig, + cancellationToken) + .ConfigureAwait(false); + } + catch (NatsJSApiException apiEx) + { + _logger.LogDebug(apiEx, + "CreateConsumerAsync failed with code {Code}; attempting to fetch existing durable consumer {Durable}.", + apiEx.Error?.Code, + _options.DurableConsumer); + + _consumer = await js.GetConsumerAsync( + _options.Stream, + _options.DurableConsumer, + cancellationToken) + .ConfigureAwait(false); + } + + return _consumer; + } + finally + { + _connectionGate.Release(); + } + } + + private async Task EnsureConnectionAsync(CancellationToken cancellationToken) + { + if (_connection is not null) + { + return _connection; + } + + await _connectionGate.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + if (_connection is not null) + { + return _connection; + } + + var opts = new NatsOpts + { + Url = _options.Url!, + Name = "stellaops-scanner-queue", + CommandTimeout = TimeSpan.FromSeconds(10), + RequestTimeout = TimeSpan.FromSeconds(20), + PingInterval = TimeSpan.FromSeconds(30) + }; 
+ + _connection = await _connectionFactory(opts, cancellationToken).ConfigureAwait(false); + await _connection.ConnectAsync().ConfigureAwait(false); + return _connection; + } + finally + { + _connectionGate.Release(); + } + } + + private async Task EnsureStreamAsync(NatsJSContext js, CancellationToken cancellationToken) + { + try + { + await js.GetStreamAsync( + _options.Stream, + new StreamInfoRequest(), + cancellationToken) + .ConfigureAwait(false); + } + catch (NatsJSApiException) + { + var config = new StreamConfig( + name: _options.Stream, + subjects: new[] { _options.Subject }) + { + Retention = StreamConfigRetention.Workqueue, + Storage = StreamConfigStorage.File, + MaxConsumers = -1, + MaxMsgs = -1, + MaxBytes = -1, + MaxAge = 0 + }; + + await js.CreateStreamAsync(config, cancellationToken).ConfigureAwait(false); + _logger.LogInformation("Created NATS JetStream stream {Stream} ({Subject}).", _options.Stream, _options.Subject); + } + } + + private async Task EnsureDeadLetterStreamAsync(NatsJSContext js, CancellationToken cancellationToken) + { + try + { + await js.GetStreamAsync( + _options.DeadLetterStream, + new StreamInfoRequest(), + cancellationToken) + .ConfigureAwait(false); + } + catch (NatsJSApiException) + { + var config = new StreamConfig( + name: _options.DeadLetterStream, + subjects: new[] { _options.DeadLetterSubject }) + { + Retention = StreamConfigRetention.Workqueue, + Storage = StreamConfigStorage.File, + MaxConsumers = -1, + MaxMsgs = -1, + MaxBytes = -1, + MaxAge = ToNanoseconds(_queueOptions.DeadLetter.Retention) + }; + + await js.CreateStreamAsync(config, cancellationToken).ConfigureAwait(false); + _logger.LogInformation("Created NATS dead-letter stream {Stream} ({Subject}).", _options.DeadLetterStream, _options.DeadLetterSubject); + } + } + + internal async ValueTask PingAsync(CancellationToken cancellationToken) + { + var connection = await EnsureConnectionAsync(cancellationToken).ConfigureAwait(false); + await 
connection.PingAsync(cancellationToken).ConfigureAwait(false); + } + + private NatsScanQueueLease? CreateLease( + NatsJSMsg message, + string consumer, + DateTimeOffset now, + TimeSpan leaseDuration) + { + var headers = message.Headers; + if (headers is null) + { + return null; + } + + if (!headers.TryGetValue(QueueEnvelopeFields.JobId, out var jobIdValues) || jobIdValues.Count == 0) + { + return null; + } + + var jobId = jobIdValues[0]!; + var idempotencyKey = headers.TryGetValue(QueueEnvelopeFields.IdempotencyKey, out var idemValues) && idemValues.Count > 0 + ? idemValues[0] + : null; + + var enqueuedAt = headers.TryGetValue(QueueEnvelopeFields.EnqueuedAt, out var enqueuedValues) && enqueuedValues.Count > 0 + && long.TryParse(enqueuedValues[0], out var unix) + ? DateTimeOffset.FromUnixTimeMilliseconds(unix) + : now; + + var attempt = headers.TryGetValue(QueueEnvelopeFields.Attempt, out var attemptValues) && attemptValues.Count > 0 + && int.TryParse(attemptValues[0], out var parsedAttempt) + ? parsedAttempt + : 1; + + if (message.Metadata?.NumDelivered is ulong delivered && delivered > 0) + { + var deliveredInt = delivered > int.MaxValue ? int.MaxValue : (int)delivered; + if (deliveredInt > attempt) + { + attempt = deliveredInt; + } + } + + var leaseExpires = now.Add(leaseDuration); + var attributes = ExtractAttributes(headers); + + var messageId = message.Metadata?.Sequence.Stream.ToString() ?? Guid.NewGuid().ToString("n"); + return new NatsScanQueueLease( + this, + message, + messageId, + jobId, + message.Data ?? 
Array.Empty(), + attempt, + enqueuedAt, + leaseExpires, + consumer, + idempotencyKey, + attributes); + } + + private static IReadOnlyDictionary ExtractAttributes(NatsHeaders headers) + { + var attributes = new Dictionary(StringComparer.Ordinal); + + foreach (var key in headers.Keys) + { + if (!key.StartsWith(QueueEnvelopeFields.AttributePrefix, StringComparison.Ordinal)) + { + continue; + } + + if (headers.TryGetValue(key, out var values) && values.Count > 0) + { + attributes[key[QueueEnvelopeFields.AttributePrefix.Length..]] = values[0]!; + } + } + + return attributes.Count == 0 + ? EmptyReadOnlyDictionary.Instance + : new ReadOnlyDictionary(attributes); + } + + private NatsHeaders BuildHeaders(ScanQueueMessage message, string idempotencyKey) + { + var headers = new NatsHeaders + { + { QueueEnvelopeFields.JobId, message.JobId }, + { QueueEnvelopeFields.IdempotencyKey, idempotencyKey }, + { QueueEnvelopeFields.Attempt, "1" }, + { QueueEnvelopeFields.EnqueuedAt, _timeProvider.GetUtcNow().ToUnixTimeMilliseconds().ToString() } + }; + + if (!string.IsNullOrEmpty(message.TraceId)) + { + headers.Add(QueueEnvelopeFields.TraceId, message.TraceId!); + } + + if (message.Attributes is not null) + { + foreach (var kvp in message.Attributes) + { + headers.Add(QueueEnvelopeFields.AttributePrefix + kvp.Key, kvp.Value); + } + } + + return headers; + } + + private NatsHeaders BuildDeadLetterHeaders(NatsScanQueueLease lease, string reason) + { + var headers = new NatsHeaders + { + { QueueEnvelopeFields.JobId, lease.JobId }, + { QueueEnvelopeFields.IdempotencyKey, lease.IdempotencyKey ?? 
lease.JobId }, + { QueueEnvelopeFields.Attempt, lease.Attempt.ToString() }, + { QueueEnvelopeFields.EnqueuedAt, lease.EnqueuedAt.ToUnixTimeMilliseconds().ToString() }, + { "deadletter-reason", reason } + }; + + foreach (var kvp in lease.Attributes) + { + headers.Add(QueueEnvelopeFields.AttributePrefix + kvp.Key, kvp.Value); + } + + return headers; + } + + private TimeSpan CalculateBackoff(int attempt) + { + var configuredInitial = _options.RetryDelay > TimeSpan.Zero + ? _options.RetryDelay + : _queueOptions.RetryInitialBackoff; + + if (configuredInitial <= TimeSpan.Zero) + { + return TimeSpan.Zero; + } + + if (attempt <= 1) + { + return configuredInitial; + } + + var max = _queueOptions.RetryMaxBackoff > TimeSpan.Zero + ? _queueOptions.RetryMaxBackoff + : configuredInitial; + + var exponent = attempt - 1; + var scaledTicks = configuredInitial.Ticks * Math.Pow(2, exponent - 1); + var cappedTicks = Math.Min(max.Ticks, scaledTicks); + var resultTicks = Math.Max(configuredInitial.Ticks, (long)cappedTicks); + return TimeSpan.FromTicks(resultTicks); + } + + private static long ToNanoseconds(TimeSpan timeSpan) + => timeSpan <= TimeSpan.Zero ? 
0 : timeSpan.Ticks * 100L; + + private static class EmptyReadOnlyDictionary + where TKey : notnull + { + public static readonly IReadOnlyDictionary Instance = + new ReadOnlyDictionary(new Dictionary(0, EqualityComparer.Default)); + } +} diff --git a/src/StellaOps.Scanner.Queue/Nats/NatsScanQueueLease.cs b/src/StellaOps.Scanner.Queue/Nats/NatsScanQueueLease.cs new file mode 100644 index 00000000..4aaed705 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/Nats/NatsScanQueueLease.cs @@ -0,0 +1,78 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using NATS.Client.JetStream; + +namespace StellaOps.Scanner.Queue.Nats; + +internal sealed class NatsScanQueueLease : IScanQueueLease +{ + private readonly NatsScanQueue _queue; + private readonly NatsJSMsg _message; + private int _completed; + + internal NatsScanQueueLease( + NatsScanQueue queue, + NatsJSMsg message, + string messageId, + string jobId, + byte[] payload, + int attempt, + DateTimeOffset enqueuedAt, + DateTimeOffset leaseExpiresAt, + string consumer, + string? idempotencyKey, + IReadOnlyDictionary attributes) + { + _queue = queue; + _message = message; + MessageId = messageId; + JobId = jobId; + Payload = payload; + Attempt = attempt; + EnqueuedAt = enqueuedAt; + LeaseExpiresAt = leaseExpiresAt; + Consumer = consumer; + IdempotencyKey = idempotencyKey; + Attributes = attributes; + } + + public string MessageId { get; } + + public string JobId { get; } + + public ReadOnlyMemory Payload { get; } + + public int Attempt { get; internal set; } + + public DateTimeOffset EnqueuedAt { get; } + + public DateTimeOffset LeaseExpiresAt { get; private set; } + + public string Consumer { get; } + + public string? 
IdempotencyKey { get; } + + public IReadOnlyDictionary Attributes { get; } + + internal NatsJSMsg Message => _message; + + public Task AcknowledgeAsync(CancellationToken cancellationToken = default) + => _queue.AcknowledgeAsync(this, cancellationToken); + + public Task RenewAsync(TimeSpan leaseDuration, CancellationToken cancellationToken = default) + => _queue.RenewLeaseAsync(this, leaseDuration, cancellationToken); + + public Task ReleaseAsync(QueueReleaseDisposition disposition, CancellationToken cancellationToken = default) + => _queue.ReleaseAsync(this, disposition, cancellationToken); + + public Task DeadLetterAsync(string reason, CancellationToken cancellationToken = default) + => _queue.DeadLetterAsync(this, reason, cancellationToken); + + internal bool TryBeginCompletion() + => Interlocked.CompareExchange(ref _completed, 1, 0) == 0; + + internal void RefreshLease(DateTimeOffset expiresAt) + => LeaseExpiresAt = expiresAt; +} diff --git a/src/StellaOps.Scanner.Queue/QueueEnvelopeFields.cs b/src/StellaOps.Scanner.Queue/QueueEnvelopeFields.cs new file mode 100644 index 00000000..165790b8 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/QueueEnvelopeFields.cs @@ -0,0 +1,12 @@ +namespace StellaOps.Scanner.Queue; + +internal static class QueueEnvelopeFields +{ + public const string Payload = "payload"; + public const string JobId = "jobId"; + public const string IdempotencyKey = "idempotency"; + public const string Attempt = "attempt"; + public const string EnqueuedAt = "enqueuedAt"; + public const string TraceId = "traceId"; + public const string AttributePrefix = "attr:"; +} diff --git a/src/StellaOps.Scanner.Queue/QueueMetrics.cs b/src/StellaOps.Scanner.Queue/QueueMetrics.cs new file mode 100644 index 00000000..1d8dd369 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/QueueMetrics.cs @@ -0,0 +1,28 @@ +using System.Diagnostics.Metrics; + +namespace StellaOps.Scanner.Queue; + +internal static class QueueMetrics +{ + private const string TransportTagName = 
"transport"; + + private static readonly Meter Meter = new("StellaOps.Scanner.Queue"); + private static readonly Counter EnqueuedCounter = Meter.CreateCounter("scanner_queue_enqueued_total"); + private static readonly Counter DeduplicatedCounter = Meter.CreateCounter("scanner_queue_deduplicated_total"); + private static readonly Counter AckCounter = Meter.CreateCounter("scanner_queue_ack_total"); + private static readonly Counter RetryCounter = Meter.CreateCounter("scanner_queue_retry_total"); + private static readonly Counter DeadLetterCounter = Meter.CreateCounter("scanner_queue_deadletter_total"); + + public static void RecordEnqueued(string transport) => EnqueuedCounter.Add(1, BuildTags(transport)); + + public static void RecordDeduplicated(string transport) => DeduplicatedCounter.Add(1, BuildTags(transport)); + + public static void RecordAck(string transport) => AckCounter.Add(1, BuildTags(transport)); + + public static void RecordRetry(string transport) => RetryCounter.Add(1, BuildTags(transport)); + + public static void RecordDeadLetter(string transport) => DeadLetterCounter.Add(1, BuildTags(transport)); + + private static KeyValuePair[] BuildTags(string transport) + => new[] { new KeyValuePair(TransportTagName, transport) }; +} diff --git a/src/StellaOps.Scanner.Queue/QueueTransportKind.cs b/src/StellaOps.Scanner.Queue/QueueTransportKind.cs new file mode 100644 index 00000000..8823d6cd --- /dev/null +++ b/src/StellaOps.Scanner.Queue/QueueTransportKind.cs @@ -0,0 +1,7 @@ +namespace StellaOps.Scanner.Queue; + +public enum QueueTransportKind +{ + Redis, + Nats +} diff --git a/src/StellaOps.Scanner.Queue/Redis/RedisScanQueue.cs b/src/StellaOps.Scanner.Queue/Redis/RedisScanQueue.cs new file mode 100644 index 00000000..c5911af6 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/Redis/RedisScanQueue.cs @@ -0,0 +1,764 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Collections.ObjectModel; +using System.Linq; +using 
System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using StackExchange.Redis; + +namespace StellaOps.Scanner.Queue.Redis; + +internal sealed class RedisScanQueue : IScanQueue, IAsyncDisposable +{ + private const string TransportName = "redis"; + + private readonly ScannerQueueOptions _queueOptions; + private readonly RedisQueueOptions _options; + private readonly ILogger _logger; + private readonly TimeProvider _timeProvider; + private readonly SemaphoreSlim _connectionLock = new(1, 1); + private readonly SemaphoreSlim _groupInitLock = new(1, 1); + private readonly Func> _connectionFactory; + private IConnectionMultiplexer? _connection; + private volatile bool _groupInitialized; + private bool _disposed; + + private string BuildIdempotencyKey(string key) + => string.Concat(_options.IdempotencyKeyPrefix, key); + + public RedisScanQueue( + ScannerQueueOptions queueOptions, + RedisQueueOptions options, + ILogger logger, + TimeProvider timeProvider, + Func>? connectionFactory = null) + { + _queueOptions = queueOptions ?? throw new ArgumentNullException(nameof(queueOptions)); + _options = options ?? throw new ArgumentNullException(nameof(options)); + _logger = logger ?? throw new ArgumentNullException(nameof(logger)); + _timeProvider = timeProvider ?? TimeProvider.System; + _connectionFactory = connectionFactory ?? 
(config => Task.FromResult(ConnectionMultiplexer.Connect(config))); + + if (string.IsNullOrWhiteSpace(_options.ConnectionString)) + { + throw new InvalidOperationException("Redis connection string must be configured for the scanner queue."); + } + } + + public async ValueTask EnqueueAsync( + ScanQueueMessage message, + CancellationToken cancellationToken = default) + { + ArgumentNullException.ThrowIfNull(message); + cancellationToken.ThrowIfCancellationRequested(); + + var db = await GetDatabaseAsync(cancellationToken).ConfigureAwait(false); + var now = _timeProvider.GetUtcNow(); + await EnsureConsumerGroupAsync(db, cancellationToken).ConfigureAwait(false); + + var attempt = 1; + var entries = BuildEntries(message, now, attempt); + var messageId = await AddToStreamAsync( + db, + _options.StreamName, + entries, + _options.ApproximateMaxLength, + _options.ApproximateMaxLength is not null) + .ConfigureAwait(false); + + var idempotencyToken = message.IdempotencyKey ?? message.JobId; + var idempotencyKey = BuildIdempotencyKey(idempotencyToken); + + var stored = await db.StringSetAsync( + key: idempotencyKey, + value: messageId, + when: When.NotExists, + expiry: _options.IdempotencyWindow) + .ConfigureAwait(false); + + if (!stored) + { + // Duplicate enqueue – delete the freshly added entry and surface cached ID. + await db.StreamDeleteAsync( + _options.StreamName, + new RedisValue[] { messageId }) + .ConfigureAwait(false); + + var existing = await db.StringGetAsync(idempotencyKey).ConfigureAwait(false); + var duplicateId = existing.IsNullOrEmpty ? 
messageId : existing; + + _logger.LogDebug( + "Duplicate queue enqueue detected for job {JobId} (token {Token}), returning existing stream id {StreamId}.", + message.JobId, + idempotencyToken, + duplicateId.ToString()); + + QueueMetrics.RecordDeduplicated(TransportName); + return new QueueEnqueueResult(duplicateId.ToString()!, true); + } + + _logger.LogDebug( + "Enqueued job {JobId} into stream {Stream} with id {StreamId}.", + message.JobId, + _options.StreamName, + messageId.ToString()); + + QueueMetrics.RecordEnqueued(TransportName); + return new QueueEnqueueResult(messageId.ToString()!, false); + } + + public async ValueTask> LeaseAsync( + QueueLeaseRequest request, + CancellationToken cancellationToken = default) + { + ArgumentNullException.ThrowIfNull(request); + cancellationToken.ThrowIfCancellationRequested(); + + var db = await GetDatabaseAsync(cancellationToken).ConfigureAwait(false); + await EnsureConsumerGroupAsync(db, cancellationToken).ConfigureAwait(false); + + var entries = await db.StreamReadGroupAsync( + _options.StreamName, + _options.ConsumerGroup, + request.Consumer, + position: ">", + count: request.BatchSize, + flags: CommandFlags.None) + .ConfigureAwait(false); + + if (entries is null || entries.Length == 0) + { + return Array.Empty(); + } + + var now = _timeProvider.GetUtcNow(); + var leases = new List(entries.Length); + + foreach (var entry in entries) + { + var lease = TryMapLease( + entry, + request.Consumer, + now, + request.LeaseDuration, + default); + + if (lease is null) + { + _logger.LogWarning( + "Stream entry {StreamId} is missing required metadata; acknowledging to avoid poison message.", + entry.Id.ToString()); + await db.StreamAcknowledgeAsync( + _options.StreamName, + _options.ConsumerGroup, + new RedisValue[] { entry.Id }) + .ConfigureAwait(false); + await db.StreamDeleteAsync( + _options.StreamName, + new RedisValue[] { entry.Id }) + .ConfigureAwait(false); + continue; + } + + leases.Add(lease); + } + + return leases; + } + + 
public async ValueTask> ClaimExpiredLeasesAsync( + QueueClaimOptions options, + CancellationToken cancellationToken = default) + { + ArgumentNullException.ThrowIfNull(options); + cancellationToken.ThrowIfCancellationRequested(); + + var db = await GetDatabaseAsync(cancellationToken).ConfigureAwait(false); + await EnsureConsumerGroupAsync(db, cancellationToken).ConfigureAwait(false); + + var pending = await db.StreamPendingMessagesAsync( + _options.StreamName, + _options.ConsumerGroup, + options.BatchSize, + RedisValue.Null, + (long)options.MinIdleTime.TotalMilliseconds) + .ConfigureAwait(false); + + if (pending is null || pending.Length == 0) + { + return Array.Empty(); + } + + var eligible = pending + .Where(p => p.IdleTimeInMilliseconds >= options.MinIdleTime.TotalMilliseconds) + .ToArray(); + + if (eligible.Length == 0) + { + return Array.Empty(); + } + + var messageIds = eligible + .Select(static p => (RedisValue)p.MessageId) + .ToArray(); + + var entries = await db.StreamClaimAsync( + _options.StreamName, + _options.ConsumerGroup, + options.ClaimantConsumer, + 0, + messageIds, + CommandFlags.None) + .ConfigureAwait(false); + + if (entries is null || entries.Length == 0) + { + return Array.Empty(); + } + + var now = _timeProvider.GetUtcNow(); + var pendingById = Enumerable.ToDictionary( + eligible, + static p => p.MessageId.IsNullOrEmpty ? string.Empty : p.MessageId.ToString(), + static p => p, + StringComparer.Ordinal); + + var leases = new List(entries.Length); + foreach (var entry in entries) + { + var entryIdValue = entry.Id; + var entryId = entryIdValue.IsNullOrEmpty ? string.Empty : entryIdValue.ToString(); + var hasPending = pendingById.TryGetValue(entryId, out var pendingInfo); + var attempt = hasPending + ? 
(int)Math.Max(1, pendingInfo.DeliveryCount) + : 1; + + var lease = TryMapLease( + entry, + options.ClaimantConsumer, + now, + _queueOptions.DefaultLeaseDuration, + attempt); + + if (lease is null) + { + _logger.LogWarning( + "Unable to map claimed stream entry {StreamId}; acknowledging to unblock queue.", + entry.Id.ToString()); + await db.StreamAcknowledgeAsync( + _options.StreamName, + _options.ConsumerGroup, + new RedisValue[] { entry.Id }) + .ConfigureAwait(false); + await db.StreamDeleteAsync( + _options.StreamName, + new RedisValue[] { entry.Id }) + .ConfigureAwait(false); + continue; + } + + leases.Add(lease); + } + + return leases; + } + + public async ValueTask DisposeAsync() + { + if (_disposed) + { + return; + } + + _disposed = true; + if (_connection is not null) + { + await _connection.CloseAsync(); + _connection.Dispose(); + } + + _connectionLock.Dispose(); + _groupInitLock.Dispose(); + GC.SuppressFinalize(this); + } + + internal async Task AcknowledgeAsync( + RedisScanQueueLease lease, + CancellationToken cancellationToken) + { + if (!lease.TryBeginCompletion()) + { + return; + } + + var db = await GetDatabaseAsync(cancellationToken).ConfigureAwait(false); + + await db.StreamAcknowledgeAsync( + _options.StreamName, + _options.ConsumerGroup, + new RedisValue[] { lease.MessageId }) + .ConfigureAwait(false); + + await db.StreamDeleteAsync( + _options.StreamName, + new RedisValue[] { lease.MessageId }) + .ConfigureAwait(false); + + _logger.LogDebug( + "Acknowledged job {JobId} ({MessageId}) on consumer {Consumer}.", + lease.JobId, + lease.MessageId, + lease.Consumer); + + QueueMetrics.RecordAck(TransportName); + } + + internal async Task RenewLeaseAsync( + RedisScanQueueLease lease, + TimeSpan leaseDuration, + CancellationToken cancellationToken) + { + var db = await GetDatabaseAsync(cancellationToken).ConfigureAwait(false); + + await db.StreamClaimAsync( + _options.StreamName, + _options.ConsumerGroup, + lease.Consumer, + 0, + new RedisValue[] { 
lease.MessageId }, + CommandFlags.None) + .ConfigureAwait(false); + + var expires = _timeProvider.GetUtcNow().Add(leaseDuration); + lease.RefreshLease(expires); + + _logger.LogDebug( + "Renewed lease for job {JobId} until {LeaseExpiry:u}.", + lease.JobId, + expires); + } + + internal async Task ReleaseAsync( + RedisScanQueueLease lease, + QueueReleaseDisposition disposition, + CancellationToken cancellationToken) + { + if (disposition == QueueReleaseDisposition.Retry + && lease.Attempt >= _queueOptions.MaxDeliveryAttempts) + { + _logger.LogWarning( + "Job {JobId} reached max delivery attempts ({Attempts}); moving to dead-letter.", + lease.JobId, + lease.Attempt); + + await DeadLetterAsync( + lease, + $"max-delivery-attempts:{lease.Attempt}", + cancellationToken).ConfigureAwait(false); + return; + } + + if (!lease.TryBeginCompletion()) + { + return; + } + + var db = await GetDatabaseAsync(cancellationToken).ConfigureAwait(false); + await db.StreamAcknowledgeAsync( + _options.StreamName, + _options.ConsumerGroup, + new RedisValue[] { lease.MessageId }) + .ConfigureAwait(false); + await db.StreamDeleteAsync( + _options.StreamName, + new RedisValue[] { lease.MessageId }) + .ConfigureAwait(false); + + QueueMetrics.RecordAck(TransportName); + + if (disposition == QueueReleaseDisposition.Retry) + { + QueueMetrics.RecordRetry(TransportName); + + var delay = CalculateBackoff(lease.Attempt); + if (delay > TimeSpan.Zero) + { + _logger.LogDebug( + "Delaying retry for job {JobId} by {Delay} (attempt {Attempt}).", + lease.JobId, + delay, + lease.Attempt); + + try + { + await Task.Delay(delay, cancellationToken).ConfigureAwait(false); + } + catch (TaskCanceledException) + { + return; + } + } + + var requeueMessage = new ScanQueueMessage(lease.JobId, lease.Payload) + { + IdempotencyKey = lease.IdempotencyKey, + Attributes = lease.Attributes, + TraceId = null + }; + + var now = _timeProvider.GetUtcNow(); + var entries = BuildEntries(requeueMessage, now, lease.Attempt + 1); + + 
await AddToStreamAsync( + db, + _options.StreamName, + entries, + _options.ApproximateMaxLength, + _options.ApproximateMaxLength is not null) + .ConfigureAwait(false); + + QueueMetrics.RecordEnqueued(TransportName); + _logger.LogWarning( + "Released job {JobId} for retry (attempt {Attempt}).", + lease.JobId, + lease.Attempt + 1); + } + else + { + _logger.LogInformation( + "Abandoned job {JobId} after {Attempt} attempt(s).", + lease.JobId, + lease.Attempt); + } + } + + internal async Task DeadLetterAsync( + RedisScanQueueLease lease, + string reason, + CancellationToken cancellationToken) + { + if (!lease.TryBeginCompletion()) + { + return; + } + + var db = await GetDatabaseAsync(cancellationToken).ConfigureAwait(false); + + await db.StreamAcknowledgeAsync( + _options.StreamName, + _options.ConsumerGroup, + new RedisValue[] { lease.MessageId }) + .ConfigureAwait(false); + + await db.StreamDeleteAsync( + _options.StreamName, + new RedisValue[] { lease.MessageId }) + .ConfigureAwait(false); + + var now = _timeProvider.GetUtcNow(); + var entries = BuildEntries( + new ScanQueueMessage(lease.JobId, lease.Payload) + { + IdempotencyKey = lease.IdempotencyKey, + Attributes = lease.Attributes, + TraceId = null + }, + now, + lease.Attempt); + + await AddToStreamAsync( + db, + _queueOptions.DeadLetter.StreamName, + entries, + null, + false) + .ConfigureAwait(false); + + _logger.LogError( + "Dead-lettered job {JobId} (attempt {Attempt}): {Reason}", + lease.JobId, + lease.Attempt, + reason); + + QueueMetrics.RecordDeadLetter(TransportName); + } + + private async ValueTask GetDatabaseAsync(CancellationToken cancellationToken) + { + if (_connection is not null) + { + return _connection.GetDatabase(_options.Database ?? 
-1); + } + + await _connectionLock.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + if (_connection is null) + { + var config = ConfigurationOptions.Parse(_options.ConnectionString!); + config.AbortOnConnectFail = false; + config.ConnectTimeout = (int)_options.InitializationTimeout.TotalMilliseconds; + config.ConnectRetry = 3; + if (_options.Database is not null) + { + config.DefaultDatabase = _options.Database; + } + + _connection = await _connectionFactory(config).ConfigureAwait(false); + } + + return _connection.GetDatabase(_options.Database ?? -1); + } + finally + { + _connectionLock.Release(); + } + } + + private async Task EnsureConsumerGroupAsync( + IDatabase database, + CancellationToken cancellationToken) + { + if (_groupInitialized) + { + return; + } + + await _groupInitLock.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + if (_groupInitialized) + { + return; + } + + try + { + await database.StreamCreateConsumerGroupAsync( + _options.StreamName, + _options.ConsumerGroup, + StreamPosition.Beginning, + createStream: true) + .ConfigureAwait(false); + } + catch (RedisServerException ex) when (ex.Message.Contains("BUSYGROUP", StringComparison.OrdinalIgnoreCase)) + { + // Already exists. + } + + _groupInitialized = true; + } + finally + { + _groupInitLock.Release(); + } + } + + private NameValueEntry[] BuildEntries( + ScanQueueMessage message, + DateTimeOffset enqueuedAt, + int attempt) + { + var attributeCount = message.Attributes?.Count ?? 0; + var entries = ArrayPool.Shared.Rent(6 + attributeCount); + var index = 0; + + entries[index++] = new NameValueEntry(QueueEnvelopeFields.JobId, message.JobId); + entries[index++] = new NameValueEntry(QueueEnvelopeFields.Attempt, attempt); + entries[index++] = new NameValueEntry(QueueEnvelopeFields.EnqueuedAt, enqueuedAt.ToUnixTimeMilliseconds()); + entries[index++] = new NameValueEntry( + QueueEnvelopeFields.IdempotencyKey, + message.IdempotencyKey ?? 
message.JobId); + entries[index++] = new NameValueEntry( + QueueEnvelopeFields.Payload, + message.Payload.ToArray()); + entries[index++] = new NameValueEntry( + QueueEnvelopeFields.TraceId, + message.TraceId ?? string.Empty); + + if (attributeCount > 0) + { + foreach (var kvp in message.Attributes!) + { + entries[index++] = new NameValueEntry( + QueueEnvelopeFields.AttributePrefix + kvp.Key, + kvp.Value); + } + } + + var result = entries.AsSpan(0, index).ToArray(); + ArrayPool.Shared.Return(entries, clearArray: true); + return result; + } + + private RedisScanQueueLease? TryMapLease( + StreamEntry entry, + string consumer, + DateTimeOffset now, + TimeSpan leaseDuration, + int? attemptOverride) + { + if (entry.Values is null || entry.Values.Length == 0) + { + return null; + } + + string? jobId = null; + string? idempotency = null; + long? enqueuedAtUnix = null; + byte[]? payload = null; + string? traceId = null; + var attributes = new Dictionary(StringComparer.Ordinal); + var attempt = attemptOverride ?? 1; + + foreach (var field in entry.Values) + { + var name = field.Name.ToString(); + if (name.Equals(QueueEnvelopeFields.JobId, StringComparison.Ordinal)) + { + jobId = field.Value.ToString(); + } + else if (name.Equals(QueueEnvelopeFields.IdempotencyKey, StringComparison.Ordinal)) + { + idempotency = field.Value.ToString(); + } + else if (name.Equals(QueueEnvelopeFields.EnqueuedAt, StringComparison.Ordinal)) + { + if (long.TryParse(field.Value.ToString(), out var unix)) + { + enqueuedAtUnix = unix; + } + } + else if (name.Equals(QueueEnvelopeFields.Payload, StringComparison.Ordinal)) + { + payload = (byte[]?)field.Value ?? 
Array.Empty(); + } + else if (name.Equals(QueueEnvelopeFields.Attempt, StringComparison.Ordinal)) + { + if (int.TryParse(field.Value.ToString(), out var parsedAttempt)) + { + attempt = Math.Max(parsedAttempt, attempt); + } + } + else if (name.Equals(QueueEnvelopeFields.TraceId, StringComparison.Ordinal)) + { + traceId = field.Value.ToString(); + } + else if (name.StartsWith(QueueEnvelopeFields.AttributePrefix, StringComparison.Ordinal)) + { + attributes[name[QueueEnvelopeFields.AttributePrefix.Length..]] = field.Value.ToString(); + } + } + + if (jobId is null || payload is null || enqueuedAtUnix is null) + { + return null; + } + + var enqueuedAt = DateTimeOffset.FromUnixTimeMilliseconds(enqueuedAtUnix.Value); + var leaseExpires = now.Add(leaseDuration); + + var attributeView = attributes.Count == 0 + ? EmptyReadOnlyDictionary.Instance + : new ReadOnlyDictionary(attributes); + + return new RedisScanQueueLease( + this, + entry.Id.ToString(), + jobId, + payload, + attempt, + enqueuedAt, + leaseExpires, + consumer, + idempotency, + attributeView); + } + + private TimeSpan CalculateBackoff(int attempt) + { + var configuredInitial = _options.RetryInitialBackoff > TimeSpan.Zero + ? _options.RetryInitialBackoff + : _queueOptions.RetryInitialBackoff; + + var initial = configuredInitial > TimeSpan.Zero + ? configuredInitial + : TimeSpan.Zero; + + if (initial <= TimeSpan.Zero) + { + return TimeSpan.Zero; + } + + if (attempt <= 1) + { + return initial; + } + + var configuredMax = _queueOptions.RetryMaxBackoff > TimeSpan.Zero + ? _queueOptions.RetryMaxBackoff + : initial; + + var max = configuredMax <= TimeSpan.Zero + ? 
initial + : configuredMax; + + var exponent = attempt - 1; + var scale = Math.Pow(2, exponent - 1); + var scaledTicks = initial.Ticks * scale; + var cappedTicks = Math.Min(max.Ticks, scaledTicks); + + var resultTicks = Math.Max(initial.Ticks, (long)cappedTicks); + return TimeSpan.FromTicks(resultTicks); + } + + private async Task AddToStreamAsync( + IDatabase database, + RedisKey stream, + NameValueEntry[] entries, + int? maxLength, + bool useApproximateLength) + { + var capacity = 4 + (entries.Length * 2); + var args = new List(capacity) + { + stream + }; + + if (maxLength.HasValue) + { + args.Add("MAXLEN"); + if (useApproximateLength) + { + args.Add("~"); + } + + args.Add(maxLength.Value); + } + + args.Add("*"); + for (var i = 0; i < entries.Length; i++) + { + args.Add(entries[i].Name); + args.Add(entries[i].Value); + } + + var result = await database.ExecuteAsync("XADD", args.ToArray()).ConfigureAwait(false); + return (RedisValue)result!; + } + + private static class EmptyReadOnlyDictionary + where TKey : notnull + { + public static readonly IReadOnlyDictionary Instance = + new ReadOnlyDictionary(new Dictionary(0, EqualityComparer.Default)); + } + + internal async ValueTask PingAsync(CancellationToken cancellationToken) + { + var db = await GetDatabaseAsync(cancellationToken).ConfigureAwait(false); + await db.ExecuteAsync("PING").ConfigureAwait(false); + } +} diff --git a/src/StellaOps.Scanner.Queue/Redis/RedisScanQueueLease.cs b/src/StellaOps.Scanner.Queue/Redis/RedisScanQueueLease.cs new file mode 100644 index 00000000..cab293b0 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/Redis/RedisScanQueueLease.cs @@ -0,0 +1,72 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Queue.Redis; + +internal sealed class RedisScanQueueLease : IScanQueueLease +{ + private readonly RedisScanQueue _queue; + private int _completed; + + internal RedisScanQueueLease( + RedisScanQueue queue, + 
string messageId, + string jobId, + byte[] payload, + int attempt, + DateTimeOffset enqueuedAt, + DateTimeOffset leaseExpiresAt, + string consumer, + string? idempotencyKey, + IReadOnlyDictionary attributes) + { + _queue = queue; + MessageId = messageId; + JobId = jobId; + Payload = payload; + Attempt = attempt; + EnqueuedAt = enqueuedAt; + LeaseExpiresAt = leaseExpiresAt; + Consumer = consumer; + IdempotencyKey = idempotencyKey; + Attributes = attributes; + } + + public string MessageId { get; } + + public string JobId { get; } + + public ReadOnlyMemory Payload { get; } + + public int Attempt { get; } + + public DateTimeOffset EnqueuedAt { get; } + + public DateTimeOffset LeaseExpiresAt { get; private set; } + + public string Consumer { get; } + + public string? IdempotencyKey { get; } + + public IReadOnlyDictionary Attributes { get; } + + public Task AcknowledgeAsync(CancellationToken cancellationToken = default) + => _queue.AcknowledgeAsync(this, cancellationToken); + + public Task RenewAsync(TimeSpan leaseDuration, CancellationToken cancellationToken = default) + => _queue.RenewLeaseAsync(this, leaseDuration, cancellationToken); + + public Task ReleaseAsync(QueueReleaseDisposition disposition, CancellationToken cancellationToken = default) + => _queue.ReleaseAsync(this, disposition, cancellationToken); + + public Task DeadLetterAsync(string reason, CancellationToken cancellationToken = default) + => _queue.DeadLetterAsync(this, reason, cancellationToken); + + internal bool TryBeginCompletion() + => Interlocked.CompareExchange(ref _completed, 1, 0) == 0; + + internal void RefreshLease(DateTimeOffset expiresAt) + => LeaseExpiresAt = expiresAt; +} diff --git a/src/StellaOps.Scanner.Queue/ScanQueueContracts.cs b/src/StellaOps.Scanner.Queue/ScanQueueContracts.cs new file mode 100644 index 00000000..942c2740 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/ScanQueueContracts.cs @@ -0,0 +1,115 @@ +using System; +using System.Collections.Generic; + +namespace 
StellaOps.Scanner.Queue; + +public sealed class ScanQueueMessage +{ + private readonly byte[] _payload; + + public ScanQueueMessage(string jobId, ReadOnlyMemory payload) + { + if (string.IsNullOrWhiteSpace(jobId)) + { + throw new ArgumentException("Job identifier must be provided.", nameof(jobId)); + } + + JobId = jobId; + _payload = CopyPayload(payload); + } + + public string JobId { get; } + + public string? IdempotencyKey { get; init; } + + public string? TraceId { get; init; } + + public IReadOnlyDictionary? Attributes { get; init; } + + public ReadOnlyMemory Payload => _payload; + + private static byte[] CopyPayload(ReadOnlyMemory payload) + { + if (payload.Length == 0) + { + return Array.Empty(); + } + + var copy = new byte[payload.Length]; + payload.Span.CopyTo(copy); + return copy; + } +} + +public readonly record struct QueueEnqueueResult(string MessageId, bool Deduplicated); + +public sealed class QueueLeaseRequest +{ + public QueueLeaseRequest(string consumer, int batchSize, TimeSpan leaseDuration) + { + if (string.IsNullOrWhiteSpace(consumer)) + { + throw new ArgumentException("Consumer name must be provided.", nameof(consumer)); + } + + if (batchSize <= 0) + { + throw new ArgumentOutOfRangeException(nameof(batchSize), batchSize, "Batch size must be positive."); + } + + if (leaseDuration <= TimeSpan.Zero) + { + throw new ArgumentOutOfRangeException(nameof(leaseDuration), leaseDuration, "Lease duration must be positive."); + } + + Consumer = consumer; + BatchSize = batchSize; + LeaseDuration = leaseDuration; + } + + public string Consumer { get; } + + public int BatchSize { get; } + + public TimeSpan LeaseDuration { get; } +} + +public sealed class QueueClaimOptions +{ + public QueueClaimOptions( + string claimantConsumer, + int batchSize, + TimeSpan minIdleTime) + { + if (string.IsNullOrWhiteSpace(claimantConsumer)) + { + throw new ArgumentException("Consumer must be provided.", nameof(claimantConsumer)); + } + + if (batchSize <= 0) + { + throw new 
ArgumentOutOfRangeException(nameof(batchSize), batchSize, "Batch size must be positive."); + } + + if (minIdleTime < TimeSpan.Zero) + { + throw new ArgumentOutOfRangeException(nameof(minIdleTime), minIdleTime, "Idle time cannot be negative."); + } + + ClaimantConsumer = claimantConsumer; + BatchSize = batchSize; + MinIdleTime = minIdleTime; + } + + public string ClaimantConsumer { get; } + + public int BatchSize { get; } + + public TimeSpan MinIdleTime { get; } +} + +public enum QueueReleaseDisposition +{ + Retry, + Abandon +} diff --git a/src/StellaOps.Scanner.Queue/ScannerQueueHealthCheck.cs b/src/StellaOps.Scanner.Queue/ScannerQueueHealthCheck.cs new file mode 100644 index 00000000..25236200 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/ScannerQueueHealthCheck.cs @@ -0,0 +1,55 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Logging; +using StellaOps.Scanner.Queue.Nats; +using StellaOps.Scanner.Queue.Redis; + +namespace StellaOps.Scanner.Queue; + +public sealed class ScannerQueueHealthCheck : IHealthCheck +{ + private readonly IScanQueue _queue; + private readonly ILogger _logger; + + public ScannerQueueHealthCheck( + IScanQueue queue, + ILogger logger) + { + _queue = queue ?? throw new ArgumentNullException(nameof(queue)); + _logger = logger ?? 
throw new ArgumentNullException(nameof(logger)); + } + + public async Task CheckHealthAsync( + HealthCheckContext context, + CancellationToken cancellationToken = default) + { + cancellationToken.ThrowIfCancellationRequested(); + + try + { + switch (_queue) + { + case RedisScanQueue redisQueue: + await redisQueue.PingAsync(cancellationToken).ConfigureAwait(false); + return HealthCheckResult.Healthy("Redis queue reachable."); + + case NatsScanQueue natsQueue: + await natsQueue.PingAsync(cancellationToken).ConfigureAwait(false); + return HealthCheckResult.Healthy("NATS queue reachable."); + + default: + return HealthCheckResult.Healthy("Queue transport without dedicated ping returned healthy."); + } + } + catch (Exception ex) + { + _logger.LogError(ex, "Scanner queue health check failed."); + return new HealthCheckResult( + context.Registration.FailureStatus, + "Queue transport unreachable.", + ex); + } + } +} diff --git a/src/StellaOps.Scanner.Queue/ScannerQueueOptions.cs b/src/StellaOps.Scanner.Queue/ScannerQueueOptions.cs new file mode 100644 index 00000000..5e21fdc3 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/ScannerQueueOptions.cs @@ -0,0 +1,92 @@ +using System; + +namespace StellaOps.Scanner.Queue; + +public sealed class ScannerQueueOptions +{ + public QueueTransportKind Kind { get; set; } = QueueTransportKind.Redis; + + public RedisQueueOptions Redis { get; set; } = new(); + + public NatsQueueOptions Nats { get; set; } = new(); + + /// + /// Default lease duration applied when callers do not override the visibility timeout. + /// + public TimeSpan DefaultLeaseDuration { get; set; } = TimeSpan.FromMinutes(5); + + /// + /// Maximum number of times a message may be delivered before it is shunted to the dead-letter queue. + /// + public int MaxDeliveryAttempts { get; set; } = 5; + + /// + /// Options controlling retry/backoff/dead-letter handling. 
+ /// + public DeadLetterQueueOptions DeadLetter { get; set; } = new(); + + /// + /// Initial backoff applied when a job is retried after failure. + /// + public TimeSpan RetryInitialBackoff { get; set; } = TimeSpan.FromSeconds(5); + + /// + /// Maximum backoff window applied for exponential retry. + /// + public TimeSpan RetryMaxBackoff { get; set; } = TimeSpan.FromMinutes(2); +} + +public sealed class RedisQueueOptions +{ + public string? ConnectionString { get; set; } + + public int? Database { get; set; } + + public string StreamName { get; set; } = "scanner:jobs"; + + public string ConsumerGroup { get; set; } = "scanner-workers"; + + public string IdempotencyKeyPrefix { get; set; } = "scanner:jobs:idemp:"; + + public TimeSpan IdempotencyWindow { get; set; } = TimeSpan.FromHours(12); + + public int? ApproximateMaxLength { get; set; } + + public TimeSpan InitializationTimeout { get; set; } = TimeSpan.FromSeconds(30); + + public TimeSpan ClaimIdleThreshold { get; set; } = TimeSpan.FromMinutes(10); + + public TimeSpan PendingScanWindow { get; set; } = TimeSpan.FromMinutes(30); + + public TimeSpan RetryInitialBackoff { get; set; } = TimeSpan.FromSeconds(5); +} + +public sealed class NatsQueueOptions +{ + public string? 
Url { get; set; } + + public string Stream { get; set; } = "SCANNER_JOBS"; + + public string Subject { get; set; } = "scanner.jobs"; + + public string DurableConsumer { get; set; } = "scanner-workers"; + + public int MaxInFlight { get; set; } = 64; + + public TimeSpan AckWait { get; set; } = TimeSpan.FromMinutes(5); + + public string DeadLetterStream { get; set; } = "SCANNER_JOBS_DEAD"; + + public string DeadLetterSubject { get; set; } = "scanner.jobs.dead"; + + public TimeSpan RetryDelay { get; set; } = TimeSpan.FromSeconds(10); + + public TimeSpan IdleHeartbeat { get; set; } = TimeSpan.FromSeconds(30); +} + +public sealed class DeadLetterQueueOptions +{ + public string StreamName { get; set; } = "scanner:jobs:dead"; + + public TimeSpan Retention { get; set; } = TimeSpan.FromDays(7); +} diff --git a/src/StellaOps.Scanner.Queue/ScannerQueueServiceCollectionExtensions.cs b/src/StellaOps.Scanner.Queue/ScannerQueueServiceCollectionExtensions.cs new file mode 100644 index 00000000..b40383af --- /dev/null +++ b/src/StellaOps.Scanner.Queue/ScannerQueueServiceCollectionExtensions.cs @@ -0,0 +1,67 @@ +using System; +using Microsoft.Extensions.Configuration; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Logging; +using StellaOps.Scanner.Queue.Nats; +using StellaOps.Scanner.Queue.Redis; + +namespace StellaOps.Scanner.Queue; + +public static class ScannerQueueServiceCollectionExtensions +{ + public static IServiceCollection AddScannerQueue( + this IServiceCollection services, + IConfiguration configuration, + string sectionName = "scanner:queue") + { + ArgumentNullException.ThrowIfNull(services); + ArgumentNullException.ThrowIfNull(configuration); + + var options = new ScannerQueueOptions(); + configuration.GetSection(sectionName).Bind(options); + + services.TryAddSingleton(TimeProvider.System); + services.AddSingleton(options); + + 
services.AddSingleton(sp => + { + var loggerFactory = sp.GetRequiredService(); + var timeProvider = sp.GetService() ?? TimeProvider.System; + + return options.Kind switch + { + QueueTransportKind.Redis => new RedisScanQueue( + options, + options.Redis, + loggerFactory.CreateLogger(), + timeProvider), + QueueTransportKind.Nats => new NatsScanQueue( + options, + options.Nats, + loggerFactory.CreateLogger(), + timeProvider), + _ => throw new InvalidOperationException($"Unsupported queue transport kind '{options.Kind}'.") + }; + }); + + services.AddSingleton(); + + return services; + } + + public static IHealthChecksBuilder AddScannerQueueHealthCheck( + this IHealthChecksBuilder builder) + { + ArgumentNullException.ThrowIfNull(builder); + + builder.Services.TryAddSingleton(); + builder.AddCheck( + name: "scanner-queue", + failureStatus: HealthStatus.Unhealthy, + tags: new[] { "scanner", "queue" }); + + return builder; + } +} diff --git a/src/StellaOps.Scanner.Queue/StellaOps.Scanner.Queue.csproj b/src/StellaOps.Scanner.Queue/StellaOps.Scanner.Queue.csproj new file mode 100644 index 00000000..ed73c877 --- /dev/null +++ b/src/StellaOps.Scanner.Queue/StellaOps.Scanner.Queue.csproj @@ -0,0 +1,21 @@ + + + net10.0 + enable + enable + false + + + + + + + + + + + + + + + diff --git a/src/StellaOps.Scanner.Queue/TASKS.md b/src/StellaOps.Scanner.Queue/TASKS.md new file mode 100644 index 00000000..44e9ddbe --- /dev/null +++ b/src/StellaOps.Scanner.Queue/TASKS.md @@ -0,0 +1,7 @@ +# Scanner Queue Task Board (Sprint 9) + +| ID | Status | Owner(s) | Depends on | Description | Exit Criteria | +|----|--------|----------|------------|-------------|---------------| +| SCANNER-QUEUE-09-401 | DONE (2025-10-19) | Scanner Queue Guild | — | Implement queue abstraction + Redis Streams adapter with ack/lease semantics, idempotency tokens, and deterministic job IDs. | Interfaces finalized; Redis adapter passes enqueue/dequeue/ack/claim lease tests; structured logs exercised. 
| +| SCANNER-QUEUE-09-402 | DONE (2025-10-19) | Scanner Queue Guild | SCANNER-QUEUE-09-401 | Add pluggable backend support (Redis, NATS) with configuration binding, health probes, failover documentation. | NATS adapter + DI bindings delivered; health checks documented; configuration tests green. | +| SCANNER-QUEUE-09-403 | DONE (2025-10-19) | Scanner Queue Guild | SCANNER-QUEUE-09-401 | Implement retry and dead-letter flow with structured metrics/logs for offline deployments. | Retry policy configurable; dead-letter queue persisted; metrics counters validated in integration tests. | diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Attestation/AttestorClientTests.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Attestation/AttestorClientTests.cs new file mode 100644 index 00000000..fe0a94f0 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Attestation/AttestorClientTests.cs @@ -0,0 +1,82 @@ +using System; +using System.Net; +using System.Net.Http; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using StellaOps.Scanner.Sbomer.BuildXPlugin; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Attestation; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; +using Xunit; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.Attestation; + +public sealed class AttestorClientTests +{ + [Fact] + public async Task SendPlaceholderAsync_PostsJsonPayload() + { + var handler = new RecordingHandler(new HttpResponseMessage(HttpStatusCode.Accepted)); + using var httpClient = new HttpClient(handler); + var client = new AttestorClient(httpClient); + + var document = BuildDescriptorDocument(); + var attestorUri = new Uri("https://attestor.example.com/api/v1/provenance"); + + await client.SendPlaceholderAsync(attestorUri, document, CancellationToken.None); + + Assert.NotNull(handler.CapturedRequest); + Assert.Equal(HttpMethod.Post, handler.CapturedRequest!.Method); + Assert.Equal(attestorUri, 
handler.CapturedRequest.RequestUri); + + var content = await handler.CapturedRequest.Content!.ReadAsStringAsync(); + var json = JsonDocument.Parse(content); + Assert.Equal(document.Subject.Digest, json.RootElement.GetProperty("imageDigest").GetString()); + Assert.Equal(document.Artifact.Digest, json.RootElement.GetProperty("sbomDigest").GetString()); + Assert.Equal(document.Provenance.ExpectedDsseSha256, json.RootElement.GetProperty("expectedDsseSha256").GetString()); + } + + [Fact] + public async Task SendPlaceholderAsync_ThrowsOnFailure() + { + var handler = new RecordingHandler(new HttpResponseMessage(HttpStatusCode.BadRequest) + { + Content = new StringContent("invalid") + }); + using var httpClient = new HttpClient(handler); + var client = new AttestorClient(httpClient); + + var document = BuildDescriptorDocument(); + var attestorUri = new Uri("https://attestor.example.com/api/v1/provenance"); + + await Assert.ThrowsAsync(() => client.SendPlaceholderAsync(attestorUri, document, CancellationToken.None)); + } + + private static DescriptorDocument BuildDescriptorDocument() + { + var subject = new DescriptorSubject("application/vnd.oci.image.manifest.v1+json", "sha256:img"); + var artifact = new DescriptorArtifact("application/vnd.cyclonedx+json", "sha256:sbom", 42, new System.Collections.Generic.Dictionary()); + var provenance = new DescriptorProvenance("pending", "sha256:dsse", "nonce", "https://attestor.example.com/api/v1/provenance", "https://slsa.dev/provenance/v1"); + var generatorMetadata = new DescriptorGeneratorMetadata("generator", "1.0.0"); + var metadata = new System.Collections.Generic.Dictionary(); + return new DescriptorDocument("schema", DateTimeOffset.UtcNow, generatorMetadata, subject, artifact, provenance, metadata); + } + + private sealed class RecordingHandler : HttpMessageHandler + { + private readonly HttpResponseMessage response; + + public RecordingHandler(HttpResponseMessage response) + { + this.response = response; + } + + public 
HttpRequestMessage? CapturedRequest { get; private set; } + + protected override Task SendAsync(HttpRequestMessage request, CancellationToken cancellationToken) + { + CapturedRequest = request; + return Task.FromResult(response); + } + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Cas/LocalCasClientTests.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Cas/LocalCasClientTests.cs new file mode 100644 index 00000000..e38584ed --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Cas/LocalCasClientTests.cs @@ -0,0 +1,34 @@ +using System.IO; +using System.Security.Cryptography; +using System.Threading; +using System.Threading.Tasks; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Cas; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.TestUtilities; +using Xunit; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.Cas; + +public sealed class LocalCasClientTests +{ + [Fact] + public async Task VerifyWriteAsync_WritesProbeObject() + { + await using var temp = new TempDirectory(); + var client = new LocalCasClient(new LocalCasOptions + { + RootDirectory = temp.Path, + Algorithm = "sha256" + }); + + var result = await client.VerifyWriteAsync(CancellationToken.None); + + Assert.Equal("sha256", result.Algorithm); + Assert.True(File.Exists(result.Path)); + + var bytes = await File.ReadAllBytesAsync(result.Path); + Assert.Equal("stellaops-buildx-probe"u8.ToArray(), bytes); + + var expectedDigest = Convert.ToHexString(SHA256.HashData(bytes)).ToLowerInvariant(); + Assert.Equal(expectedDigest, result.Digest); + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Descriptor/DescriptorGeneratorTests.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Descriptor/DescriptorGeneratorTests.cs new file mode 100644 index 00000000..785c8a05 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Descriptor/DescriptorGeneratorTests.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using 
System.Globalization; +using System.IO; +using System.Security.Cryptography; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Time.Testing; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.TestUtilities; +using Xunit; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.Descriptor; + +public sealed class DescriptorGeneratorTests +{ + [Fact] + public async Task CreateAsync_BuildsDeterministicDescriptor() + { + await using var temp = new TempDirectory(); + var sbomPath = Path.Combine(temp.Path, "sample.cdx.json"); + await File.WriteAllTextAsync(sbomPath, "{\"bomFormat\":\"CycloneDX\",\"specVersion\":\"1.5\"}"); + + var fakeTime = new FakeTimeProvider(new DateTimeOffset(2025, 10, 18, 12, 0, 0, TimeSpan.Zero)); + var generator = new DescriptorGenerator(fakeTime); + + var request = new DescriptorRequest + { + ImageDigest = "sha256:0123456789abcdef", + SbomPath = sbomPath, + SbomMediaType = "application/vnd.cyclonedx+json", + SbomFormat = "cyclonedx-json", + SbomKind = "inventory", + SbomArtifactType = "application/vnd.stellaops.sbom.layer+json", + SubjectMediaType = "application/vnd.oci.image.manifest.v1+json", + GeneratorVersion = "1.2.3", + GeneratorName = "StellaOps.Scanner.Sbomer.BuildXPlugin", + LicenseId = "lic-123", + SbomName = "sample.cdx.json", + Repository = "git.stella-ops.org/stellaops", + BuildRef = "refs/heads/main", + AttestorUri = "https://attestor.local/api/v1/provenance" + }.Validate(); + + var document = await generator.CreateAsync(request, CancellationToken.None); + + Assert.Equal(DescriptorGenerator.Schema, document.Schema); + Assert.Equal(fakeTime.GetUtcNow(), document.GeneratedAt); + Assert.Equal(request.ImageDigest, document.Subject.Digest); + Assert.Equal(request.SbomMediaType, document.Artifact.MediaType); + Assert.Equal(request.SbomName, document.Artifact.Annotations["org.opencontainers.image.title"]); + 
Assert.Equal("pending", document.Provenance.Status); + Assert.Equal(request.AttestorUri, document.Provenance.AttestorUri); + Assert.Equal(request.PredicateType, document.Provenance.PredicateType); + + var expectedSbomDigest = ComputeSha256File(sbomPath); + Assert.Equal(expectedSbomDigest, document.Artifact.Digest); + Assert.Equal(expectedSbomDigest, document.Metadata["sbomDigest"]); + + var expectedDsse = ComputeExpectedDsse(request.ImageDigest, expectedSbomDigest, document.Provenance.Nonce); + Assert.Equal(expectedDsse, document.Provenance.ExpectedDsseSha256); + Assert.Equal(expectedDsse, document.Artifact.Annotations["org.stellaops.provenance.dsse.sha256"]); + } + + private static string ComputeSha256File(string path) + { + using var stream = File.OpenRead(path); + var hash = SHA256.HashData(stream); + return $"sha256:{Convert.ToHexString(hash).ToLower(CultureInfo.InvariantCulture)}"; + } + + private static string ComputeExpectedDsse(string imageDigest, string sbomDigest, string nonce) + { + var payload = $"{imageDigest}\n{sbomDigest}\n{nonce}"; + Span hash = stackalloc byte[32]; + SHA256.HashData(Encoding.UTF8.GetBytes(payload), hash); + return $"sha256:{Convert.ToHexString(hash).ToLower(CultureInfo.InvariantCulture)}"; + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Manifest/BuildxPluginManifestLoaderTests.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Manifest/BuildxPluginManifestLoaderTests.cs new file mode 100644 index 00000000..11528e88 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/Manifest/BuildxPluginManifestLoaderTests.cs @@ -0,0 +1,80 @@ +using System.IO; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using StellaOps.Scanner.Sbomer.BuildXPlugin; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Manifest; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.TestUtilities; +using Xunit; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.Manifest; + +public sealed class 
BuildxPluginManifestLoaderTests +{ + private static readonly JsonSerializerOptions SerializerOptions = new(JsonSerializerDefaults.Web) + { + WriteIndented = true + }; + + [Fact] + public async Task LoadAsync_ReturnsManifestWithSourceInformation() + { + await using var temp = new TempDirectory(); + var manifestPath = System.IO.Path.Combine(temp.Path, "stellaops.manifest.json"); + await File.WriteAllTextAsync(manifestPath, BuildSampleManifestJson("stellaops.sbom-indexer")); + + var loader = new BuildxPluginManifestLoader(temp.Path); + var manifests = await loader.LoadAsync(CancellationToken.None); + + var manifest = Assert.Single(manifests); + Assert.Equal("stellaops.sbom-indexer", manifest.Id); + Assert.Equal("0.1.0", manifest.Version); + Assert.Equal(manifestPath, manifest.SourcePath); + Assert.Equal(Path.GetDirectoryName(manifestPath), manifest.SourceDirectory); + } + + [Fact] + public async Task LoadDefaultAsync_ThrowsWhenNoManifests() + { + await using var temp = new TempDirectory(); + var loader = new BuildxPluginManifestLoader(temp.Path); + + await Assert.ThrowsAsync(() => loader.LoadDefaultAsync(CancellationToken.None)); + } + + [Fact] + public async Task LoadAsync_ThrowsWhenRestartRequiredMissing() + { + await using var temp = new TempDirectory(); + var manifestPath = Path.Combine(temp.Path, "failure.manifest.json"); + await File.WriteAllTextAsync(manifestPath, BuildSampleManifestJson("stellaops.failure", requiresRestart: false)); + + var loader = new BuildxPluginManifestLoader(temp.Path); + + await Assert.ThrowsAsync(() => loader.LoadAsync(CancellationToken.None)); + } + + private static string BuildSampleManifestJson(string id, bool requiresRestart = true) + { + var manifest = new BuildxPluginManifest + { + SchemaVersion = BuildxPluginManifest.CurrentSchemaVersion, + Id = id, + DisplayName = "Sample", + Version = "0.1.0", + RequiresRestart = requiresRestart, + EntryPoint = new BuildxPluginEntryPoint + { + Type = "dotnet", + Executable = 
"StellaOps.Scanner.Sbomer.BuildXPlugin.dll" + }, + Cas = new BuildxPluginCas + { + Protocol = "filesystem", + DefaultRoot = "cas" + } + }; + + return JsonSerializer.Serialize(manifest, SerializerOptions); + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.csproj b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.csproj new file mode 100644 index 00000000..c250c620 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.csproj @@ -0,0 +1,11 @@ + + + net10.0 + enable + enable + + + + + + diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/TestUtilities/TempDirectory.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/TestUtilities/TempDirectory.cs new file mode 100644 index 00000000..f91445f1 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/TestUtilities/TempDirectory.cs @@ -0,0 +1,44 @@ +using System; +using System.IO; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.TestUtilities; + +internal sealed class TempDirectory : IDisposable, IAsyncDisposable +{ + public string Path { get; } + + public TempDirectory() + { + Path = System.IO.Path.Combine(System.IO.Path.GetTempPath(), $"stellaops-buildx-{Guid.NewGuid():N}"); + Directory.CreateDirectory(Path); + } + + public void Dispose() + { + Cleanup(); + GC.SuppressFinalize(this); + } + + public ValueTask DisposeAsync() + { + Cleanup(); + GC.SuppressFinalize(this); + return ValueTask.CompletedTask; + } + + private void Cleanup() + { + try + { + if (Directory.Exists(Path)) + { + Directory.Delete(Path, recursive: true); + } + } + catch + { + // Best effort cleanup only. 
+ } + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Attestation/AttestorClient.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Attestation/AttestorClient.cs new file mode 100644 index 00000000..adb07244 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Attestation/AttestorClient.cs @@ -0,0 +1,49 @@ +using System; +using System.Net.Http; +using System.Net.Http.Json; +using System.Threading; +using System.Threading.Tasks; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Attestation; + +/// +/// Sends provenance placeholders to the Attestor service for asynchronous DSSE signing. +/// +public sealed class AttestorClient +{ + private readonly HttpClient httpClient; + + public AttestorClient(HttpClient httpClient) + { + this.httpClient = httpClient ?? throw new ArgumentNullException(nameof(httpClient)); + } + + public async Task SendPlaceholderAsync(Uri attestorUri, DescriptorDocument document, CancellationToken cancellationToken) + { + if (attestorUri is null) + { + throw new ArgumentNullException(nameof(attestorUri)); + } + + if (document is null) + { + throw new ArgumentNullException(nameof(document)); + } + + var payload = new AttestorProvenanceRequest( + ImageDigest: document.Subject.Digest, + SbomDigest: document.Artifact.Digest, + ExpectedDsseSha256: document.Provenance.ExpectedDsseSha256, + Nonce: document.Provenance.Nonce, + PredicateType: document.Provenance.PredicateType, + Schema: document.Schema); + + using var response = await httpClient.PostAsJsonAsync(attestorUri, payload, cancellationToken).ConfigureAwait(false); + if (!response.IsSuccessStatusCode) + { + var body = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + throw new BuildxPluginException($"Attestor rejected provenance placeholder ({(int)response.StatusCode}): {body}"); + } + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Attestation/AttestorProvenanceRequest.cs 
b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Attestation/AttestorProvenanceRequest.cs new file mode 100644 index 00000000..baa32bce --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Attestation/AttestorProvenanceRequest.cs @@ -0,0 +1,11 @@ +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Attestation; + +public sealed record AttestorProvenanceRequest( + [property: JsonPropertyName("imageDigest")] string ImageDigest, + [property: JsonPropertyName("sbomDigest")] string SbomDigest, + [property: JsonPropertyName("expectedDsseSha256")] string ExpectedDsseSha256, + [property: JsonPropertyName("nonce")] string Nonce, + [property: JsonPropertyName("predicateType")] string PredicateType, + [property: JsonPropertyName("schema")] string Schema); diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/BuildxPluginException.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/BuildxPluginException.cs new file mode 100644 index 00000000..8af6d81e --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/BuildxPluginException.cs @@ -0,0 +1,19 @@ +using System; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin; + +/// +/// Represents user-facing errors raised by the BuildX plug-in. +/// +public sealed class BuildxPluginException : Exception +{ + public BuildxPluginException(string message) + : base(message) + { + } + + public BuildxPluginException(string message, Exception innerException) + : base(message, innerException) + { + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Cas/CasWriteResult.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Cas/CasWriteResult.cs new file mode 100644 index 00000000..12bcf67a --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Cas/CasWriteResult.cs @@ -0,0 +1,6 @@ +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Cas; + +/// +/// Result of persisting bytes into the local CAS. 
+/// +public sealed record CasWriteResult(string Algorithm, string Digest, string Path); diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Cas/LocalCasClient.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Cas/LocalCasClient.cs new file mode 100644 index 00000000..dd0058fc --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Cas/LocalCasClient.cs @@ -0,0 +1,74 @@ +using System; +using System.IO; +using System.Security.Cryptography; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Cas; + +/// +/// Minimal filesystem-backed CAS used when the BuildX generator runs inside CI. +/// +public sealed class LocalCasClient +{ + private readonly string rootDirectory; + private readonly string algorithm; + + public LocalCasClient(LocalCasOptions options) + { + if (options is null) + { + throw new ArgumentNullException(nameof(options)); + } + + algorithm = options.Algorithm.ToLowerInvariant(); + if (!string.Equals(algorithm, "sha256", StringComparison.OrdinalIgnoreCase)) + { + throw new ArgumentException("Only the sha256 algorithm is supported.", nameof(options)); + } + + rootDirectory = Path.GetFullPath(options.RootDirectory); + } + + public Task VerifyWriteAsync(CancellationToken cancellationToken) + { + ReadOnlyMemory probe = "stellaops-buildx-probe"u8.ToArray(); + return WriteAsync(probe, cancellationToken); + } + + public async Task WriteAsync(ReadOnlyMemory content, CancellationToken cancellationToken) + { + var digest = ComputeDigest(content.Span); + var path = BuildObjectPath(digest); + + Directory.CreateDirectory(Path.GetDirectoryName(path)!); + + await using var stream = new FileStream( + path, + FileMode.Create, + FileAccess.Write, + FileShare.Read, + bufferSize: 16 * 1024, + FileOptions.Asynchronous | FileOptions.SequentialScan); + + await stream.WriteAsync(content, cancellationToken).ConfigureAwait(false); + await stream.FlushAsync(cancellationToken).ConfigureAwait(false); + + return new 
CasWriteResult(algorithm, digest, path); + } + + private string BuildObjectPath(string digest) + { + // Layout: ///.bin + var prefix = digest.Substring(0, 2); + var suffix = digest[2..]; + return Path.Combine(rootDirectory, algorithm, prefix, $"{suffix}.bin"); + } + + private static string ComputeDigest(ReadOnlySpan content) + { + Span buffer = stackalloc byte[32]; + SHA256.HashData(content, buffer); + return Convert.ToHexString(buffer).ToLowerInvariant(); + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Cas/LocalCasOptions.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Cas/LocalCasOptions.cs new file mode 100644 index 00000000..af681cf0 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Cas/LocalCasOptions.cs @@ -0,0 +1,40 @@ +using System; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Cas; + +/// +/// Configuration for the on-disk content-addressable store used during CI. +/// +public sealed record LocalCasOptions +{ + private string rootDirectory = string.Empty; + private string algorithm = "sha256"; + + public string RootDirectory + { + get => rootDirectory; + init + { + if (string.IsNullOrWhiteSpace(value)) + { + throw new ArgumentException("Root directory must be provided.", nameof(value)); + } + + rootDirectory = value; + } + } + + public string Algorithm + { + get => algorithm; + init + { + if (string.IsNullOrWhiteSpace(value)) + { + throw new ArgumentException("Algorithm must be provided.", nameof(value)); + } + + algorithm = value; + } + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorArtifact.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorArtifact.cs new file mode 100644 index 00000000..53879558 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorArtifact.cs @@ -0,0 +1,13 @@ +using System.Collections.Generic; +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; + +/// +/// Represents an OCI 
artifact descriptor emitted by the BuildX generator. +/// +public sealed record DescriptorArtifact( + [property: JsonPropertyName("mediaType")] string MediaType, + [property: JsonPropertyName("digest")] string Digest, + [property: JsonPropertyName("size")] long Size, + [property: JsonPropertyName("annotations")] IReadOnlyDictionary Annotations); diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorDocument.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorDocument.cs new file mode 100644 index 00000000..4c07288b --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorDocument.cs @@ -0,0 +1,17 @@ +using System; +using System.Collections.Generic; +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; + +/// +/// Root payload describing BuildX generator output with provenance placeholders. +/// +public sealed record DescriptorDocument( + [property: JsonPropertyName("schema")] string Schema, + [property: JsonPropertyName("generatedAt")] DateTimeOffset GeneratedAt, + [property: JsonPropertyName("generator")] DescriptorGeneratorMetadata Generator, + [property: JsonPropertyName("subject")] DescriptorSubject Subject, + [property: JsonPropertyName("artifact")] DescriptorArtifact Artifact, + [property: JsonPropertyName("provenance")] DescriptorProvenance Provenance, + [property: JsonPropertyName("metadata")] IReadOnlyDictionary Metadata); diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorGenerator.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorGenerator.cs new file mode 100644 index 00000000..de88c7be --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorGenerator.cs @@ -0,0 +1,180 @@ +using System; +using System.Collections.Generic; +using System.Globalization; +using System.IO; +using System.Security.Cryptography; +using System.Text; +using System.Threading; +using 
System.Threading.Tasks; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; + +/// +/// Builds immutable OCI descriptors enriched with provenance placeholders. +/// +public sealed class DescriptorGenerator +{ + public const string Schema = "stellaops.buildx.descriptor.v1"; + + private readonly TimeProvider timeProvider; + + public DescriptorGenerator(TimeProvider timeProvider) + { + timeProvider ??= TimeProvider.System; + this.timeProvider = timeProvider; + } + + public async Task CreateAsync(DescriptorRequest request, CancellationToken cancellationToken) + { + if (request is null) + { + throw new ArgumentNullException(nameof(request)); + } + + if (string.IsNullOrWhiteSpace(request.ImageDigest)) + { + throw new BuildxPluginException("Image digest must be provided."); + } + + if (string.IsNullOrWhiteSpace(request.SbomPath)) + { + throw new BuildxPluginException("SBOM path must be provided."); + } + + var sbomFile = new FileInfo(request.SbomPath); + if (!sbomFile.Exists) + { + throw new BuildxPluginException($"SBOM file '{request.SbomPath}' was not found."); + } + + var sbomDigest = await ComputeFileDigestAsync(sbomFile, cancellationToken).ConfigureAwait(false); + + var nonce = Guid.NewGuid().ToString("N", CultureInfo.InvariantCulture); + var expectedDsseSha = ComputeExpectedDsseDigest(request.ImageDigest, sbomDigest, nonce); + + var artifactAnnotations = BuildArtifactAnnotations(request, nonce, expectedDsseSha); + + var subject = new DescriptorSubject( + MediaType: request.SubjectMediaType, + Digest: request.ImageDigest); + + var artifact = new DescriptorArtifact( + MediaType: request.SbomMediaType, + Digest: sbomDigest, + Size: sbomFile.Length, + Annotations: artifactAnnotations); + + var provenance = new DescriptorProvenance( + Status: "pending", + ExpectedDsseSha256: expectedDsseSha, + Nonce: nonce, + AttestorUri: request.AttestorUri, + PredicateType: request.PredicateType); + + var generatorMetadata = new DescriptorGeneratorMetadata( + Name: 
request.GeneratorName ?? "StellaOps.Scanner.Sbomer.BuildXPlugin", + Version: request.GeneratorVersion); + + var metadata = BuildDocumentMetadata(request, sbomFile, sbomDigest); + + return new DescriptorDocument( + Schema: Schema, + GeneratedAt: timeProvider.GetUtcNow(), + Generator: generatorMetadata, + Subject: subject, + Artifact: artifact, + Provenance: provenance, + Metadata: metadata); + } + + private static async Task ComputeFileDigestAsync(FileInfo file, CancellationToken cancellationToken) + { + await using var stream = new FileStream( + file.FullName, + FileMode.Open, + FileAccess.Read, + FileShare.Read, + bufferSize: 128 * 1024, + FileOptions.Asynchronous | FileOptions.SequentialScan); + + using var hash = IncrementalHash.CreateHash(HashAlgorithmName.SHA256); + + var buffer = new byte[128 * 1024]; + int bytesRead; + while ((bytesRead = await stream.ReadAsync(buffer.AsMemory(0, buffer.Length), cancellationToken).ConfigureAwait(false)) > 0) + { + hash.AppendData(buffer, 0, bytesRead); + } + + var digest = hash.GetHashAndReset(); + return $"sha256:{Convert.ToHexString(digest).ToLowerInvariant()}"; + } + + private static string ComputeExpectedDsseDigest(string imageDigest, string sbomDigest, string nonce) + { + var payload = $"{imageDigest}\n{sbomDigest}\n{nonce}"; + var bytes = System.Text.Encoding.UTF8.GetBytes(payload); + Span hash = stackalloc byte[32]; + SHA256.HashData(bytes, hash); + return $"sha256:{Convert.ToHexString(hash).ToLowerInvariant()}"; + } + + private static IReadOnlyDictionary BuildArtifactAnnotations(DescriptorRequest request, string nonce, string expectedDsse) + { + var annotations = new Dictionary(StringComparer.OrdinalIgnoreCase) + { + ["org.opencontainers.artifact.type"] = request.SbomArtifactType, + ["org.stellaops.scanner.version"] = request.GeneratorVersion, + ["org.stellaops.sbom.kind"] = request.SbomKind, + ["org.stellaops.sbom.format"] = request.SbomFormat, + ["org.stellaops.provenance.status"] = "pending", + 
["org.stellaops.provenance.dsse.sha256"] = expectedDsse, + ["org.stellaops.provenance.nonce"] = nonce + }; + + if (!string.IsNullOrWhiteSpace(request.LicenseId)) + { + annotations["org.stellaops.license.id"] = request.LicenseId!; + } + + if (!string.IsNullOrWhiteSpace(request.SbomName)) + { + annotations["org.opencontainers.image.title"] = request.SbomName!; + } + + if (!string.IsNullOrWhiteSpace(request.Repository)) + { + annotations["org.stellaops.repository"] = request.Repository!; + } + + return annotations; + } + + private static IReadOnlyDictionary BuildDocumentMetadata(DescriptorRequest request, FileInfo fileInfo, string sbomDigest) + { + var metadata = new Dictionary(StringComparer.OrdinalIgnoreCase) + { + ["sbomDigest"] = sbomDigest, + ["sbomPath"] = fileInfo.FullName, + ["sbomMediaType"] = request.SbomMediaType, + ["subjectMediaType"] = request.SubjectMediaType + }; + + if (!string.IsNullOrWhiteSpace(request.Repository)) + { + metadata["repository"] = request.Repository!; + } + + if (!string.IsNullOrWhiteSpace(request.BuildRef)) + { + metadata["buildRef"] = request.BuildRef!; + } + + if (!string.IsNullOrWhiteSpace(request.AttestorUri)) + { + metadata["attestorUri"] = request.AttestorUri!; + } + + return metadata; + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorGeneratorMetadata.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorGeneratorMetadata.cs new file mode 100644 index 00000000..074921eb --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorGeneratorMetadata.cs @@ -0,0 +1,7 @@ +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; + +public sealed record DescriptorGeneratorMetadata( + [property: JsonPropertyName("name")] string Name, + [property: JsonPropertyName("version")] string Version); diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorProvenance.cs 
b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorProvenance.cs new file mode 100644 index 00000000..12534cfd --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorProvenance.cs @@ -0,0 +1,13 @@ +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; + +/// +/// Provenance placeholders that the Attestor will fulfil post-build. +/// +public sealed record DescriptorProvenance( + [property: JsonPropertyName("status")] string Status, + [property: JsonPropertyName("expectedDsseSha256")] string ExpectedDsseSha256, + [property: JsonPropertyName("nonce")] string Nonce, + [property: JsonPropertyName("attestorUri")] string? AttestorUri, + [property: JsonPropertyName("predicateType")] string PredicateType); diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorRequest.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorRequest.cs new file mode 100644 index 00000000..3fac81b6 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorRequest.cs @@ -0,0 +1,45 @@ +using System; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; + +/// +/// Request for generating BuildX descriptor artifacts. +/// +public sealed record DescriptorRequest +{ + public string ImageDigest { get; init; } = string.Empty; + public string SbomPath { get; init; } = string.Empty; + public string SbomMediaType { get; init; } = "application/vnd.cyclonedx+json"; + public string SbomFormat { get; init; } = "cyclonedx-json"; + public string SbomArtifactType { get; init; } = "application/vnd.stellaops.sbom.layer+json"; + public string SbomKind { get; init; } = "inventory"; + public string SubjectMediaType { get; init; } = "application/vnd.oci.image.manifest.v1+json"; + public string GeneratorVersion { get; init; } = "0.0.0"; + public string? GeneratorName { get; init; } + public string? LicenseId { get; init; } + public string? 
SbomName { get; init; } + public string? Repository { get; init; } + public string? BuildRef { get; init; } + public string? AttestorUri { get; init; } + public string PredicateType { get; init; } = "https://slsa.dev/provenance/v1"; + + public DescriptorRequest Validate() + { + if (string.IsNullOrWhiteSpace(ImageDigest)) + { + throw new BuildxPluginException("Image digest is required."); + } + + if (!ImageDigest.Contains(':', StringComparison.Ordinal)) + { + throw new BuildxPluginException("Image digest must include the algorithm prefix, e.g. 'sha256:...'."); + } + + if (string.IsNullOrWhiteSpace(SbomPath)) + { + throw new BuildxPluginException("SBOM path is required."); + } + + return this; + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorSubject.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorSubject.cs new file mode 100644 index 00000000..02d3f6c8 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Descriptor/DescriptorSubject.cs @@ -0,0 +1,7 @@ +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; + +public sealed record DescriptorSubject( + [property: JsonPropertyName("mediaType")] string MediaType, + [property: JsonPropertyName("digest")] string Digest); diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginCas.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginCas.cs new file mode 100644 index 00000000..5987d168 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginCas.cs @@ -0,0 +1,18 @@ +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Manifest; + +/// +/// Describes default Content Addressable Storage configuration for the plug-in. 
+/// +public sealed record BuildxPluginCas +{ + [JsonPropertyName("protocol")] + public string Protocol { get; init; } = "filesystem"; + + [JsonPropertyName("defaultRoot")] + public string DefaultRoot { get; init; } = "cas"; + + [JsonPropertyName("compression")] + public string Compression { get; init; } = "zstd"; +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginEntryPoint.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginEntryPoint.cs new file mode 100644 index 00000000..93f95ee0 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginEntryPoint.cs @@ -0,0 +1,20 @@ +using System; +using System.Collections.Generic; +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Manifest; + +/// +/// Describes how the buildx plug-in executable should be invoked. +/// +public sealed record BuildxPluginEntryPoint +{ + [JsonPropertyName("type")] + public string Type { get; init; } = "dotnet"; + + [JsonPropertyName("executable")] + public string Executable { get; init; } = string.Empty; + + [JsonPropertyName("arguments")] + public IReadOnlyList Arguments { get; init; } = Array.Empty(); +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginImage.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginImage.cs new file mode 100644 index 00000000..acd0754c --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginImage.cs @@ -0,0 +1,20 @@ +using System; +using System.Collections.Generic; +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Manifest; + +/// +/// Provides distribution information for the container image form-factor. +/// +public sealed record BuildxPluginImage +{ + [JsonPropertyName("name")] + public string Name { get; init; } = string.Empty; + + [JsonPropertyName("digest")] + public string? 
Digest { get; init; } + + [JsonPropertyName("platforms")] + public IReadOnlyList Platforms { get; init; } = Array.Empty(); +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginManifest.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginManifest.cs new file mode 100644 index 00000000..8883291e --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginManifest.cs @@ -0,0 +1,49 @@ +using System; +using System.Collections.Generic; +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Manifest; + +/// +/// Canonical manifest describing a buildx generator plug-in. +/// +public sealed record BuildxPluginManifest +{ + public const string CurrentSchemaVersion = "1.0"; + + [JsonPropertyName("schemaVersion")] + public string SchemaVersion { get; init; } = CurrentSchemaVersion; + + [JsonPropertyName("id")] + public string Id { get; init; } = string.Empty; + + [JsonPropertyName("displayName")] + public string DisplayName { get; init; } = string.Empty; + + [JsonPropertyName("version")] + public string Version { get; init; } = string.Empty; + + [JsonPropertyName("entryPoint")] + public BuildxPluginEntryPoint EntryPoint { get; init; } = new(); + + [JsonPropertyName("requiresRestart")] + public bool RequiresRestart { get; init; } = true; + + [JsonPropertyName("capabilities")] + public IReadOnlyList Capabilities { get; init; } = Array.Empty(); + + [JsonPropertyName("cas")] + public BuildxPluginCas Cas { get; init; } = new(); + + [JsonPropertyName("image")] + public BuildxPluginImage? Image { get; init; } + + [JsonPropertyName("metadata")] + public IReadOnlyDictionary? Metadata { get; init; } + + [JsonIgnore] + public string? SourcePath { get; init; } + + [JsonIgnore] + public string? 
SourceDirectory { get; init; } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginManifestLoader.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginManifestLoader.cs new file mode 100644 index 00000000..3bbbaa39 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Manifest/BuildxPluginManifestLoader.cs @@ -0,0 +1,189 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin.Manifest; + +/// +/// Loads buildx plug-in manifests from the restart-time plug-in directory. +/// +public sealed class BuildxPluginManifestLoader +{ + public const string DefaultSearchPattern = "*.manifest.json"; + + private static readonly JsonSerializerOptions SerializerOptions = new(JsonSerializerDefaults.Web) + { + AllowTrailingCommas = true, + ReadCommentHandling = JsonCommentHandling.Skip, + PropertyNameCaseInsensitive = true + }; + + private readonly string manifestDirectory; + private readonly string searchPattern; + + public BuildxPluginManifestLoader(string manifestDirectory, string? searchPattern = null) + { + if (string.IsNullOrWhiteSpace(manifestDirectory)) + { + throw new ArgumentException("Manifest directory is required.", nameof(manifestDirectory)); + } + + this.manifestDirectory = Path.GetFullPath(manifestDirectory); + this.searchPattern = string.IsNullOrWhiteSpace(searchPattern) + ? DefaultSearchPattern + : searchPattern; + } + + /// + /// Loads all manifests in the configured directory. 
+ /// + public async Task> LoadAsync(CancellationToken cancellationToken) + { + if (!Directory.Exists(manifestDirectory)) + { + return Array.Empty(); + } + + var manifests = new List(); + + foreach (var file in Directory.EnumerateFiles(manifestDirectory, searchPattern, SearchOption.TopDirectoryOnly)) + { + if (IsHiddenPath(file)) + { + continue; + } + + var manifest = await DeserializeManifestAsync(file, cancellationToken).ConfigureAwait(false); + manifests.Add(manifest); + } + + return manifests + .OrderBy(static m => m.Id, StringComparer.OrdinalIgnoreCase) + .ThenBy(static m => m.Version, StringComparer.OrdinalIgnoreCase) + .ToArray(); + } + + /// + /// Loads the manifest with the specified identifier. + /// + public async Task LoadByIdAsync(string manifestId, CancellationToken cancellationToken) + { + if (string.IsNullOrWhiteSpace(manifestId)) + { + throw new ArgumentException("Manifest identifier is required.", nameof(manifestId)); + } + + var manifests = await LoadAsync(cancellationToken).ConfigureAwait(false); + var manifest = manifests.FirstOrDefault(m => string.Equals(m.Id, manifestId, StringComparison.OrdinalIgnoreCase)); + if (manifest is null) + { + throw new BuildxPluginException($"Buildx plug-in manifest '{manifestId}' was not found in '{manifestDirectory}'."); + } + + return manifest; + } + + /// + /// Loads the first available manifest. 
+ /// + public async Task LoadDefaultAsync(CancellationToken cancellationToken) + { + var manifests = await LoadAsync(cancellationToken).ConfigureAwait(false); + if (manifests.Count == 0) + { + throw new BuildxPluginException($"No buildx plug-in manifests were discovered under '{manifestDirectory}'."); + } + + return manifests[0]; + } + + private static bool IsHiddenPath(string path) + { + var directory = Path.GetDirectoryName(path); + while (!string.IsNullOrEmpty(directory)) + { + var segment = Path.GetFileName(directory); + if (segment.StartsWith(".", StringComparison.Ordinal)) + { + return true; + } + + directory = Path.GetDirectoryName(directory); + } + + return false; + } + + private static async Task DeserializeManifestAsync(string file, CancellationToken cancellationToken) + { + await using var stream = new FileStream(file, FileMode.Open, FileAccess.Read, FileShare.Read, 4096, FileOptions.Asynchronous); + BuildxPluginManifest? manifest; + + try + { + manifest = await JsonSerializer.DeserializeAsync(stream, SerializerOptions, cancellationToken) + .ConfigureAwait(false); + } + catch (JsonException ex) + { + throw new BuildxPluginException($"Failed to parse manifest '{file}'.", ex); + } + + if (manifest is null) + { + throw new BuildxPluginException($"Manifest '{file}' is empty or invalid."); + } + + ValidateManifest(manifest, file); + + var directory = Path.GetDirectoryName(file); + return manifest with + { + SourcePath = file, + SourceDirectory = directory + }; + } + + private static void ValidateManifest(BuildxPluginManifest manifest, string file) + { + if (!string.Equals(manifest.SchemaVersion, BuildxPluginManifest.CurrentSchemaVersion, StringComparison.OrdinalIgnoreCase)) + { + throw new BuildxPluginException( + $"Manifest '{file}' uses unsupported schema version '{manifest.SchemaVersion}'. 
Expected '{BuildxPluginManifest.CurrentSchemaVersion}'."); + } + + if (string.IsNullOrWhiteSpace(manifest.Id)) + { + throw new BuildxPluginException($"Manifest '{file}' must specify a non-empty 'id'."); + } + + if (manifest.EntryPoint is null) + { + throw new BuildxPluginException($"Manifest '{file}' must specify an 'entryPoint'."); + } + + if (string.IsNullOrWhiteSpace(manifest.EntryPoint.Executable)) + { + throw new BuildxPluginException($"Manifest '{file}' must specify an executable entry point."); + } + + if (!manifest.RequiresRestart) + { + throw new BuildxPluginException($"Manifest '{file}' must enforce restart-required activation."); + } + + if (manifest.Cas is null) + { + throw new BuildxPluginException($"Manifest '{file}' must define CAS defaults."); + } + + if (string.IsNullOrWhiteSpace(manifest.Cas.DefaultRoot)) + { + throw new BuildxPluginException($"Manifest '{file}' must specify a CAS default root directory."); + } + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Program.cs b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Program.cs new file mode 100644 index 00000000..38efd2e5 --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/Program.cs @@ -0,0 +1,327 @@ +using System; +using System.IO; +using System.Linq; +using System.Reflection; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using System.Net.Http; +using System.Net.Http.Headers; +using System.Text.Json.Serialization; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Attestation; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Cas; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Descriptor; +using StellaOps.Scanner.Sbomer.BuildXPlugin.Manifest; + +namespace StellaOps.Scanner.Sbomer.BuildXPlugin; + +internal static class Program +{ + private static readonly JsonSerializerOptions ManifestPrintOptions = new(JsonSerializerDefaults.Web) + { + WriteIndented = true, + DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull + }; + + private static readonly 
JsonSerializerOptions DescriptorJsonOptions = new(JsonSerializerDefaults.Web) + { + WriteIndented = true, + DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull + }; + + private static async Task Main(string[] args) + { + using var cancellation = new CancellationTokenSource(); + Console.CancelKeyPress += (_, eventArgs) => + { + eventArgs.Cancel = true; + cancellation.Cancel(); + }; + + var command = args.Length > 0 ? args[0].ToLowerInvariant() : "handshake"; + var commandArgs = args.Skip(1).ToArray(); + + try + { + return command switch + { + "handshake" => await RunHandshakeAsync(commandArgs, cancellation.Token).ConfigureAwait(false), + "manifest" => await RunManifestAsync(commandArgs, cancellation.Token).ConfigureAwait(false), + "descriptor" or "annotate" => await RunDescriptorAsync(commandArgs, cancellation.Token).ConfigureAwait(false), + "version" => RunVersion(), + "help" or "--help" or "-h" => PrintHelp(), + _ => UnknownCommand(command) + }; + } + catch (OperationCanceledException) + { + Console.Error.WriteLine("Operation cancelled."); + return 130; + } + catch (BuildxPluginException ex) + { + Console.Error.WriteLine(ex.Message); + return 2; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Unhandled error: {ex}"); + return 1; + } + } + + private static async Task RunHandshakeAsync(string[] args, CancellationToken cancellationToken) + { + var manifestDirectory = ResolveManifestDirectory(args); + var loader = new BuildxPluginManifestLoader(manifestDirectory); + var manifest = await loader.LoadDefaultAsync(cancellationToken).ConfigureAwait(false); + + var casRoot = ResolveCasRoot(args, manifest); + var casClient = new LocalCasClient(new LocalCasOptions + { + RootDirectory = casRoot, + Algorithm = "sha256" + }); + + var result = await casClient.VerifyWriteAsync(cancellationToken).ConfigureAwait(false); + + Console.WriteLine($"handshake ok: {manifest.Id}@{manifest.Version} → {result.Algorithm}:{result.Digest}"); + Console.WriteLine(result.Path); 
+ return 0; + } + + private static async Task RunManifestAsync(string[] args, CancellationToken cancellationToken) + { + var manifestDirectory = ResolveManifestDirectory(args); + var loader = new BuildxPluginManifestLoader(manifestDirectory); + var manifest = await loader.LoadDefaultAsync(cancellationToken).ConfigureAwait(false); + + var json = JsonSerializer.Serialize(manifest, ManifestPrintOptions); + Console.WriteLine(json); + return 0; + } + + private static int RunVersion() + { + var assembly = Assembly.GetExecutingAssembly(); + var version = assembly.GetCustomAttribute()?.InformationalVersion + ?? assembly.GetName().Version?.ToString() + ?? "unknown"; + Console.WriteLine(version); + return 0; + } + + private static int PrintHelp() + { + Console.WriteLine("StellaOps BuildX SBOM generator"); + Console.WriteLine("Usage:"); + Console.WriteLine(" stellaops-buildx [handshake|manifest|descriptor|version]"); + Console.WriteLine(); + Console.WriteLine("Commands:"); + Console.WriteLine(" handshake Probe the local CAS and ensure manifests are discoverable."); + Console.WriteLine(" manifest Print the resolved manifest JSON."); + Console.WriteLine(" descriptor Emit OCI descriptor + provenance placeholder for the provided SBOM."); + Console.WriteLine(" version Print the plug-in version."); + Console.WriteLine(); + Console.WriteLine("Options:"); + Console.WriteLine(" --manifest Override the manifest directory."); + Console.WriteLine(" --cas Override the CAS root directory."); + Console.WriteLine(" --image (descriptor) Image digest the SBOM belongs to."); + Console.WriteLine(" --sbom (descriptor) Path to the SBOM file to describe."); + Console.WriteLine(" --attestor (descriptor) Optional Attestor endpoint for provenance placeholders."); + Console.WriteLine(" --attestor-token Bearer token for Attestor requests (or STELLAOPS_ATTESTOR_TOKEN)."); + Console.WriteLine(" --attestor-insecure Skip TLS verification for Attestor requests (dev/test only)."); + return 0; + } + + private 
static int UnknownCommand(string command) + { + Console.Error.WriteLine($"Unknown command '{command}'. Use 'help' for usage."); + return 1; + } + + private static string ResolveManifestDirectory(string[] args) + { + var explicitPath = GetOption(args, "--manifest") + ?? Environment.GetEnvironmentVariable("STELLAOPS_BUILDX_MANIFEST_DIR"); + + if (!string.IsNullOrWhiteSpace(explicitPath)) + { + return Path.GetFullPath(explicitPath); + } + + var defaultDirectory = Path.Combine(AppContext.BaseDirectory, "plugins", "scanner", "buildx"); + if (Directory.Exists(defaultDirectory)) + { + return defaultDirectory; + } + + return AppContext.BaseDirectory; + } + + private static string ResolveCasRoot(string[] args, BuildxPluginManifest manifest) + { + var overrideValue = GetOption(args, "--cas") + ?? Environment.GetEnvironmentVariable("STELLAOPS_SCANNER_CAS_ROOT"); + + if (!string.IsNullOrWhiteSpace(overrideValue)) + { + return Path.GetFullPath(overrideValue); + } + + var manifestDefault = manifest.Cas.DefaultRoot; + if (!string.IsNullOrWhiteSpace(manifestDefault)) + { + if (Path.IsPathRooted(manifestDefault)) + { + return Path.GetFullPath(manifestDefault); + } + + var baseDirectory = manifest.SourceDirectory ?? AppContext.BaseDirectory; + return Path.GetFullPath(Path.Combine(baseDirectory, manifestDefault)); + } + + return Path.Combine(AppContext.BaseDirectory, "cas"); + } + + private static async Task RunDescriptorAsync(string[] args, CancellationToken cancellationToken) + { + var imageDigest = RequireOption(args, "--image"); + var sbomPath = RequireOption(args, "--sbom"); + + var sbomMediaType = GetOption(args, "--media-type") ?? "application/vnd.cyclonedx+json"; + var sbomFormat = GetOption(args, "--sbom-format") ?? "cyclonedx-json"; + var sbomKind = GetOption(args, "--sbom-kind") ?? "inventory"; + var artifactType = GetOption(args, "--artifact-type") ?? "application/vnd.stellaops.sbom.layer+json"; + var subjectMediaType = GetOption(args, "--subject-media-type") ?? 
"application/vnd.oci.image.manifest.v1+json"; + var predicateType = GetOption(args, "--predicate-type") ?? "https://slsa.dev/provenance/v1"; + var licenseId = GetOption(args, "--license-id") ?? Environment.GetEnvironmentVariable("STELLAOPS_LICENSE_ID"); + var repository = GetOption(args, "--repository"); + var buildRef = GetOption(args, "--build-ref"); + var sbomName = GetOption(args, "--sbom-name") ?? Path.GetFileName(sbomPath); + + var attestorUriText = GetOption(args, "--attestor") ?? Environment.GetEnvironmentVariable("STELLAOPS_ATTESTOR_URL"); + var attestorToken = GetOption(args, "--attestor-token") ?? Environment.GetEnvironmentVariable("STELLAOPS_ATTESTOR_TOKEN"); + var attestorInsecure = GetFlag(args, "--attestor-insecure") + || string.Equals(Environment.GetEnvironmentVariable("STELLAOPS_ATTESTOR_INSECURE"), "true", StringComparison.OrdinalIgnoreCase); + Uri? attestorUri = null; + if (!string.IsNullOrWhiteSpace(attestorUriText)) + { + attestorUri = new Uri(attestorUriText, UriKind.Absolute); + } + + var assembly = Assembly.GetExecutingAssembly(); + var version = assembly.GetCustomAttribute()?.InformationalVersion + ?? assembly.GetName().Version?.ToString() + ?? 
"0.0.0"; + + var request = new DescriptorRequest + { + ImageDigest = imageDigest, + SbomPath = sbomPath, + SbomMediaType = sbomMediaType, + SbomFormat = sbomFormat, + SbomKind = sbomKind, + SbomArtifactType = artifactType, + SubjectMediaType = subjectMediaType, + PredicateType = predicateType, + GeneratorVersion = version, + GeneratorName = assembly.GetName().Name, + LicenseId = licenseId, + SbomName = sbomName, + Repository = repository, + BuildRef = buildRef, + AttestorUri = attestorUri?.ToString() + }.Validate(); + + var generator = new DescriptorGenerator(TimeProvider.System); + var document = await generator.CreateAsync(request, cancellationToken).ConfigureAwait(false); + + if (attestorUri is not null) + { + using var httpClient = CreateAttestorHttpClient(attestorUri, attestorToken, attestorInsecure); + var attestorClient = new AttestorClient(httpClient); + await attestorClient.SendPlaceholderAsync(attestorUri, document, cancellationToken).ConfigureAwait(false); + } + + var json = JsonSerializer.Serialize(document, DescriptorJsonOptions); + Console.WriteLine(json); + return 0; + } + + private static string? 
GetOption(string[] args, string optionName) + { + for (var i = 0; i < args.Length; i++) + { + var argument = args[i]; + if (string.Equals(argument, optionName, StringComparison.OrdinalIgnoreCase)) + { + if (i + 1 >= args.Length) + { + throw new BuildxPluginException($"Option '{optionName}' requires a value."); + } + + return args[i + 1]; + } + + if (argument.StartsWith(optionName + "=", StringComparison.OrdinalIgnoreCase)) + { + return argument[(optionName.Length + 1)..]; + } + } + + return null; + } + + private static bool GetFlag(string[] args, string optionName) + { + foreach (var argument in args) + { + if (string.Equals(argument, optionName, StringComparison.OrdinalIgnoreCase)) + { + return true; + } + } + + return false; + } + + private static string RequireOption(string[] args, string optionName) + { + var value = GetOption(args, optionName); + if (string.IsNullOrWhiteSpace(value)) + { + throw new BuildxPluginException($"Option '{optionName}' is required."); + } + + return value; + } + + private static HttpClient CreateAttestorHttpClient(Uri attestorUri, string? bearerToken, bool insecure) + { + var handler = new HttpClientHandler + { + CheckCertificateRevocationList = true, + }; + + if (insecure && string.Equals(attestorUri.Scheme, Uri.UriSchemeHttps, StringComparison.OrdinalIgnoreCase)) + { +#pragma warning disable S4830 // Explicitly gated by --attestor-insecure flag/env for dev/test usage. 
+ handler.ServerCertificateCustomValidationCallback = (_, _, _, _) => true; +#pragma warning restore S4830 + } + + var client = new HttpClient(handler, disposeHandler: true) + { + Timeout = TimeSpan.FromSeconds(30) + }; + client.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json")); + + if (!string.IsNullOrWhiteSpace(bearerToken)) + { + client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", bearerToken); + } + + return client; + } +} diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/StellaOps.Scanner.Sbomer.BuildXPlugin.csproj b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/StellaOps.Scanner.Sbomer.BuildXPlugin.csproj index 1094c46d..19fd8054 100644 --- a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/StellaOps.Scanner.Sbomer.BuildXPlugin.csproj +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/StellaOps.Scanner.Sbomer.BuildXPlugin.csproj @@ -4,5 +4,17 @@ enable enable Exe + StellaOps.Scanner.Sbomer.BuildXPlugin + StellaOps.Scanner.Sbomer.BuildXPlugin + 0.1.0-alpha + 0.1.0.0 + 0.1.0.0 + 0.1.0-alpha + + + + PreserveNewest + + diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md index 8cd30819..58015509 100644 --- a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/TASKS.md @@ -2,6 +2,6 @@ | ID | Status | Owner(s) | Depends on | Description | Exit Criteria | |----|--------|----------|------------|-------------|---------------| -| SP9-BLDX-09-001 | TODO | BuildX Guild | SCANNER-EMIT-10-601 (awareness) | Scaffold buildx driver, manifest, local CAS handshake; ensure plugin loads from `plugins/scanner/buildx/`. | Plugin manifest + loader tests; local CAS writes succeed; restart required to activate. | -| SP9-BLDX-09-002 | TODO | BuildX Guild | SP9-BLDX-09-001 | Emit OCI annotations + provenance metadata for Attestor handoff (image + SBOM). 
| OCI descriptors include DSSE/provenance placeholders; Attestor mock accepts payload. | -| SP9-BLDX-09-003 | TODO | BuildX Guild | SP9-BLDX-09-002 | CI demo pipeline: build sample image, produce SBOM, verify backend report wiring. | GitHub/CI job runs sample build within 5 s overhead; artifacts saved; documentation updated. | +| SP9-BLDX-09-001 | DONE | BuildX Guild | SCANNER-EMIT-10-601 (awareness) | Scaffold buildx driver, manifest, local CAS handshake; ensure plugin loads from `plugins/scanner/buildx/`. | Plugin manifest + loader tests; local CAS writes succeed; restart required to activate. | +| SP9-BLDX-09-002 | DONE | BuildX Guild | SP9-BLDX-09-001 | Emit OCI annotations + provenance metadata for Attestor handoff (image + SBOM). | OCI descriptors include DSSE/provenance placeholders; Attestor mock accepts payload. | +| SP9-BLDX-09-003 | DONE | BuildX Guild | SP9-BLDX-09-002 | CI demo pipeline: build sample image, produce SBOM, verify backend report wiring. | GitHub/CI job runs sample build within 5 s overhead; artifacts saved; documentation updated. 
| diff --git a/src/StellaOps.Scanner.Sbomer.BuildXPlugin/stellaops.sbom-indexer.manifest.json b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/stellaops.sbom-indexer.manifest.json new file mode 100644 index 00000000..cabadebd --- /dev/null +++ b/src/StellaOps.Scanner.Sbomer.BuildXPlugin/stellaops.sbom-indexer.manifest.json @@ -0,0 +1,35 @@ +{ + "schemaVersion": "1.0", + "id": "stellaops.sbom-indexer", + "displayName": "StellaOps SBOM BuildX Generator", + "version": "0.1.0-alpha", + "requiresRestart": true, + "entryPoint": { + "type": "dotnet", + "executable": "StellaOps.Scanner.Sbomer.BuildXPlugin.dll", + "arguments": [ + "handshake" + ] + }, + "capabilities": [ + "generator", + "sbom" + ], + "cas": { + "protocol": "filesystem", + "defaultRoot": "cas", + "compression": "zstd" + }, + "image": { + "name": "stellaops/sbom-indexer", + "digest": null, + "platforms": [ + "linux/amd64", + "linux/arm64" + ] + }, + "metadata": { + "org.stellaops.plugin.kind": "buildx-generator", + "org.stellaops.restart.required": "true" + } +} diff --git a/src/StellaOps.Scanner.Storage.Tests/InMemoryArtifactObjectStore.cs b/src/StellaOps.Scanner.Storage.Tests/InMemoryArtifactObjectStore.cs new file mode 100644 index 00000000..33bc37b2 --- /dev/null +++ b/src/StellaOps.Scanner.Storage.Tests/InMemoryArtifactObjectStore.cs @@ -0,0 +1,34 @@ +using System.Collections.Concurrent; +using StellaOps.Scanner.Storage.ObjectStore; + +namespace StellaOps.Scanner.Storage.Tests; + +internal sealed class InMemoryArtifactObjectStore : IArtifactObjectStore +{ + private readonly ConcurrentDictionary<(string Bucket, string Key), byte[]> _objects = new(); + + public IReadOnlyDictionary<(string Bucket, string Key), byte[]> Objects => _objects; + + public Task DeleteAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken) + { + _objects.TryRemove((descriptor.Bucket, descriptor.Key), out _); + return Task.CompletedTask; + } + + public Task GetAsync(ArtifactObjectDescriptor descriptor, 
CancellationToken cancellationToken) + { + if (_objects.TryGetValue((descriptor.Bucket, descriptor.Key), out var bytes)) + { + return Task.FromResult(new MemoryStream(bytes, writable: false)); + } + + return Task.FromResult(null); + } + + public async Task PutAsync(ArtifactObjectDescriptor descriptor, Stream content, CancellationToken cancellationToken) + { + using var buffer = new MemoryStream(); + await content.CopyToAsync(buffer, cancellationToken).ConfigureAwait(false); + _objects[(descriptor.Bucket, descriptor.Key)] = buffer.ToArray(); + } +} diff --git a/src/StellaOps.Scanner.Storage.Tests/ScannerMongoFixture.cs b/src/StellaOps.Scanner.Storage.Tests/ScannerMongoFixture.cs new file mode 100644 index 00000000..0efefdc1 --- /dev/null +++ b/src/StellaOps.Scanner.Storage.Tests/ScannerMongoFixture.cs @@ -0,0 +1,26 @@ +using Mongo2Go; +using MongoDB.Driver; +using Xunit; + +namespace StellaOps.Scanner.Storage.Tests; + +public sealed class ScannerMongoFixture : IAsyncLifetime +{ + public MongoDbRunner Runner { get; private set; } = null!; + public IMongoClient Client { get; private set; } = null!; + public IMongoDatabase Database { get; private set; } = null!; + + public Task InitializeAsync() + { + Runner = MongoDbRunner.Start(singleNodeReplSet: true); + Client = new MongoClient(Runner.ConnectionString); + Database = Client.GetDatabase($"scanner-tests-{Guid.NewGuid():N}"); + return Task.CompletedTask; + } + + public Task DisposeAsync() + { + Runner.Dispose(); + return Task.CompletedTask; + } +} diff --git a/src/StellaOps.Scanner.Storage.Tests/StellaOps.Scanner.Storage.Tests.csproj b/src/StellaOps.Scanner.Storage.Tests/StellaOps.Scanner.Storage.Tests.csproj new file mode 100644 index 00000000..4d89e468 --- /dev/null +++ b/src/StellaOps.Scanner.Storage.Tests/StellaOps.Scanner.Storage.Tests.csproj @@ -0,0 +1,10 @@ + + + net10.0 + enable + enable + + + + + diff --git a/src/StellaOps.Scanner.Storage.Tests/StorageDualWriteFixture.cs 
b/src/StellaOps.Scanner.Storage.Tests/StorageDualWriteFixture.cs new file mode 100644 index 00000000..55cdf50d --- /dev/null +++ b/src/StellaOps.Scanner.Storage.Tests/StorageDualWriteFixture.cs @@ -0,0 +1,142 @@ +using System.Security.Cryptography; +using Microsoft.Extensions.Logging.Abstractions; +using Microsoft.Extensions.Options; +using MongoDB.Bson; +using MongoDB.Driver; +using StellaOps.Scanner.Storage; +using StellaOps.Scanner.Storage.Catalog; +using StellaOps.Scanner.Storage.Migrations; +using StellaOps.Scanner.Storage.Mongo; +using StellaOps.Scanner.Storage.ObjectStore; +using StellaOps.Scanner.Storage.Repositories; +using StellaOps.Scanner.Storage.Services; +using Xunit; + +namespace StellaOps.Scanner.Storage.Tests; + +[CollectionDefinition("scanner-mongo-fixture")] +public sealed class ScannerMongoCollection : ICollectionFixture +{ +} + +[Collection("scanner-mongo-fixture")] +public sealed class StorageDualWriteFixture +{ + private readonly ScannerMongoFixture _fixture; + + public StorageDualWriteFixture(ScannerMongoFixture fixture) + { + _fixture = fixture; + } + + [Fact] + public async Task StoreArtifactAsync_DualWrite_WritesToMirrorAndCatalog() + { + var options = BuildOptions(dualWrite: true, mirrorBucket: "mirror-bucket"); + var objectStore = new InMemoryArtifactObjectStore(); + + await InitializeMongoAsync(options); + var provider = new MongoCollectionProvider(_fixture.Database, Options.Create(options)); + var artifactRepository = new ArtifactRepository(provider); + var lifecycleRepository = new LifecycleRuleRepository(provider); + var service = new ArtifactStorageService( + artifactRepository, + lifecycleRepository, + objectStore, + Options.Create(options), + NullLogger.Instance); + + var bytes = System.Text.Encoding.UTF8.GetBytes("test artifact payload"); + using var stream = new MemoryStream(bytes); + var expiresAt = DateTime.UtcNow.AddHours(6); + + var document = await service.StoreArtifactAsync( + ArtifactDocumentType.LayerBom, + 
ArtifactDocumentFormat.CycloneDxJson, + mediaType: "application/vnd.cyclonedx+json", + content: stream, + immutable: true, + ttlClass: "compliance", + expiresAtUtc: expiresAt, + cancellationToken: CancellationToken.None); + + var digest = Convert.ToHexString(SHA256.HashData(bytes)).ToLowerInvariant(); + var expectedKey = $"{options.ObjectStore.RootPrefix.TrimEnd('/')}/layers/{digest}/sbom.cdx.json"; + Assert.Contains(objectStore.Objects.Keys, key => key.Bucket == options.ObjectStore.BucketName && key.Key == expectedKey); + Assert.Contains(objectStore.Objects.Keys, key => key.Bucket == options.DualWrite.MirrorBucket && key.Key == expectedKey); + + var artifact = await artifactRepository.GetAsync(document.Id, CancellationToken.None); + Assert.NotNull(artifact); + Assert.Equal($"sha256:{digest}", artifact!.BytesSha256); + Assert.Equal(1, artifact.RefCount); + Assert.Equal("compliance", artifact.TtlClass); + Assert.True(artifact.Immutable); + + var lifecycleCollection = _fixture.Database.GetCollection(ScannerStorageDefaults.Collections.LifecycleRules); + var lifecycle = await lifecycleCollection.Find(x => x.ArtifactId == document.Id).FirstOrDefaultAsync(); + Assert.NotNull(lifecycle); + Assert.Equal("compliance", lifecycle!.Class); + Assert.True(lifecycle.ExpiresAtUtc.HasValue); + Assert.True(lifecycle.ExpiresAtUtc.Value <= expiresAt.AddSeconds(5)); + } + + [Fact] + public async Task Bootstrapper_CreatesLifecycleTtlIndex() + { + var options = BuildOptions(dualWrite: false, mirrorBucket: null); + await InitializeMongoAsync(options); + + var collection = _fixture.Database.GetCollection(ScannerStorageDefaults.Collections.LifecycleRules); + var cursor = await collection.Indexes.ListAsync(); + var indexes = await cursor.ToListAsync(); + var ttlIndex = indexes.SingleOrDefault(x => string.Equals(x["name"].AsString, "lifecycle_expiresAt", StringComparison.Ordinal)); + + Assert.NotNull(ttlIndex); + Assert.True(ttlIndex!.TryGetValue("expireAfterSeconds", out var expireValue)); + 
Assert.Equal(0, expireValue.ToInt64()); + + var uniqueIndex = indexes.SingleOrDefault(x => string.Equals(x["name"].AsString, "lifecycle_artifact_class", StringComparison.Ordinal)); + Assert.NotNull(uniqueIndex); + Assert.True(uniqueIndex!["unique"].AsBoolean); + } + + private ScannerStorageOptions BuildOptions(bool dualWrite, string? mirrorBucket) + { + var options = new ScannerStorageOptions + { + Mongo = + { + ConnectionString = _fixture.Runner.ConnectionString, + DatabaseName = _fixture.Database.DatabaseNamespace.DatabaseName, + }, + ObjectStore = + { + BucketName = "primary-bucket", + RootPrefix = "scanner", + EnableObjectLock = true, + }, + }; + + options.DualWrite.Enabled = dualWrite; + options.DualWrite.MirrorBucket = mirrorBucket; + return options; + } + + private async Task InitializeMongoAsync(ScannerStorageOptions options) + { + await _fixture.Client.DropDatabaseAsync(options.Mongo.DatabaseName); + var migrations = new IMongoMigration[] { new EnsureLifecycleRuleTtlMigration() }; + var runner = new MongoMigrationRunner( + _fixture.Database, + migrations, + NullLogger.Instance, + TimeProvider.System); + var bootstrapper = new MongoBootstrapper( + _fixture.Database, + Options.Create(options), + NullLogger.Instance, + runner); + + await bootstrapper.InitializeAsync(CancellationToken.None); + } +} diff --git a/src/StellaOps.Scanner.Storage/AGENTS.md b/src/StellaOps.Scanner.Storage/AGENTS.md new file mode 100644 index 00000000..4c83560b --- /dev/null +++ b/src/StellaOps.Scanner.Storage/AGENTS.md @@ -0,0 +1,28 @@ +# AGENTS +## Role +Provide durable catalog and artifact storage for the Scanner plane, spanning Mongo catalog collections and MinIO object storage. Expose repositories and services used by WebService and Worker components to persist job state, image metadata, and exported artefacts deterministically. +## Scope +- Mongo collections: artifacts, images, layers, links, jobs, lifecycle_rules, migrations. 
+- Metadata documents: enforce majority write/read concerns, UTC timestamps, deterministic identifiers (SHA-256 digests, ULIDs for jobs). +- Bootstrapper: create collections + indexes (unique digests, compound references, TTL on lifecycle rules, sparse lookup helpers) and run schema migrations. +- Object storage (MinIO/S3): manage bucket layout (layers/, images/, indexes/, attest/), immutability policies, deterministic paths, and retention classes. +- Services: coordinate dual-write between Mongo metadata and MinIO blobs, compute digests, manage reference counts, and expose typed repositories for WebService/Worker interactions. +## Participants +- Scanner.WebService binds configuration, runs bootstrapper during startup, and uses repositories to enqueue scans, look up catalog entries, and manage lifecycle policies. +- Scanner.Worker writes job progress, uploads SBOM artefacts, and updates artefact reference counts. +- Policy / Notify consumers resolve artefact metadata for reports via catalog APIs once exposed. +## Interfaces & contracts +- Options configured via `ScannerStorageOptions` (Mongo + object store). `EnsureValid` rejects incomplete/unsafe configuration. +- Mongo access uses `IMongoDatabase` scoped with majority `ReadConcern`/`WriteConcern` and cancellation tokens. +- Object store abstraction (`IArtifactObjectStore`) encapsulates MinIO (S3) operations with server-side checksum validation and optional object-lock retain-until. +- Service APIs follow deterministic naming: digests normalized (`sha256:`), ULIDs sortable, timestamps ISO-8601 UTC. +## In/Out of scope +In: persistence models, bootstrap/migrations, catalog repositories, object storage client, retention helpers, dual-write coordination, deterministic digests. +Out: HTTP endpoints, queue processing, analyzer logic, SBOM composition, policy decisions, UI contracts. +## Observability & security expectations +- Emit structured logs for catalog/object-store writes including correlation IDs and digests. 
+- Guard against double writes; idempotent operations keyed by digests. +- Do not log credentials; redact connection strings. Honour cancellation tokens. +- Metrics hooks (pending) must expose duration counters for Mongo and MinIO operations. +## Tests +- Integration tests with ephemeral Mongo/MinIO stubs covering bootstrapper indexes, TTL enforcement, dual-write coordination, digest determinism, and majority read/write concerns. diff --git a/src/StellaOps.Scanner.Storage/Catalog/ArtifactDocument.cs b/src/StellaOps.Scanner.Storage/Catalog/ArtifactDocument.cs new file mode 100644 index 00000000..9009e0c5 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Catalog/ArtifactDocument.cs @@ -0,0 +1,85 @@ +using MongoDB.Bson.Serialization.Attributes; + +namespace StellaOps.Scanner.Storage.Catalog; + +public enum ArtifactDocumentType +{ + LayerBom, + ImageBom, + Diff, + Index, + Attestation, +} + +public enum ArtifactDocumentFormat +{ + CycloneDxJson, + CycloneDxProtobuf, + SpdxJson, + BomIndex, + DsseJson, +} + +[BsonIgnoreExtraElements] +public sealed class ArtifactDocument +{ + [BsonId] + public string Id { get; set; } = string.Empty; + + [BsonElement("type")] + public ArtifactDocumentType Type { get; set; } + = ArtifactDocumentType.ImageBom; + + [BsonElement("format")] + public ArtifactDocumentFormat Format { get; set; } + = ArtifactDocumentFormat.CycloneDxJson; + + [BsonElement("mediaType")] + public string MediaType { get; set; } = string.Empty; + + [BsonElement("bytesSha256")] + public string BytesSha256 { get; set; } = string.Empty; + + [BsonElement("sizeBytes")] + public long SizeBytes { get; set; } + = 0; + + [BsonElement("immutable")] + public bool Immutable { get; set; } + = false; + + [BsonElement("refCount")] + public long RefCount { get; set; } + = 0; + + [BsonElement("rekor")] + [BsonIgnoreIfNull] + public RekorReference? 
Rekor { get; set; } + = null; + + [BsonElement("createdAt")] + public DateTime CreatedAtUtc { get; set; } + = DateTime.UtcNow; + + [BsonElement("updatedAt")] + public DateTime UpdatedAtUtc { get; set; } + = DateTime.UtcNow; + + [BsonElement("ttlClass")] + public string TtlClass { get; set; } = "default"; +} + +public sealed class RekorReference +{ + [BsonElement("uuid")] + public string? Uuid { get; set; } + = null; + + [BsonElement("index")] + public long? Index { get; set; } + = null; + + [BsonElement("url")] + public string? Url { get; set; } + = null; +} diff --git a/src/StellaOps.Scanner.Storage/Catalog/CatalogIdFactory.cs b/src/StellaOps.Scanner.Storage/Catalog/CatalogIdFactory.cs new file mode 100644 index 00000000..8a19a936 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Catalog/CatalogIdFactory.cs @@ -0,0 +1,43 @@ +using System.Security.Cryptography; +using System.Text; + +namespace StellaOps.Scanner.Storage.Catalog; + +public static class CatalogIdFactory +{ + public static string CreateArtifactId(ArtifactDocumentType type, string digest) + { + ArgumentException.ThrowIfNullOrWhiteSpace(digest); + return $"{type.ToString().ToLowerInvariant()}::{NormalizeDigest(digest)}"; + } + + public static string CreateLinkId(LinkSourceType type, string fromDigest, string artifactId) + { + ArgumentException.ThrowIfNullOrWhiteSpace(fromDigest); + ArgumentException.ThrowIfNullOrWhiteSpace(artifactId); + + var input = Encoding.UTF8.GetBytes($"{type}:{NormalizeDigest(fromDigest)}:{artifactId}"); + var hash = SHA256.HashData(input); + return Convert.ToHexString(hash).ToLowerInvariant(); + } + + public static string CreateLifecycleRuleId(string artifactId, string @class) + { + ArgumentException.ThrowIfNullOrWhiteSpace(artifactId); + var normalizedClass = string.IsNullOrWhiteSpace(@class) ? 
"default" : @class.Trim().ToLowerInvariant(); + var payload = Encoding.UTF8.GetBytes($"{artifactId}:{normalizedClass}"); + var hash = SHA256.HashData(payload); + return Convert.ToHexString(hash).ToLowerInvariant(); + } + + private static string NormalizeDigest(string digest) + { + if (!digest.Contains(':', StringComparison.Ordinal)) + { + return $"sha256:{digest.Trim().ToLowerInvariant()}"; + } + + var parts = digest.Split(':', 2, StringSplitOptions.TrimEntries); + return $"{parts[0].ToLowerInvariant()}:{parts[1].ToLowerInvariant()}"; + } +} diff --git a/src/StellaOps.Scanner.Storage/Catalog/ImageDocument.cs b/src/StellaOps.Scanner.Storage/Catalog/ImageDocument.cs new file mode 100644 index 00000000..1b66d993 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Catalog/ImageDocument.cs @@ -0,0 +1,29 @@ +using MongoDB.Bson.Serialization.Attributes; + +namespace StellaOps.Scanner.Storage.Catalog; + +[BsonIgnoreExtraElements] +public sealed class ImageDocument +{ + [BsonId] + public string ImageDigest { get; set; } = string.Empty; + + [BsonElement("repository")] + public string Repository { get; set; } = string.Empty; + + [BsonElement("tag")] + [BsonIgnoreIfNull] + public string? 
Tag { get; set; } + = null; + + [BsonElement("architecture")] + public string Architecture { get; set; } = string.Empty; + + [BsonElement("createdAt")] + public DateTime CreatedAtUtc { get; set; } + = DateTime.UtcNow; + + [BsonElement("lastSeenAt")] + public DateTime LastSeenAtUtc { get; set; } + = DateTime.UtcNow; +} diff --git a/src/StellaOps.Scanner.Storage/Catalog/JobDocument.cs b/src/StellaOps.Scanner.Storage/Catalog/JobDocument.cs new file mode 100644 index 00000000..5f94c32e --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Catalog/JobDocument.cs @@ -0,0 +1,54 @@ +using MongoDB.Bson; +using MongoDB.Bson.Serialization.Attributes; + +namespace StellaOps.Scanner.Storage.Catalog; + +public enum JobState +{ + Pending, + Running, + Succeeded, + Failed, + Cancelled, +} + +[BsonIgnoreExtraElements] +public sealed class JobDocument +{ + [BsonId] + public string Id { get; set; } = string.Empty; + + [BsonElement("kind")] + public string Kind { get; set; } = string.Empty; + + [BsonElement("state")] + public JobState State { get; set; } = JobState.Pending; + + [BsonElement("args")] + public BsonDocument Arguments { get; set; } + = new(); + + [BsonElement("createdAt")] + public DateTime CreatedAtUtc { get; set; } + = DateTime.UtcNow; + + [BsonElement("startedAt")] + [BsonIgnoreIfNull] + public DateTime? StartedAtUtc { get; set; } + = null; + + [BsonElement("completedAt")] + [BsonIgnoreIfNull] + public DateTime? CompletedAtUtc { get; set; } + = null; + + [BsonElement("heartbeatAt")] + [BsonIgnoreIfNull] + public DateTime? HeartbeatAtUtc { get; set; } + = null; + + [BsonElement("error")] + [BsonIgnoreIfNull] + public string? 
Error { get; set; } + = null; +} diff --git a/src/StellaOps.Scanner.Storage/Catalog/LayerDocument.cs b/src/StellaOps.Scanner.Storage/Catalog/LayerDocument.cs new file mode 100644 index 00000000..0763cc4c --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Catalog/LayerDocument.cs @@ -0,0 +1,25 @@ +using MongoDB.Bson.Serialization.Attributes; + +namespace StellaOps.Scanner.Storage.Catalog; + +[BsonIgnoreExtraElements] +public sealed class LayerDocument +{ + [BsonId] + public string LayerDigest { get; set; } = string.Empty; + + [BsonElement("mediaType")] + public string MediaType { get; set; } = string.Empty; + + [BsonElement("sizeBytes")] + public long SizeBytes { get; set; } + = 0; + + [BsonElement("createdAt")] + public DateTime CreatedAtUtc { get; set; } + = DateTime.UtcNow; + + [BsonElement("lastSeenAt")] + public DateTime LastSeenAtUtc { get; set; } + = DateTime.UtcNow; +} diff --git a/src/StellaOps.Scanner.Storage/Catalog/LifecycleRuleDocument.cs b/src/StellaOps.Scanner.Storage/Catalog/LifecycleRuleDocument.cs new file mode 100644 index 00000000..0c73c569 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Catalog/LifecycleRuleDocument.cs @@ -0,0 +1,25 @@ +using MongoDB.Bson.Serialization.Attributes; + +namespace StellaOps.Scanner.Storage.Catalog; + +[BsonIgnoreExtraElements] +public sealed class LifecycleRuleDocument +{ + [BsonId] + public string Id { get; set; } = string.Empty; + + [BsonElement("artifactId")] + public string ArtifactId { get; set; } = string.Empty; + + [BsonElement("class")] + public string Class { get; set; } = "default"; + + [BsonElement("expiresAt")] + [BsonIgnoreIfNull] + public DateTime? 
ExpiresAtUtc { get; set; } + = null; + + [BsonElement("createdAt")] + public DateTime CreatedAtUtc { get; set; } + = DateTime.UtcNow; +} diff --git a/src/StellaOps.Scanner.Storage/Catalog/LinkDocument.cs b/src/StellaOps.Scanner.Storage/Catalog/LinkDocument.cs new file mode 100644 index 00000000..8860e169 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Catalog/LinkDocument.cs @@ -0,0 +1,30 @@ +using MongoDB.Bson.Serialization.Attributes; + +namespace StellaOps.Scanner.Storage.Catalog; + +public enum LinkSourceType +{ + Image, + Layer, +} + +[BsonIgnoreExtraElements] +public sealed class LinkDocument +{ + [BsonId] + public string Id { get; set; } = string.Empty; + + [BsonElement("fromType")] + public LinkSourceType FromType { get; set; } + = LinkSourceType.Image; + + [BsonElement("fromDigest")] + public string FromDigest { get; set; } = string.Empty; + + [BsonElement("artifactId")] + public string ArtifactId { get; set; } = string.Empty; + + [BsonElement("createdAt")] + public DateTime CreatedAtUtc { get; set; } + = DateTime.UtcNow; +} diff --git a/src/StellaOps.Scanner.Storage/Extensions/ServiceCollectionExtensions.cs b/src/StellaOps.Scanner.Storage/Extensions/ServiceCollectionExtensions.cs new file mode 100644 index 00000000..a2f80846 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Extensions/ServiceCollectionExtensions.cs @@ -0,0 +1,112 @@ +using Amazon; +using Amazon.S3; +using Microsoft.Extensions.Configuration; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Migrations; +using StellaOps.Scanner.Storage.Mongo; +using StellaOps.Scanner.Storage.ObjectStore; +using StellaOps.Scanner.Storage.Repositories; +using StellaOps.Scanner.Storage.Services; + +namespace StellaOps.Scanner.Storage.Extensions; + +public static class ServiceCollectionExtensions +{ + public static 
IServiceCollection AddScannerStorage(this IServiceCollection services, Action configure) + { + ArgumentNullException.ThrowIfNull(configure); + + services.AddOptions().Configure(configure).PostConfigure(options => options.EnsureValid()); + RegisterScannerStorageServices(services); + return services; + } + + public static IServiceCollection AddScannerStorage(this IServiceCollection services, IConfiguration configuration) + { + ArgumentNullException.ThrowIfNull(configuration); + + services.AddOptions() + .Bind(configuration) + .PostConfigure(options => options.EnsureValid()); + + RegisterScannerStorageServices(services); + return services; + } + + private static void RegisterScannerStorageServices(IServiceCollection services) + { + services.TryAddSingleton(CreateMongoClient); + services.TryAddSingleton(CreateMongoDatabase); + services.TryAddSingleton(); + services.TryAddEnumerable(ServiceDescriptor.Singleton()); + services.TryAddSingleton(provider => + { + var migrations = provider.GetServices(); + return new MongoMigrationRunner( + provider.GetRequiredService(), + migrations, + provider.GetRequiredService>(), + TimeProvider.System); + }); + + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + + services.TryAddSingleton(CreateAmazonS3Client); + services.TryAddSingleton(); + services.TryAddSingleton(); + } + + private static IMongoClient CreateMongoClient(IServiceProvider provider) + { + var options = provider.GetRequiredService>().Value; + options.EnsureValid(); + + var settings = MongoClientSettings.FromConnectionString(options.Mongo.ConnectionString); + settings.RetryReads = true; + settings.RetryWrites = true; + settings.DirectConnection = false; + settings.ReadPreference = ReadPreference.PrimaryPreferred; + settings.ServerSelectionTimeout = options.Mongo.CommandTimeout; + settings.ConnectTimeout = 
options.Mongo.CommandTimeout; + settings.SocketTimeout = options.Mongo.CommandTimeout; + settings.ReadConcern = options.Mongo.UseMajorityReadConcern ? ReadConcern.Majority : ReadConcern.Local; + settings.WriteConcern = options.Mongo.UseMajorityWriteConcern ? WriteConcern.WMajority : WriteConcern.W1; + + return new MongoClient(settings); + } + + private static IMongoDatabase CreateMongoDatabase(IServiceProvider provider) + { + var options = provider.GetRequiredService>().Value; + var client = provider.GetRequiredService(); + var databaseName = options.Mongo.ResolveDatabaseName(); + return client.GetDatabase(databaseName); + } + + private static IAmazonS3 CreateAmazonS3Client(IServiceProvider provider) + { + var options = provider.GetRequiredService>().Value.ObjectStore; + var config = new AmazonS3Config + { + RegionEndpoint = RegionEndpoint.GetBySystemName(options.Region), + ForcePathStyle = options.ForcePathStyle, + }; + + if (!string.IsNullOrWhiteSpace(options.ServiceUrl)) + { + config.ServiceURL = options.ServiceUrl; + } + + return new AmazonS3Client(config); + } +} diff --git a/src/StellaOps.Scanner.Storage/Migrations/EnsureLifecycleRuleTtlMigration.cs b/src/StellaOps.Scanner.Storage/Migrations/EnsureLifecycleRuleTtlMigration.cs new file mode 100644 index 00000000..790f483d --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Migrations/EnsureLifecycleRuleTtlMigration.cs @@ -0,0 +1,30 @@ +using System.Linq; +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Catalog; + +namespace StellaOps.Scanner.Storage.Migrations; + +public sealed class EnsureLifecycleRuleTtlMigration : IMongoMigration +{ + public string Id => "20251018-lifecycle-ttl"; + + public string Description => "Ensure lifecycle_rules expiresAt TTL index exists."; + + public async Task ApplyAsync(IMongoDatabase database, CancellationToken cancellationToken) + { + var collection = database.GetCollection(ScannerStorageDefaults.Collections.LifecycleRules); + var indexes = await 
collection.Indexes.ListAsync(cancellationToken).ConfigureAwait(false);
+        var existing = await indexes.ToListAsync(cancellationToken).ConfigureAwait(false);
+
+        // Idempotency: skip if the TTL index was already created (by bootstrapper or a prior run).
+        if (existing.Any(x => string.Equals(x["name"].AsString, "lifecycle_expiresAt", StringComparison.Ordinal)))
+        {
+            return;
+        }
+
+        // NOTE(review): generic arguments below were stripped by markup-mangling in this patch;
+        // restored from the surrounding collection type (LifecycleRuleDocument, see the
+        // GetCollection call at the end of the preceding line).
+        var model = new CreateIndexModel<LifecycleRuleDocument>(
+            Builders<LifecycleRuleDocument>.IndexKeys.Ascending(x => x.ExpiresAtUtc),
+            new CreateIndexOptions { Name = "lifecycle_expiresAt", ExpireAfter = TimeSpan.Zero });
+
+        await collection.Indexes.CreateOneAsync(model, cancellationToken: cancellationToken).ConfigureAwait(false);
+    }
+}
diff --git a/src/StellaOps.Scanner.Storage/Migrations/IMongoMigration.cs b/src/StellaOps.Scanner.Storage/Migrations/IMongoMigration.cs
new file mode 100644
index 00000000..8acf1e93
--- /dev/null
+++ b/src/StellaOps.Scanner.Storage/Migrations/IMongoMigration.cs
@@ -0,0 +1,12 @@
+using MongoDB.Driver;
+
+namespace StellaOps.Scanner.Storage.Migrations;
+
+// Contract for one-shot, ordered schema migrations run by MongoMigrationRunner.
+public interface IMongoMigration
+{
+    string Id { get; }
+
+    string Description { get; }
+
+    Task ApplyAsync(IMongoDatabase database, CancellationToken cancellationToken);
+}
diff --git a/src/StellaOps.Scanner.Storage/Migrations/MongoMigrationDocument.cs b/src/StellaOps.Scanner.Storage/Migrations/MongoMigrationDocument.cs
new file mode 100644
index 00000000..cb488272
--- /dev/null
+++ b/src/StellaOps.Scanner.Storage/Migrations/MongoMigrationDocument.cs
@@ -0,0 +1,19 @@
+using MongoDB.Bson.Serialization.Attributes;
+
+namespace StellaOps.Scanner.Storage.Migrations;
+
+// Ledger entry recording an applied migration in the migrations collection.
+[BsonIgnoreExtraElements]
+internal sealed class MongoMigrationDocument
+{
+    [BsonId]
+    public string Id { get; set; } = string.Empty;
+
+    [BsonElement("description")]
+    [BsonIgnoreIfNull]
+    public string?
Description { get; set; } + = null; + + [BsonElement("appliedAt")] + public DateTime AppliedAtUtc { get; set; } + = DateTime.UtcNow; +} diff --git a/src/StellaOps.Scanner.Storage/Migrations/MongoMigrationRunner.cs b/src/StellaOps.Scanner.Storage/Migrations/MongoMigrationRunner.cs new file mode 100644 index 00000000..c0118662 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Migrations/MongoMigrationRunner.cs @@ -0,0 +1,94 @@ +using Microsoft.Extensions.Logging; +using MongoDB.Driver; + +namespace StellaOps.Scanner.Storage.Migrations; + +public sealed class MongoMigrationRunner +{ + private readonly IMongoDatabase _database; + private readonly IReadOnlyList _migrations; + private readonly ILogger _logger; + private readonly TimeProvider _timeProvider; + + public MongoMigrationRunner( + IMongoDatabase database, + IEnumerable migrations, + ILogger logger, + TimeProvider? timeProvider = null) + { + _database = database ?? throw new ArgumentNullException(nameof(database)); + _logger = logger ?? throw new ArgumentNullException(nameof(logger)); + _timeProvider = timeProvider ?? TimeProvider.System; + _migrations = (migrations ?? 
throw new ArgumentNullException(nameof(migrations))) + .OrderBy(m => m.Id, StringComparer.Ordinal) + .ToArray(); + } + + public async Task RunAsync(CancellationToken cancellationToken) + { + if (_migrations.Count == 0) + { + return; + } + + await EnsureCollectionExistsAsync(_database, cancellationToken).ConfigureAwait(false); + var collection = _database.GetCollection(ScannerStorageDefaults.Collections.Migrations); + var applied = await LoadAppliedMigrationIdsAsync(collection, cancellationToken).ConfigureAwait(false); + + foreach (var migration in _migrations) + { + if (applied.Contains(migration.Id, StringComparer.Ordinal)) + { + continue; + } + + _logger.LogInformation("Applying scanner Mongo migration {MigrationId}: {Description}", migration.Id, migration.Description); + try + { + await migration.ApplyAsync(_database, cancellationToken).ConfigureAwait(false); + var document = new MongoMigrationDocument + { + Id = migration.Id, + Description = string.IsNullOrWhiteSpace(migration.Description) ? 
null : migration.Description, + AppliedAtUtc = _timeProvider.GetUtcNow().UtcDateTime, + }; + + await collection.InsertOneAsync(document, cancellationToken: cancellationToken).ConfigureAwait(false); + _logger.LogInformation("Scanner Mongo migration {MigrationId} applied", migration.Id); + } + catch (Exception ex) + { + _logger.LogError(ex, "Scanner Mongo migration {MigrationId} failed", migration.Id); + throw; + } + } + } + + private static async Task EnsureCollectionExistsAsync(IMongoDatabase database, CancellationToken cancellationToken) + { + using var cursor = await database.ListCollectionNamesAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + var names = await cursor.ToListAsync(cancellationToken).ConfigureAwait(false); + if (!names.Contains(ScannerStorageDefaults.Collections.Migrations, StringComparer.Ordinal)) + { + await database.CreateCollectionAsync(ScannerStorageDefaults.Collections.Migrations, cancellationToken: cancellationToken).ConfigureAwait(false); + } + } + + private static async Task> LoadAppliedMigrationIdsAsync( + IMongoCollection collection, + CancellationToken cancellationToken) + { + using var cursor = await collection.FindAsync(FilterDefinition.Empty, cancellationToken: cancellationToken).ConfigureAwait(false); + var documents = await cursor.ToListAsync(cancellationToken).ConfigureAwait(false); + var ids = new HashSet(StringComparer.Ordinal); + foreach (var doc in documents) + { + if (!string.IsNullOrWhiteSpace(doc.Id)) + { + ids.Add(doc.Id); + } + } + + return ids; + } +} diff --git a/src/StellaOps.Scanner.Storage/Mongo/MongoBootstrapper.cs b/src/StellaOps.Scanner.Storage/Mongo/MongoBootstrapper.cs new file mode 100644 index 00000000..05f08116 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Mongo/MongoBootstrapper.cs @@ -0,0 +1,181 @@ +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using MongoDB.Bson; +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Catalog; +using 
StellaOps.Scanner.Storage.Migrations; + +namespace StellaOps.Scanner.Storage.Mongo; + +public sealed class MongoBootstrapper +{ + private readonly IMongoDatabase _database; + private readonly ScannerStorageOptions _options; + private readonly ILogger _logger; + private readonly MongoMigrationRunner _migrationRunner; + + public MongoBootstrapper( + IMongoDatabase database, + IOptions options, + ILogger logger, + MongoMigrationRunner migrationRunner) + { + _database = database ?? throw new ArgumentNullException(nameof(database)); + _logger = logger ?? throw new ArgumentNullException(nameof(logger)); + _migrationRunner = migrationRunner ?? throw new ArgumentNullException(nameof(migrationRunner)); + _options = (options ?? throw new ArgumentNullException(nameof(options))).Value; + } + + public async Task InitializeAsync(CancellationToken cancellationToken) + { + _options.EnsureValid(); + + await EnsureCollectionsAsync(cancellationToken).ConfigureAwait(false); + await EnsureIndexesAsync(cancellationToken).ConfigureAwait(false); + await _migrationRunner.RunAsync(cancellationToken).ConfigureAwait(false); + } + + private async Task EnsureCollectionsAsync(CancellationToken cancellationToken) + { + var targetCollections = new[] + { + ScannerStorageDefaults.Collections.Artifacts, + ScannerStorageDefaults.Collections.Images, + ScannerStorageDefaults.Collections.Layers, + ScannerStorageDefaults.Collections.Links, + ScannerStorageDefaults.Collections.Jobs, + ScannerStorageDefaults.Collections.LifecycleRules, + ScannerStorageDefaults.Collections.Migrations, + }; + + using var cursor = await _database.ListCollectionNamesAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + var existing = await cursor.ToListAsync(cancellationToken).ConfigureAwait(false); + + foreach (var name in targetCollections) + { + if (existing.Contains(name, StringComparer.Ordinal)) + { + continue; + } + + _logger.LogInformation("Creating Mongo collection {Collection}", name); + await 
_database.CreateCollectionAsync(name, cancellationToken: cancellationToken).ConfigureAwait(false); + } + } + + private async Task EnsureIndexesAsync(CancellationToken cancellationToken) + { + await EnsureArtifactIndexesAsync(cancellationToken).ConfigureAwait(false); + await EnsureImageIndexesAsync(cancellationToken).ConfigureAwait(false); + await EnsureLayerIndexesAsync(cancellationToken).ConfigureAwait(false); + await EnsureLinkIndexesAsync(cancellationToken).ConfigureAwait(false); + await EnsureJobIndexesAsync(cancellationToken).ConfigureAwait(false); + await EnsureLifecycleIndexesAsync(cancellationToken).ConfigureAwait(false); + } + + private Task EnsureArtifactIndexesAsync(CancellationToken cancellationToken) + { + var collection = _database.GetCollection(ScannerStorageDefaults.Collections.Artifacts); + var models = new List> + { + new( + Builders.IndexKeys + .Ascending(x => x.Type) + .Ascending(x => x.BytesSha256), + new CreateIndexOptions { Name = "artifact_type_bytesSha256", Unique = true }), + new( + Builders.IndexKeys.Ascending(x => x.RefCount), + new CreateIndexOptions { Name = "artifact_refCount" }), + new( + Builders.IndexKeys.Ascending(x => x.CreatedAtUtc), + new CreateIndexOptions { Name = "artifact_createdAt" }) + }; + + return collection.Indexes.CreateManyAsync(models, cancellationToken); + } + + private Task EnsureImageIndexesAsync(CancellationToken cancellationToken) + { + var collection = _database.GetCollection(ScannerStorageDefaults.Collections.Images); + var models = new List> + { + new( + Builders.IndexKeys + .Ascending(x => x.Repository) + .Ascending(x => x.Tag), + new CreateIndexOptions { Name = "image_repo_tag" }), + new( + Builders.IndexKeys.Ascending(x => x.LastSeenAtUtc), + new CreateIndexOptions { Name = "image_lastSeen" }) + }; + + return collection.Indexes.CreateManyAsync(models, cancellationToken); + } + + private Task EnsureLayerIndexesAsync(CancellationToken cancellationToken) + { + var collection = 
_database.GetCollection(ScannerStorageDefaults.Collections.Layers); + var models = new List> + { + new( + Builders.IndexKeys.Ascending(x => x.LastSeenAtUtc), + new CreateIndexOptions { Name = "layer_lastSeen" }) + }; + + return collection.Indexes.CreateManyAsync(models, cancellationToken); + } + + private Task EnsureLinkIndexesAsync(CancellationToken cancellationToken) + { + var collection = _database.GetCollection(ScannerStorageDefaults.Collections.Links); + var models = new List> + { + new( + Builders.IndexKeys + .Ascending(x => x.FromType) + .Ascending(x => x.FromDigest) + .Ascending(x => x.ArtifactId), + new CreateIndexOptions { Name = "link_from_artifact", Unique = true }) + }; + + return collection.Indexes.CreateManyAsync(models, cancellationToken); + } + + private Task EnsureJobIndexesAsync(CancellationToken cancellationToken) + { + var collection = _database.GetCollection(ScannerStorageDefaults.Collections.Jobs); + var models = new List> + { + new( + Builders.IndexKeys + .Ascending(x => x.State) + .Ascending(x => x.CreatedAtUtc), + new CreateIndexOptions { Name = "job_state_createdAt" }), + new( + Builders.IndexKeys.Ascending(x => x.HeartbeatAtUtc), + new CreateIndexOptions { Name = "job_heartbeat" }) + }; + + return collection.Indexes.CreateManyAsync(models, cancellationToken); + } + + private Task EnsureLifecycleIndexesAsync(CancellationToken cancellationToken) + { + var collection = _database.GetCollection(ScannerStorageDefaults.Collections.LifecycleRules); + var expiresIndex = new CreateIndexModel( + Builders.IndexKeys.Ascending(x => x.ExpiresAtUtc), + new CreateIndexOptions + { + Name = "lifecycle_expiresAt", + ExpireAfter = TimeSpan.Zero, + }); + + var artifactIndex = new CreateIndexModel( + Builders.IndexKeys + .Ascending(x => x.ArtifactId) + .Ascending(x => x.Class), + new CreateIndexOptions { Name = "lifecycle_artifact_class", Unique = true }); + + return collection.Indexes.CreateManyAsync(new[] { expiresIndex, artifactIndex }, cancellationToken); 
+ } +} diff --git a/src/StellaOps.Scanner.Storage/Mongo/MongoCollectionProvider.cs b/src/StellaOps.Scanner.Storage/Mongo/MongoCollectionProvider.cs new file mode 100644 index 00000000..e971515a --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Mongo/MongoCollectionProvider.cs @@ -0,0 +1,41 @@ +using Microsoft.Extensions.Options; +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Catalog; + +namespace StellaOps.Scanner.Storage.Mongo; + +public sealed class MongoCollectionProvider +{ + private readonly IMongoDatabase _database; + private readonly MongoOptions _options; + + public MongoCollectionProvider(IMongoDatabase database, IOptions options) + { + _database = database ?? throw new ArgumentNullException(nameof(database)); + _options = (options ?? throw new ArgumentNullException(nameof(options))).Value.Mongo; + } + + public IMongoCollection Artifacts => GetCollection(ScannerStorageDefaults.Collections.Artifacts); + public IMongoCollection Images => GetCollection(ScannerStorageDefaults.Collections.Images); + public IMongoCollection Layers => GetCollection(ScannerStorageDefaults.Collections.Layers); + public IMongoCollection Links => GetCollection(ScannerStorageDefaults.Collections.Links); + public IMongoCollection Jobs => GetCollection(ScannerStorageDefaults.Collections.Jobs); + public IMongoCollection LifecycleRules => GetCollection(ScannerStorageDefaults.Collections.LifecycleRules); + + private IMongoCollection GetCollection(string name) + { + var database = _database; + + if (_options.UseMajorityReadConcern) + { + database = database.WithReadConcern(ReadConcern.Majority); + } + + if (_options.UseMajorityWriteConcern) + { + database = database.WithWriteConcern(WriteConcern.WMajority); + } + + return database.GetCollection(name); + } +} diff --git a/src/StellaOps.Scanner.Storage/ObjectStore/IArtifactObjectStore.cs b/src/StellaOps.Scanner.Storage/ObjectStore/IArtifactObjectStore.cs new file mode 100644 index 00000000..17f1b60f --- /dev/null +++ 
b/src/StellaOps.Scanner.Storage/ObjectStore/IArtifactObjectStore.cs
@@ -0,0 +1,12 @@
+namespace StellaOps.Scanner.Storage.ObjectStore;
+
+// Abstraction over the artefact blob store (MinIO/S3); see S3ArtifactObjectStore.
+public interface IArtifactObjectStore
+{
+    Task PutAsync(ArtifactObjectDescriptor descriptor, Stream content, CancellationToken cancellationToken);
+
+    // Returns null when the object does not exist (see S3 implementation's NotFound handling).
+    Task<Stream?> GetAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken);
+
+    Task DeleteAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken);
+}
+
+public sealed record ArtifactObjectDescriptor(string Bucket, string Key, bool Immutable, TimeSpan? RetainFor = null);
diff --git a/src/StellaOps.Scanner.Storage/ObjectStore/S3ArtifactObjectStore.cs b/src/StellaOps.Scanner.Storage/ObjectStore/S3ArtifactObjectStore.cs
new file mode 100644
index 00000000..5778a28e
--- /dev/null
+++ b/src/StellaOps.Scanner.Storage/ObjectStore/S3ArtifactObjectStore.cs
@@ -0,0 +1,75 @@
+using Amazon.S3;
+using Amazon.S3.Model;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Options;
+
+namespace StellaOps.Scanner.Storage.ObjectStore;
+
+public sealed class S3ArtifactObjectStore : IArtifactObjectStore
+{
+    private readonly IAmazonS3 _s3;
+    private readonly ObjectStoreOptions _options;
+    // NOTE(review): generic arguments on ILogger/IOptions were stripped by markup-mangling;
+    // restored from usage (the ctor dereferences options.Value.ObjectStore below).
+    private readonly ILogger<S3ArtifactObjectStore> _logger;
+
+    public S3ArtifactObjectStore(IAmazonS3 s3, IOptions<ScannerStorageOptions> options, ILogger<S3ArtifactObjectStore> logger)
+    {
+        _s3 = s3 ?? throw new ArgumentNullException(nameof(s3));
+        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
+        _options = (options ??
throw new ArgumentNullException(nameof(options))).Value.ObjectStore; + } + + public async Task PutAsync(ArtifactObjectDescriptor descriptor, Stream content, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(descriptor); + ArgumentNullException.ThrowIfNull(content); + + var request = new PutObjectRequest + { + BucketName = descriptor.Bucket, + Key = descriptor.Key, + InputStream = content, + AutoCloseStream = false, + }; + + if (descriptor.Immutable && _options.EnableObjectLock) + { + request.ObjectLockMode = ObjectLockMode.Compliance; + if (descriptor.RetainFor is { } retention && retention > TimeSpan.Zero) + { + request.ObjectLockRetainUntilDate = DateTime.UtcNow + retention; + } + else if (_options.ComplianceRetention is { } defaultRetention && defaultRetention > TimeSpan.Zero) + { + request.ObjectLockRetainUntilDate = DateTime.UtcNow + defaultRetention; + } + } + + await _s3.PutObjectAsync(request, cancellationToken).ConfigureAwait(false); + _logger.LogDebug("Uploaded scanner object {Bucket}/{Key}", descriptor.Bucket, descriptor.Key); + } + + public async Task GetAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(descriptor); + try + { + var response = await _s3.GetObjectAsync(descriptor.Bucket, descriptor.Key, cancellationToken).ConfigureAwait(false); + var buffer = new MemoryStream(); + await response.ResponseStream.CopyToAsync(buffer, cancellationToken).ConfigureAwait(false); + buffer.Position = 0; + return buffer; + } + catch (AmazonS3Exception ex) when (ex.StatusCode == System.Net.HttpStatusCode.NotFound) + { + _logger.LogDebug("Scanner object {Bucket}/{Key} not found", descriptor.Bucket, descriptor.Key); + return null; + } + } + + public async Task DeleteAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(descriptor); + await _s3.DeleteObjectAsync(descriptor.Bucket, descriptor.Key, 
cancellationToken).ConfigureAwait(false); + _logger.LogDebug("Deleted scanner object {Bucket}/{Key}", descriptor.Bucket, descriptor.Key); + } +} diff --git a/src/StellaOps.Scanner.Storage/Repositories/ArtifactRepository.cs b/src/StellaOps.Scanner.Storage/Repositories/ArtifactRepository.cs new file mode 100644 index 00000000..bd25fbac --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Repositories/ArtifactRepository.cs @@ -0,0 +1,67 @@ +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Catalog; +using StellaOps.Scanner.Storage.Mongo; + +namespace StellaOps.Scanner.Storage.Repositories; + +public sealed class ArtifactRepository +{ + private readonly MongoCollectionProvider _collections; + + public ArtifactRepository(MongoCollectionProvider collections) + { + _collections = collections ?? throw new ArgumentNullException(nameof(collections)); + } + + public async Task GetAsync(string artifactId, CancellationToken cancellationToken) + { + ArgumentException.ThrowIfNullOrWhiteSpace(artifactId); + return await _collections.Artifacts + .Find(x => x.Id == artifactId) + .FirstOrDefaultAsync(cancellationToken) + .ConfigureAwait(false); + } + + public async Task UpsertAsync(ArtifactDocument document, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(document); + document.UpdatedAtUtc = DateTime.UtcNow; + var options = new ReplaceOptions { IsUpsert = true }; + await _collections.Artifacts + .ReplaceOneAsync(x => x.Id == document.Id, document, options, cancellationToken) + .ConfigureAwait(false); + } + + public async Task UpdateRekorAsync(string artifactId, RekorReference reference, CancellationToken cancellationToken) + { + ArgumentException.ThrowIfNullOrWhiteSpace(artifactId); + ArgumentNullException.ThrowIfNull(reference); + + var update = Builders.Update + .Set(x => x.Rekor, reference) + .Set(x => x.UpdatedAtUtc, DateTime.UtcNow); + + await _collections.Artifacts.UpdateOneAsync(x => x.Id == artifactId, update, cancellationToken: 
cancellationToken).ConfigureAwait(false); + } + + public async Task IncrementRefCountAsync(string artifactId, long delta, CancellationToken cancellationToken) + { + ArgumentException.ThrowIfNullOrWhiteSpace(artifactId); + + var update = Builders.Update + .Inc(x => x.RefCount, delta) + .Set(x => x.UpdatedAtUtc, DateTime.UtcNow); + + var options = new FindOneAndUpdateOptions + { + ReturnDocument = ReturnDocument.After, + IsUpsert = false, + }; + + var result = await _collections.Artifacts + .FindOneAndUpdateAsync(x => x.Id == artifactId, update, options, cancellationToken) + .ConfigureAwait(false); + + return result?.RefCount ?? 0; + } +} diff --git a/src/StellaOps.Scanner.Storage/Repositories/ImageRepository.cs b/src/StellaOps.Scanner.Storage/Repositories/ImageRepository.cs new file mode 100644 index 00000000..600f504a --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Repositories/ImageRepository.cs @@ -0,0 +1,34 @@ +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Catalog; +using StellaOps.Scanner.Storage.Mongo; + +namespace StellaOps.Scanner.Storage.Repositories; + +public sealed class ImageRepository +{ + private readonly MongoCollectionProvider _collections; + + public ImageRepository(MongoCollectionProvider collections) + { + _collections = collections ?? 
throw new ArgumentNullException(nameof(collections)); + } + + public async Task UpsertAsync(ImageDocument document, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(document); + document.LastSeenAtUtc = DateTime.UtcNow; + var updateOptions = new ReplaceOptions { IsUpsert = true }; + await _collections.Images + .ReplaceOneAsync(x => x.ImageDigest == document.ImageDigest, document, updateOptions, cancellationToken) + .ConfigureAwait(false); + } + + public async Task GetAsync(string imageDigest, CancellationToken cancellationToken) + { + ArgumentException.ThrowIfNullOrWhiteSpace(imageDigest); + return await _collections.Images + .Find(x => x.ImageDigest == imageDigest) + .FirstOrDefaultAsync(cancellationToken) + .ConfigureAwait(false); + } +} diff --git a/src/StellaOps.Scanner.Storage/Repositories/JobRepository.cs b/src/StellaOps.Scanner.Storage/Repositories/JobRepository.cs new file mode 100644 index 00000000..ead92ca6 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Repositories/JobRepository.cs @@ -0,0 +1,78 @@ +using MongoDB.Bson; +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Catalog; +using StellaOps.Scanner.Storage.Mongo; + +namespace StellaOps.Scanner.Storage.Repositories; + +public sealed class JobRepository +{ + private readonly MongoCollectionProvider _collections; + private readonly TimeProvider _timeProvider; + + public JobRepository(MongoCollectionProvider collections, TimeProvider? timeProvider = null) + { + _collections = collections ?? throw new ArgumentNullException(nameof(collections)); + _timeProvider = timeProvider ?? 
TimeProvider.System; + } + + public async Task InsertAsync(JobDocument document, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(document); + document.CreatedAtUtc = _timeProvider.GetUtcNow().UtcDateTime; + await _collections.Jobs.InsertOneAsync(document, cancellationToken: cancellationToken).ConfigureAwait(false); + return document; + } + + public async Task TryTransitionAsync(string jobId, JobState expected, JobState next, CancellationToken cancellationToken) + { + ArgumentException.ThrowIfNullOrWhiteSpace(jobId); + var now = _timeProvider.GetUtcNow().UtcDateTime; + var update = Builders.Update + .Set(x => x.State, next) + .Set(x => x.HeartbeatAtUtc, now); + + if (next == JobState.Running) + { + update = update.Set(x => x.StartedAtUtc, now); + } + + if (next is JobState.Succeeded or JobState.Failed or JobState.Cancelled) + { + update = update.Set(x => x.CompletedAtUtc, now); + } + + var result = await _collections.Jobs.UpdateOneAsync( + Builders.Filter.And( + Builders.Filter.Eq(x => x.Id, jobId), + Builders.Filter.Eq(x => x.State, expected)), + update, + cancellationToken: cancellationToken).ConfigureAwait(false); + + return result.ModifiedCount == 1; + } + + public async Task GetAsync(string jobId, CancellationToken cancellationToken) + { + ArgumentException.ThrowIfNullOrWhiteSpace(jobId); + return await _collections.Jobs + .Find(x => x.Id == jobId) + .FirstOrDefaultAsync(cancellationToken) + .ConfigureAwait(false); + } + + public Task> ListStaleAsync(TimeSpan heartbeatThreshold, CancellationToken cancellationToken) + { + if (heartbeatThreshold <= TimeSpan.Zero) + { + throw new ArgumentOutOfRangeException(nameof(heartbeatThreshold)); + } + + var cutoff = _timeProvider.GetUtcNow().UtcDateTime - heartbeatThreshold; + var filter = Builders.Filter.And( + Builders.Filter.Eq(x => x.State, JobState.Running), + Builders.Filter.Lt(x => x.HeartbeatAtUtc, cutoff)); + + return _collections.Jobs.Find(filter).ToListAsync(cancellationToken); + } +} 
diff --git a/src/StellaOps.Scanner.Storage/Repositories/LayerRepository.cs b/src/StellaOps.Scanner.Storage/Repositories/LayerRepository.cs new file mode 100644 index 00000000..f8ccb5f8 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Repositories/LayerRepository.cs @@ -0,0 +1,34 @@ +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Catalog; +using StellaOps.Scanner.Storage.Mongo; + +namespace StellaOps.Scanner.Storage.Repositories; + +public sealed class LayerRepository +{ + private readonly MongoCollectionProvider _collections; + + public LayerRepository(MongoCollectionProvider collections) + { + _collections = collections ?? throw new ArgumentNullException(nameof(collections)); + } + + public async Task UpsertAsync(LayerDocument document, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(document); + document.LastSeenAtUtc = DateTime.UtcNow; + var options = new ReplaceOptions { IsUpsert = true }; + await _collections.Layers + .ReplaceOneAsync(x => x.LayerDigest == document.LayerDigest, document, options, cancellationToken) + .ConfigureAwait(false); + } + + public async Task GetAsync(string layerDigest, CancellationToken cancellationToken) + { + ArgumentException.ThrowIfNullOrWhiteSpace(layerDigest); + return await _collections.Layers + .Find(x => x.LayerDigest == layerDigest) + .FirstOrDefaultAsync(cancellationToken) + .ConfigureAwait(false); + } +} diff --git a/src/StellaOps.Scanner.Storage/Repositories/LifecycleRuleRepository.cs b/src/StellaOps.Scanner.Storage/Repositories/LifecycleRuleRepository.cs new file mode 100644 index 00000000..06dde535 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Repositories/LifecycleRuleRepository.cs @@ -0,0 +1,33 @@ +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Catalog; +using StellaOps.Scanner.Storage.Mongo; + +namespace StellaOps.Scanner.Storage.Repositories; + +public sealed class LifecycleRuleRepository +{ + private readonly MongoCollectionProvider _collections; + + public 
LifecycleRuleRepository(MongoCollectionProvider collections) + { + _collections = collections ?? throw new ArgumentNullException(nameof(collections)); + } + + public async Task UpsertAsync(LifecycleRuleDocument document, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(document); + document.CreatedAtUtc = document.CreatedAtUtc == default ? DateTime.UtcNow : document.CreatedAtUtc; + var options = new ReplaceOptions { IsUpsert = true }; + await _collections.LifecycleRules + .ReplaceOneAsync(x => x.Id == document.Id, document, options, cancellationToken) + .ConfigureAwait(false); + } + + public Task> ListExpiredAsync(DateTime utcNow, CancellationToken cancellationToken) + { + var filter = Builders.Filter.Lt(x => x.ExpiresAtUtc, utcNow); + return _collections.LifecycleRules + .Find(filter) + .ToListAsync(cancellationToken); + } +} diff --git a/src/StellaOps.Scanner.Storage/Repositories/LinkRepository.cs b/src/StellaOps.Scanner.Storage/Repositories/LinkRepository.cs new file mode 100644 index 00000000..39c5ee0f --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Repositories/LinkRepository.cs @@ -0,0 +1,32 @@ +using MongoDB.Driver; +using StellaOps.Scanner.Storage.Catalog; +using StellaOps.Scanner.Storage.Mongo; + +namespace StellaOps.Scanner.Storage.Repositories; + +public sealed class LinkRepository +{ + private readonly MongoCollectionProvider _collections; + + public LinkRepository(MongoCollectionProvider collections) + { + _collections = collections ?? 
throw new ArgumentNullException(nameof(collections)); + } + + public async Task UpsertAsync(LinkDocument document, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(document); + var options = new ReplaceOptions { IsUpsert = true }; + await _collections.Links + .ReplaceOneAsync(x => x.Id == document.Id, document, options, cancellationToken) + .ConfigureAwait(false); + } + + public Task> ListBySourceAsync(LinkSourceType type, string digest, CancellationToken cancellationToken) + { + ArgumentException.ThrowIfNullOrWhiteSpace(digest); + return _collections.Links + .Find(x => x.FromType == type && x.FromDigest == digest) + .ToListAsync(cancellationToken); + } +} diff --git a/src/StellaOps.Scanner.Storage/ScannerStorageDefaults.cs b/src/StellaOps.Scanner.Storage/ScannerStorageDefaults.cs new file mode 100644 index 00000000..c089d5d3 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/ScannerStorageDefaults.cs @@ -0,0 +1,27 @@ +namespace StellaOps.Scanner.Storage; + +public static class ScannerStorageDefaults +{ + public const string DefaultDatabaseName = "scanner"; + public const string DefaultBucketName = "stellaops"; + public const string DefaultRootPrefix = "scanner"; + + public static class Collections + { + public const string Artifacts = "artifacts"; + public const string Images = "images"; + public const string Layers = "layers"; + public const string Links = "links"; + public const string Jobs = "jobs"; + public const string LifecycleRules = "lifecycle_rules"; + public const string Migrations = "schema_migrations"; + } + + public static class ObjectPrefixes + { + public const string Layers = "layers"; + public const string Images = "images"; + public const string Indexes = "indexes"; + public const string Attestations = "attest"; + } +} diff --git a/src/StellaOps.Scanner.Storage/ScannerStorageOptions.cs b/src/StellaOps.Scanner.Storage/ScannerStorageOptions.cs new file mode 100644 index 00000000..e0aafc79 --- /dev/null +++ 
b/src/StellaOps.Scanner.Storage/ScannerStorageOptions.cs @@ -0,0 +1,124 @@ +using MongoDB.Driver; + +namespace StellaOps.Scanner.Storage; + +public sealed class ScannerStorageOptions +{ + public MongoOptions Mongo { get; set; } = new(); + + public ObjectStoreOptions ObjectStore { get; set; } = new(); + + public DualWriteOptions DualWrite { get; set; } = new(); + + public void EnsureValid() + { + Mongo.EnsureValid(); + ObjectStore.EnsureValid(); + DualWrite.EnsureValid(); + } +} + +public sealed class MongoOptions +{ + public string ConnectionString { get; set; } = string.Empty; + + public string? DatabaseName { get; set; } + = null; + + public TimeSpan CommandTimeout { get; set; } + = TimeSpan.FromSeconds(30); + + public bool UseMajorityReadConcern { get; set; } + = true; + + public bool UseMajorityWriteConcern { get; set; } + = true; + + public string ResolveDatabaseName() + { + if (!string.IsNullOrWhiteSpace(DatabaseName)) + { + return DatabaseName.Trim(); + } + + if (!string.IsNullOrWhiteSpace(ConnectionString)) + { + var url = MongoUrl.Create(ConnectionString); + if (!string.IsNullOrWhiteSpace(url.DatabaseName)) + { + return url.DatabaseName; + } + } + + return ScannerStorageDefaults.DefaultDatabaseName; + } + + public void EnsureValid() + { + if (string.IsNullOrWhiteSpace(ConnectionString)) + { + throw new InvalidOperationException("Scanner storage Mongo connection string is not configured."); + } + + if (CommandTimeout <= TimeSpan.Zero) + { + throw new InvalidOperationException("Scanner storage Mongo command timeout must be positive."); + } + + _ = ResolveDatabaseName(); + } +} + +public sealed class ObjectStoreOptions +{ + public string Region { get; set; } = "us-east-1"; + + public string? 
ServiceUrl { get; set; } + = null; + + public string BucketName { get; set; } = ScannerStorageDefaults.DefaultBucketName; + + public string RootPrefix { get; set; } = ScannerStorageDefaults.DefaultRootPrefix; + + public bool ForcePathStyle { get; set; } = true; + + public bool EnableObjectLock { get; set; } = false; + + public TimeSpan? ComplianceRetention { get; set; } + = TimeSpan.FromDays(90); + + public void EnsureValid() + { + if (string.IsNullOrWhiteSpace(BucketName)) + { + throw new InvalidOperationException("Scanner storage bucket name cannot be empty."); + } + + if (string.IsNullOrWhiteSpace(RootPrefix)) + { + throw new InvalidOperationException("Scanner storage root prefix cannot be empty."); + } + + if (ComplianceRetention is { } retention && retention <= TimeSpan.Zero) + { + throw new InvalidOperationException("Compliance retention must be positive when specified."); + } + } +} + +public sealed class DualWriteOptions +{ + public bool Enabled { get; set; } + = false; + + public string? 
MirrorBucket { get; set; } + = null; + + public void EnsureValid() + { + if (Enabled && string.IsNullOrWhiteSpace(MirrorBucket)) + { + throw new InvalidOperationException("Dual-write mirror bucket must be configured when enabled."); + } + } +} diff --git a/src/StellaOps.Scanner.Storage/Services/ArtifactStorageService.cs b/src/StellaOps.Scanner.Storage/Services/ArtifactStorageService.cs new file mode 100644 index 00000000..f2792d2d --- /dev/null +++ b/src/StellaOps.Scanner.Storage/Services/ArtifactStorageService.cs @@ -0,0 +1,177 @@ +using System.Buffers; +using System.Security.Cryptography; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using StellaOps.Scanner.Storage.Catalog; +using StellaOps.Scanner.Storage.ObjectStore; +using StellaOps.Scanner.Storage.Repositories; + +namespace StellaOps.Scanner.Storage.Services; + +public sealed class ArtifactStorageService +{ + private readonly ArtifactRepository _artifactRepository; + private readonly LifecycleRuleRepository _lifecycleRuleRepository; + private readonly IArtifactObjectStore _objectStore; + private readonly ScannerStorageOptions _options; + private readonly ILogger _logger; + + public ArtifactStorageService( + ArtifactRepository artifactRepository, + LifecycleRuleRepository lifecycleRuleRepository, + IArtifactObjectStore objectStore, + IOptions options, + ILogger logger) + { + _artifactRepository = artifactRepository ?? throw new ArgumentNullException(nameof(artifactRepository)); + _lifecycleRuleRepository = lifecycleRuleRepository ?? throw new ArgumentNullException(nameof(lifecycleRuleRepository)); + _objectStore = objectStore ?? throw new ArgumentNullException(nameof(objectStore)); + _logger = logger ?? throw new ArgumentNullException(nameof(logger)); + _options = (options ?? 
throw new ArgumentNullException(nameof(options))).Value; + } + + public async Task StoreArtifactAsync( + ArtifactDocumentType type, + ArtifactDocumentFormat format, + string mediaType, + Stream content, + bool immutable, + string ttlClass, + DateTime? expiresAtUtc, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(content); + ArgumentException.ThrowIfNullOrWhiteSpace(mediaType); + + var (buffer, size, digestHex) = await BufferAndHashAsync(content, cancellationToken).ConfigureAwait(false); + try + { + var normalizedDigest = $"sha256:{digestHex}"; + var artifactId = CatalogIdFactory.CreateArtifactId(type, normalizedDigest); + var key = BuildObjectKey(type, format, normalizedDigest); + var descriptor = new ArtifactObjectDescriptor( + _options.ObjectStore.BucketName, + key, + immutable, + _options.ObjectStore.ComplianceRetention); + + buffer.Position = 0; + await _objectStore.PutAsync(descriptor, buffer, cancellationToken).ConfigureAwait(false); + + if (_options.DualWrite.Enabled) + { + buffer.Position = 0; + var mirrorDescriptor = descriptor with { Bucket = _options.DualWrite.MirrorBucket! 
}; + await _objectStore.PutAsync(mirrorDescriptor, buffer, cancellationToken).ConfigureAwait(false); + } + + var document = new ArtifactDocument + { + Id = artifactId, + Type = type, + Format = format, + MediaType = mediaType, + BytesSha256 = normalizedDigest, + SizeBytes = size, + Immutable = immutable, + RefCount = 1, + CreatedAtUtc = DateTime.UtcNow, + UpdatedAtUtc = DateTime.UtcNow, + TtlClass = ttlClass, + }; + + await _artifactRepository.UpsertAsync(document, cancellationToken).ConfigureAwait(false); + + if (expiresAtUtc.HasValue) + { + var lifecycle = new LifecycleRuleDocument + { + Id = CatalogIdFactory.CreateLifecycleRuleId(document.Id, ttlClass), + ArtifactId = document.Id, + Class = ttlClass, + ExpiresAtUtc = expiresAtUtc, + CreatedAtUtc = DateTime.UtcNow, + }; + + await _lifecycleRuleRepository.UpsertAsync(lifecycle, cancellationToken).ConfigureAwait(false); + } + + _logger.LogInformation("Stored scanner artifact {ArtifactId} ({SizeBytes} bytes, digest {Digest})", document.Id, size, normalizedDigest); + return document; + } + finally + { + await buffer.DisposeAsync().ConfigureAwait(false); + } + } + + private static async Task<(MemoryStream Buffer, long Size, string DigestHex)> BufferAndHashAsync(Stream content, CancellationToken cancellationToken) + { + var bufferStream = new MemoryStream(); + var hasher = IncrementalHash.CreateHash(HashAlgorithmName.SHA256); + var rented = ArrayPool.Shared.Rent(81920); + long total = 0; + + try + { + int read; + while ((read = await content.ReadAsync(rented.AsMemory(0, rented.Length), cancellationToken).ConfigureAwait(false)) > 0) + { + hasher.AppendData(rented, 0, read); + await bufferStream.WriteAsync(rented.AsMemory(0, read), cancellationToken).ConfigureAwait(false); + total += read; + } + } + finally + { + ArrayPool.Shared.Return(rented); + } + + bufferStream.Position = 0; + var digest = hasher.GetCurrentHash(); + var digestHex = Convert.ToHexString(digest).ToLowerInvariant(); + return (bufferStream, total, 
digestHex); + } + + private string BuildObjectKey(ArtifactDocumentType type, ArtifactDocumentFormat format, string digest) + { + var normalizedDigest = digest.Split(':', 2, StringSplitOptions.TrimEntries)[^1]; + var prefix = type switch + { + ArtifactDocumentType.LayerBom => ScannerStorageDefaults.ObjectPrefixes.Layers, + ArtifactDocumentType.ImageBom => ScannerStorageDefaults.ObjectPrefixes.Images, + ArtifactDocumentType.Diff => "diffs", + ArtifactDocumentType.Index => ScannerStorageDefaults.ObjectPrefixes.Indexes, + ArtifactDocumentType.Attestation => ScannerStorageDefaults.ObjectPrefixes.Attestations, + _ => ScannerStorageDefaults.ObjectPrefixes.Images, + }; + + var extension = format switch + { + ArtifactDocumentFormat.CycloneDxJson => "sbom.cdx.json", + ArtifactDocumentFormat.CycloneDxProtobuf => "sbom.cdx.pb", + ArtifactDocumentFormat.SpdxJson => "sbom.spdx.json", + ArtifactDocumentFormat.BomIndex => "bom-index.bin", + ArtifactDocumentFormat.DsseJson => "artifact.dsse.json", + _ => "artifact.bin", + }; + + var rootPrefix = _options.ObjectStore.RootPrefix; + if (string.IsNullOrWhiteSpace(rootPrefix)) + { + return $"{prefix}/{normalizedDigest}/{extension}"; + } + + return $"{TrimTrailingSlash(rootPrefix)}/{prefix}/{normalizedDigest}/{extension}"; + } + + private static string TrimTrailingSlash(string prefix) + { + if (string.IsNullOrWhiteSpace(prefix)) + { + return string.Empty; + } + + return prefix.TrimEnd('/'); + } +} diff --git a/src/StellaOps.Scanner.Storage/StellaOps.Scanner.Storage.csproj b/src/StellaOps.Scanner.Storage/StellaOps.Scanner.Storage.csproj new file mode 100644 index 00000000..4d77df47 --- /dev/null +++ b/src/StellaOps.Scanner.Storage/StellaOps.Scanner.Storage.csproj @@ -0,0 +1,18 @@ + + + net10.0 + preview + enable + enable + true + + + + + + + + + + + diff --git a/src/StellaOps.Scanner.Storage/TASKS.md b/src/StellaOps.Scanner.Storage/TASKS.md new file mode 100644 index 00000000..20d9fb65 --- /dev/null +++ 
b/src/StellaOps.Scanner.Storage/TASKS.md @@ -0,0 +1,7 @@ +# Scanner Storage Task Board + +| ID | Status | Owner(s) | Depends on | Description | Exit Criteria | +|----|--------|----------|------------|-------------|---------------| +| SCANNER-STORAGE-09-301 | DONE (2025-10-18) | Scanner Storage Guild | SCANNER-CORE-09-501 | Mongo catalog schemas/indexes for images, layers, artifacts, jobs, lifecycle rules plus migrations. | Collections created via bootstrapper; migrations recorded; indexes enforce uniqueness + TTL; majority read/write configured. | +| SCANNER-STORAGE-09-302 | DONE (2025-10-18) | Scanner Storage Guild | SCANNER-STORAGE-09-301 | MinIO layout, immutability policies, client abstraction, and configuration binding. | S3 client abstraction configurable via options; bucket/prefix defaults documented; immutability flags enforced with tests; config binding validated. | +| SCANNER-STORAGE-09-303 | DONE (2025-10-18) | Scanner Storage Guild | SCANNER-STORAGE-09-301, SCANNER-STORAGE-09-302 | Repositories/services with dual-write feature flag, deterministic digests, TTL enforcement tests. | Dual-write service writes metadata + objects atomically; digest determinism covered by tests; TTL enforcement fixture passing. 
| diff --git a/src/StellaOps.Scanner.WebService.Tests/AuthorizationTests.cs b/src/StellaOps.Scanner.WebService.Tests/AuthorizationTests.cs new file mode 100644 index 00000000..e5559cd4 --- /dev/null +++ b/src/StellaOps.Scanner.WebService.Tests/AuthorizationTests.cs @@ -0,0 +1,25 @@ +using System.Net; + +namespace StellaOps.Scanner.WebService.Tests; + +public sealed class AuthorizationTests +{ + [Fact] + public async Task ApiRoutesRequireAuthenticationWhenAuthorityEnabled() + { + using var factory = new ScannerApplicationFactory(configuration => + { + configuration["scanner:authority:enabled"] = "true"; + configuration["scanner:authority:allowAnonymousFallback"] = "false"; + configuration["scanner:authority:issuer"] = "https://authority.local"; + configuration["scanner:authority:audiences:0"] = "scanner-api"; + configuration["scanner:authority:clientId"] = "scanner-web"; + configuration["scanner:authority:clientSecret"] = "secret"; + }); + + using var client = factory.CreateClient(); + var response = await client.GetAsync("/api/v1/__auth-probe"); + + Assert.Equal(HttpStatusCode.Unauthorized, response.StatusCode); + } +} diff --git a/src/StellaOps.Scanner.WebService.Tests/HealthEndpointsTests.cs b/src/StellaOps.Scanner.WebService.Tests/HealthEndpointsTests.cs new file mode 100644 index 00000000..7b5d7d69 --- /dev/null +++ b/src/StellaOps.Scanner.WebService.Tests/HealthEndpointsTests.cs @@ -0,0 +1,49 @@ +using System.Net.Http.Json; + +namespace StellaOps.Scanner.WebService.Tests; + +public sealed class HealthEndpointsTests +{ + [Fact] + public async Task HealthAndReadyEndpointsRespond() + { + using var factory = new ScannerApplicationFactory(); + using var client = factory.CreateClient(); + + var healthResponse = await client.GetAsync("/healthz"); + Assert.True(healthResponse.IsSuccessStatusCode, $"Expected 200 from /healthz, received {(int)healthResponse.StatusCode}."); + + var readyResponse = await client.GetAsync("/readyz"); + 
Assert.True(readyResponse.IsSuccessStatusCode, $"Expected 200 from /readyz, received {(int)readyResponse.StatusCode}."); + + var healthDocument = await healthResponse.Content.ReadFromJsonAsync(); + Assert.NotNull(healthDocument); + Assert.Equal("healthy", healthDocument!.Status); + Assert.True(healthDocument.UptimeSeconds >= 0); + Assert.NotNull(healthDocument.Telemetry); + + var readyDocument = await readyResponse.Content.ReadFromJsonAsync(); + Assert.NotNull(readyDocument); + Assert.Equal("ready", readyDocument!.Status); + Assert.Null(readyDocument.Error); + } + + private sealed record HealthDocument( + string Status, + DateTimeOffset StartedAt, + DateTimeOffset CapturedAt, + double UptimeSeconds, + TelemetryDocument Telemetry); + + private sealed record TelemetryDocument( + bool Enabled, + bool Logging, + bool Metrics, + bool Tracing); + + private sealed record ReadyDocument( + string Status, + DateTimeOffset CheckedAt, + double? LatencyMs, + string? Error); +} diff --git a/src/StellaOps.Scanner.WebService.Tests/ScannerApplicationFactory.cs b/src/StellaOps.Scanner.WebService.Tests/ScannerApplicationFactory.cs new file mode 100644 index 00000000..d8100dd9 --- /dev/null +++ b/src/StellaOps.Scanner.WebService.Tests/ScannerApplicationFactory.cs @@ -0,0 +1,162 @@ +using System.Collections.Generic; +using Microsoft.AspNetCore.Hosting; +using Microsoft.AspNetCore.Mvc.Testing; +using Microsoft.AspNetCore.TestHost; +using Microsoft.Extensions.Configuration; +using Microsoft.Extensions.DependencyInjection; +using Mongo2Go; + +namespace StellaOps.Scanner.WebService.Tests; + +internal sealed class ScannerApplicationFactory : WebApplicationFactory +{ + private readonly MongoDbRunner mongoRunner; + private readonly Dictionary configuration = new() + { + ["scanner:storage:driver"] = "mongo", + ["scanner:storage:dsn"] = string.Empty, + ["scanner:queue:driver"] = "redis", + ["scanner:queue:dsn"] = "redis://localhost:6379", + ["scanner:artifactStore:driver"] = "minio", + 
["scanner:artifactStore:endpoint"] = "https://minio.local", + ["scanner:artifactStore:accessKey"] = "test-access", + ["scanner:artifactStore:secretKey"] = "test-secret", + ["scanner:artifactStore:bucket"] = "scanner-artifacts", + ["scanner:telemetry:minimumLogLevel"] = "Information", + ["scanner:telemetry:enableRequestLogging"] = "false", + ["scanner:features:enableSignedReports"] = "false" + }; + + private readonly Action>? configureConfiguration; + private readonly Action? configureServices; + + public ScannerApplicationFactory( + Action>? configureConfiguration = null, + Action? configureServices = null) + { + EnsureMongo2GoEnvironment(); + mongoRunner = MongoDbRunner.Start(singleNodeReplSet: true); + configuration["scanner:storage:dsn"] = mongoRunner.ConnectionString; + this.configureConfiguration = configureConfiguration; + this.configureServices = configureServices; + } + + protected override void ConfigureWebHost(IWebHostBuilder builder) + { + configureConfiguration?.Invoke(configuration); + + builder.UseEnvironment("Testing"); + + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__ENABLED", null); + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__ALLOWANONYMOUSFALLBACK", null); + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__ISSUER", null); + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__AUDIENCES__0", null); + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__CLIENTID", null); + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__CLIENTSECRET", null); + Environment.SetEnvironmentVariable("SCANNER__STORAGE__DSN", configuration["scanner:storage:dsn"]); + Environment.SetEnvironmentVariable("SCANNER__QUEUE__DSN", configuration["scanner:queue:dsn"]); + Environment.SetEnvironmentVariable("SCANNER__ARTIFACTSTORE__ENDPOINT", configuration["scanner:artifactStore:endpoint"]); + Environment.SetEnvironmentVariable("SCANNER__ARTIFACTSTORE__ACCESSKEY", configuration["scanner:artifactStore:accessKey"]); + 
Environment.SetEnvironmentVariable("SCANNER__ARTIFACTSTORE__SECRETKEY", configuration["scanner:artifactStore:secretKey"]); + + if (configuration.TryGetValue("scanner:authority:enabled", out var authorityEnabled)) + { + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__ENABLED", authorityEnabled); + } + + if (configuration.TryGetValue("scanner:authority:allowAnonymousFallback", out var allowAnonymous)) + { + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__ALLOWANONYMOUSFALLBACK", allowAnonymous); + } + + if (configuration.TryGetValue("scanner:authority:issuer", out var authorityIssuer)) + { + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__ISSUER", authorityIssuer); + } + + if (configuration.TryGetValue("scanner:authority:audiences:0", out var primaryAudience)) + { + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__AUDIENCES__0", primaryAudience); + } + + if (configuration.TryGetValue("scanner:authority:clientId", out var clientId)) + { + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__CLIENTID", clientId); + } + + if (configuration.TryGetValue("scanner:authority:clientSecret", out var clientSecret)) + { + Environment.SetEnvironmentVariable("SCANNER__AUTHORITY__CLIENTSECRET", clientSecret); + } + + builder.ConfigureAppConfiguration((_, configBuilder) => + { + configBuilder.AddInMemoryCollection(configuration); + }); + + builder.ConfigureTestServices(services => + { + configureServices?.Invoke(services); + }); + } + + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + + if (disposing) + { + mongoRunner.Dispose(); + } + } + + private static void EnsureMongo2GoEnvironment() + { + if (!OperatingSystem.IsLinux()) + { + return; + } + + var libraryPath = ResolveOpenSslLibraryPath(); + if (libraryPath is null) + { + return; + } + + var existing = Environment.GetEnvironmentVariable("LD_LIBRARY_PATH"); + if (string.IsNullOrEmpty(existing)) + { + Environment.SetEnvironmentVariable("LD_LIBRARY_PATH", 
libraryPath); + return; + } + + var segments = existing.Split(':', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries); + if (Array.IndexOf(segments, libraryPath) < 0) + { + Environment.SetEnvironmentVariable("LD_LIBRARY_PATH", string.Join(':', new[] { libraryPath }.Concat(segments))); + } + } + + private static string? ResolveOpenSslLibraryPath() + { + var current = AppContext.BaseDirectory; + while (!string.IsNullOrEmpty(current)) + { + var candidate = Path.Combine(current, "tools", "openssl", "linux-x64"); + if (Directory.Exists(candidate)) + { + return candidate; + } + + var parent = Directory.GetParent(current); + if (parent is null) + { + break; + } + + current = parent.FullName; + } + + return null; + } +} diff --git a/src/StellaOps.Scanner.WebService.Tests/ScansEndpointsTests.cs b/src/StellaOps.Scanner.WebService.Tests/ScansEndpointsTests.cs new file mode 100644 index 00000000..756f6b17 --- /dev/null +++ b/src/StellaOps.Scanner.WebService.Tests/ScansEndpointsTests.cs @@ -0,0 +1,260 @@ +using System.Collections.Generic; +using System.IO; +using System.Net; +using System.Net.Http.Json; +using System.Text.Json; +using System.Threading.Tasks; +using Microsoft.AspNetCore.Http; +using Microsoft.AspNetCore.Mvc.Testing; +using Microsoft.AspNetCore.TestHost; +using Microsoft.Extensions.DependencyInjection; +using StellaOps.Scanner.WebService.Contracts; +using StellaOps.Scanner.WebService.Domain; +using StellaOps.Scanner.WebService.Services; + +namespace StellaOps.Scanner.WebService.Tests; + +public sealed class ScansEndpointsTests +{ + [Fact] + public async Task SubmitScanReturnsAcceptedAndStatusRetrievable() + { + using var factory = new ScannerApplicationFactory(); + using var client = factory.CreateClient(); + + var request = new ScanSubmitRequest + { + Image = new ScanImageDescriptor { Reference = "ghcr.io/demo/app:1.0.0" }, + Force = false + }; + + var response = await client.PostAsJsonAsync("/api/v1/scans", request); + 
Assert.Equal(HttpStatusCode.Accepted, response.StatusCode); + + var payload = await response.Content.ReadFromJsonAsync(); + Assert.NotNull(payload); + Assert.False(string.IsNullOrWhiteSpace(payload!.ScanId)); + Assert.Equal("Pending", payload.Status); + Assert.True(payload.Created); + Assert.False(string.IsNullOrWhiteSpace(payload.Location)); + + var statusResponse = await client.GetAsync(payload.Location); + Assert.Equal(HttpStatusCode.OK, statusResponse.StatusCode); + + var status = await statusResponse.Content.ReadFromJsonAsync(); + Assert.NotNull(status); + Assert.Equal(payload.ScanId, status!.ScanId); + Assert.Equal("Pending", status.Status); + Assert.Equal("ghcr.io/demo/app:1.0.0", status.Image.Reference); + } + + [Fact] + public async Task SubmitScanIsDeterministicForIdenticalPayloads() + { + using var factory = new ScannerApplicationFactory(); + using var client = factory.CreateClient(); + + var request = new ScanSubmitRequest + { + Image = new ScanImageDescriptor { Reference = "registry.example.com/acme/app:latest" }, + Force = false, + ClientRequestId = "client-123", + Metadata = new Dictionary { ["origin"] = "unit-test" } + }; + + var first = await client.PostAsJsonAsync("/api/v1/scans", request); + var firstPayload = await first.Content.ReadFromJsonAsync(); + + var second = await client.PostAsJsonAsync("/api/v1/scans", request); + var secondPayload = await second.Content.ReadFromJsonAsync(); + + Assert.NotNull(firstPayload); + Assert.NotNull(secondPayload); + Assert.Equal(firstPayload!.ScanId, secondPayload!.ScanId); + Assert.True(firstPayload.Created); + Assert.False(secondPayload.Created); + } + + [Fact] + public async Task SubmitScanValidatesImageDescriptor() + { + using var factory = new ScannerApplicationFactory(); + using var client = factory.CreateClient(); + + var request = new + { + image = new { reference = "", digest = "" } + }; + + var response = await client.PostAsJsonAsync("/api/v1/scans", request); + 
Assert.Equal(HttpStatusCode.BadRequest, response.StatusCode); + } + + [Fact] + public async Task SubmitScanPropagatesRequestAbortedToken() + { + RecordingCoordinator coordinator = null!; + using var factory = new ScannerApplicationFactory(configuration => + { + configuration["scanner:authority:enabled"] = "false"; + }, services => + { + services.AddSingleton(sp => + { + coordinator = new RecordingCoordinator( + sp.GetRequiredService(), + sp.GetRequiredService(), + sp.GetRequiredService()); + return coordinator; + }); + }); + + using var client = factory.CreateClient(new WebApplicationFactoryClientOptions + { + AllowAutoRedirect = false + }); + + var cts = new CancellationTokenSource(); + var request = new ScanSubmitRequest + { + Image = new ScanImageDescriptor { Reference = "example.com/demo:1.0" } + }; + + var response = await client.PostAsJsonAsync("/api/v1/scans", request, cts.Token); + Assert.Equal(HttpStatusCode.Accepted, response.StatusCode); + + Assert.NotNull(coordinator); + Assert.True(coordinator.TokenMatched); + Assert.True(coordinator.LastToken.CanBeCanceled); + } + + private sealed class RecordingCoordinator : IScanCoordinator + { + private readonly IHttpContextAccessor accessor; + private readonly InMemoryScanCoordinator inner; + + public RecordingCoordinator(IHttpContextAccessor accessor, TimeProvider timeProvider, IScanProgressPublisher publisher) + { + this.accessor = accessor; + inner = new InMemoryScanCoordinator(timeProvider, publisher); + } + + public CancellationToken LastToken { get; private set; } + + public bool TokenMatched { get; private set; } + + public async ValueTask SubmitAsync(ScanSubmission submission, CancellationToken cancellationToken) + { + LastToken = cancellationToken; + TokenMatched = accessor.HttpContext?.RequestAborted.Equals(cancellationToken) ?? 
false; + return await inner.SubmitAsync(submission, cancellationToken); + } + + public ValueTask GetAsync(ScanId scanId, CancellationToken cancellationToken) + => inner.GetAsync(scanId, cancellationToken); + } + + [Fact] + public async Task ProgressStreamReturnsInitialPendingEvent() + { + using var factory = new ScannerApplicationFactory(); + using var client = factory.CreateClient(); + + var request = new ScanSubmitRequest + { + Image = new ScanImageDescriptor { Reference = "ghcr.io/demo/app:2.0.0" } + }; + + var submit = await client.PostAsJsonAsync("/api/v1/scans", request); + var submitPayload = await submit.Content.ReadFromJsonAsync(); + Assert.NotNull(submitPayload); + + var response = await client.GetAsync($"/api/v1/scans/{submitPayload!.ScanId}/events?format=jsonl", HttpCompletionOption.ResponseHeadersRead); + Assert.Equal(HttpStatusCode.OK, response.StatusCode); + Assert.Equal("application/x-ndjson", response.Content.Headers.ContentType?.MediaType); + + await using var stream = await response.Content.ReadAsStreamAsync(); + using var reader = new StreamReader(stream); + var line = await reader.ReadLineAsync(); + Assert.False(string.IsNullOrWhiteSpace(line)); + + var envelope = JsonSerializer.Deserialize(line!, SerializerOptions); + Assert.NotNull(envelope); + Assert.Equal(submitPayload.ScanId, envelope!.ScanId); + Assert.Equal("Pending", envelope.State); + Assert.Equal(1, envelope.Sequence); + Assert.NotEqual(default, envelope.Timestamp); + } + + [Fact] + public async Task ProgressStreamYieldsSubsequentEvents() + { + using var factory = new ScannerApplicationFactory(); + using var client = factory.CreateClient(); + + var request = new ScanSubmitRequest + { + Image = new ScanImageDescriptor { Reference = "registry.example.com/acme/app:stream" } + }; + + var submit = await client.PostAsJsonAsync("/api/v1/scans", request); + var submitPayload = await submit.Content.ReadFromJsonAsync(); + Assert.NotNull(submitPayload); + + var publisher = 
factory.Services.GetRequiredService(); + + var response = await client.GetAsync($"/api/v1/scans/{submitPayload!.ScanId}/events?format=jsonl", HttpCompletionOption.ResponseHeadersRead); + await using var stream = await response.Content.ReadAsStreamAsync(); + using var reader = new StreamReader(stream); + + var firstLine = await reader.ReadLineAsync(); + Assert.NotNull(firstLine); + var firstEnvelope = JsonSerializer.Deserialize(firstLine!, SerializerOptions); + Assert.NotNull(firstEnvelope); + Assert.Equal("Pending", firstEnvelope!.State); + + _ = Task.Run(async () => + { + await Task.Delay(50); + publisher.Publish(new ScanId(submitPayload.ScanId), "Running", "worker-started", new Dictionary + { + ["stage"] = "download" + }); + }); + + ProgressEnvelope? envelope = null; + string? line; + do + { + line = await reader.ReadLineAsync(); + if (line is null) + { + break; + } + + if (line.Length == 0) + { + continue; + } + + envelope = JsonSerializer.Deserialize(line, SerializerOptions); + } + while (envelope is not null && envelope.State == "Pending"); + + Assert.NotNull(envelope); + Assert.Equal("Running", envelope!.State); + Assert.True(envelope.Sequence >= 2); + Assert.Contains(envelope.Data.Keys, key => key == "stage"); + } + + private static readonly JsonSerializerOptions SerializerOptions = new(JsonSerializerDefaults.Web); + + private sealed record ProgressEnvelope( + string ScanId, + int Sequence, + string State, + string? 
Message, + DateTimeOffset Timestamp, + string CorrelationId, + Dictionary Data); +} diff --git a/src/StellaOps.Scanner.WebService.Tests/StellaOps.Scanner.WebService.Tests.csproj b/src/StellaOps.Scanner.WebService.Tests/StellaOps.Scanner.WebService.Tests.csproj new file mode 100644 index 00000000..35a72be6 --- /dev/null +++ b/src/StellaOps.Scanner.WebService.Tests/StellaOps.Scanner.WebService.Tests.csproj @@ -0,0 +1,12 @@ + + + net10.0 + enable + enable + false + StellaOps.Scanner.WebService.Tests + + + + + diff --git a/src/StellaOps.Scanner.WebService/Constants/ProblemTypes.cs b/src/StellaOps.Scanner.WebService/Constants/ProblemTypes.cs new file mode 100644 index 00000000..caaefcdd --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Constants/ProblemTypes.cs @@ -0,0 +1,9 @@ +namespace StellaOps.Scanner.WebService.Constants; + +internal static class ProblemTypes +{ + public const string Validation = "https://stellaops.org/problems/validation"; + public const string Conflict = "https://stellaops.org/problems/conflict"; + public const string NotFound = "https://stellaops.org/problems/not-found"; + public const string InternalError = "https://stellaops.org/problems/internal-error"; +} diff --git a/src/StellaOps.Scanner.WebService/Contracts/PolicyPreviewContracts.cs b/src/StellaOps.Scanner.WebService/Contracts/PolicyPreviewContracts.cs new file mode 100644 index 00000000..7c8fe368 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Contracts/PolicyPreviewContracts.cs @@ -0,0 +1,164 @@ +using System; +using System.Collections.Generic; +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.WebService.Contracts; + +public sealed record PolicyPreviewRequestDto +{ + [JsonPropertyName("imageDigest")] + public string? ImageDigest { get; init; } + + [JsonPropertyName("findings")] + public IReadOnlyList? Findings { get; init; } + + [JsonPropertyName("baseline")] + public IReadOnlyList? 
Baseline { get; init; } + + [JsonPropertyName("policy")] + public PolicyPreviewPolicyDto? Policy { get; init; } +} + +public sealed record PolicyPreviewFindingDto +{ + [JsonPropertyName("id")] + public string? Id { get; init; } + + [JsonPropertyName("severity")] + public string? Severity { get; init; } + + [JsonPropertyName("environment")] + public string? Environment { get; init; } + + [JsonPropertyName("source")] + public string? Source { get; init; } + + [JsonPropertyName("vendor")] + public string? Vendor { get; init; } + + [JsonPropertyName("license")] + public string? License { get; init; } + + [JsonPropertyName("image")] + public string? Image { get; init; } + + [JsonPropertyName("repository")] + public string? Repository { get; init; } + + [JsonPropertyName("package")] + public string? Package { get; init; } + + [JsonPropertyName("purl")] + public string? Purl { get; init; } + + [JsonPropertyName("cve")] + public string? Cve { get; init; } + + [JsonPropertyName("path")] + public string? Path { get; init; } + + [JsonPropertyName("layerDigest")] + public string? LayerDigest { get; init; } + + [JsonPropertyName("tags")] + public IReadOnlyList? Tags { get; init; } +} + +public sealed record PolicyPreviewVerdictDto +{ + [JsonPropertyName("findingId")] + public string? FindingId { get; init; } + + [JsonPropertyName("status")] + public string? Status { get; init; } + + [JsonPropertyName("ruleName")] + public string? RuleName { get; init; } + + [JsonPropertyName("ruleAction")] + public string? RuleAction { get; init; } + + [JsonPropertyName("notes")] + public string? Notes { get; init; } + + [JsonPropertyName("score")] + public double? Score { get; init; } + + [JsonPropertyName("configVersion")] + public string? ConfigVersion { get; init; } + + [JsonPropertyName("inputs")] + public IReadOnlyDictionary? Inputs { get; init; } + + [JsonPropertyName("quietedBy")] + public string? QuietedBy { get; init; } + + [JsonPropertyName("quiet")] + public bool? 
Quiet { get; init; } +} + +public sealed record PolicyPreviewPolicyDto +{ + [JsonPropertyName("content")] + public string? Content { get; init; } + + [JsonPropertyName("format")] + public string? Format { get; init; } + + [JsonPropertyName("actor")] + public string? Actor { get; init; } + + [JsonPropertyName("description")] + public string? Description { get; init; } +} + +public sealed record PolicyPreviewResponseDto +{ + [JsonPropertyName("success")] + public bool Success { get; init; } + + [JsonPropertyName("policyDigest")] + public string? PolicyDigest { get; init; } + + [JsonPropertyName("revisionId")] + public string? RevisionId { get; init; } + + [JsonPropertyName("changed")] + public int Changed { get; init; } + + [JsonPropertyName("diffs")] + public IReadOnlyList Diffs { get; init; } = Array.Empty(); + + [JsonPropertyName("issues")] + public IReadOnlyList Issues { get; init; } = Array.Empty(); +} + +public sealed record PolicyPreviewDiffDto +{ + [JsonPropertyName("findingId")] + public string? FindingId { get; init; } + + [JsonPropertyName("baseline")] + public PolicyPreviewVerdictDto? Baseline { get; init; } + + [JsonPropertyName("projected")] + public PolicyPreviewVerdictDto? 
Projected { get; init; } + + [JsonPropertyName("changed")] + public bool Changed { get; init; } +} + +public sealed record PolicyPreviewIssueDto +{ + [JsonPropertyName("code")] + public string Code { get; init; } = string.Empty; + + [JsonPropertyName("message")] + public string Message { get; init; } = string.Empty; + + [JsonPropertyName("severity")] + public string Severity { get; init; } = string.Empty; + + [JsonPropertyName("path")] + public string Path { get; init; } = string.Empty; +} diff --git a/src/StellaOps.Scanner.WebService/Contracts/ScanStatusResponse.cs b/src/StellaOps.Scanner.WebService/Contracts/ScanStatusResponse.cs new file mode 100644 index 00000000..7e4bc7fd --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Contracts/ScanStatusResponse.cs @@ -0,0 +1,13 @@ +namespace StellaOps.Scanner.WebService.Contracts; + +public sealed record ScanStatusResponse( + string ScanId, + string Status, + ScanStatusTarget Image, + DateTimeOffset CreatedAt, + DateTimeOffset UpdatedAt, + string? FailureReason); + +public sealed record ScanStatusTarget( + string? Reference, + string? Digest); diff --git a/src/StellaOps.Scanner.WebService/Contracts/ScanSubmitRequest.cs b/src/StellaOps.Scanner.WebService/Contracts/ScanSubmitRequest.cs new file mode 100644 index 00000000..c802bea9 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Contracts/ScanSubmitRequest.cs @@ -0,0 +1,21 @@ +using System.Collections.Generic; + +namespace StellaOps.Scanner.WebService.Contracts; + +public sealed record ScanSubmitRequest +{ + public required ScanImageDescriptor Image { get; init; } = new(); + + public bool Force { get; init; } + + public string? ClientRequestId { get; init; } + + public IDictionary Metadata { get; init; } = new Dictionary(StringComparer.OrdinalIgnoreCase); +} + +public sealed record ScanImageDescriptor +{ + public string? Reference { get; init; } + + public string? 
Digest { get; init; } +} diff --git a/src/StellaOps.Scanner.WebService/Contracts/ScanSubmitResponse.cs b/src/StellaOps.Scanner.WebService/Contracts/ScanSubmitResponse.cs new file mode 100644 index 00000000..d05737ca --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Contracts/ScanSubmitResponse.cs @@ -0,0 +1,7 @@ +namespace StellaOps.Scanner.WebService.Contracts; + +public sealed record ScanSubmitResponse( + string ScanId, + string Status, + string? Location, + bool Created); diff --git a/src/StellaOps.Scanner.WebService/Diagnostics/ServiceStatus.cs b/src/StellaOps.Scanner.WebService/Diagnostics/ServiceStatus.cs new file mode 100644 index 00000000..96332ae5 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Diagnostics/ServiceStatus.cs @@ -0,0 +1,47 @@ +using System; + +namespace StellaOps.Scanner.WebService.Diagnostics; + +/// +/// Tracks runtime health snapshots for the Scanner WebService. +/// +public sealed class ServiceStatus +{ + private readonly TimeProvider timeProvider; + private readonly DateTimeOffset startedAt; + private ReadySnapshot readySnapshot; + + public ServiceStatus(TimeProvider timeProvider) + { + this.timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider)); + startedAt = timeProvider.GetUtcNow(); + readySnapshot = ReadySnapshot.CreateInitial(startedAt); + } + + public ServiceSnapshot CreateSnapshot() + { + var now = timeProvider.GetUtcNow(); + return new ServiceSnapshot(startedAt, now, readySnapshot); + } + + public void RecordReadyCheck(bool success, TimeSpan latency, string? error) + { + var now = timeProvider.GetUtcNow(); + readySnapshot = new ReadySnapshot(now, latency, success, success ? null : error); + } + + public readonly record struct ServiceSnapshot( + DateTimeOffset StartedAt, + DateTimeOffset CapturedAt, + ReadySnapshot Ready); + + public readonly record struct ReadySnapshot( + DateTimeOffset CheckedAt, + TimeSpan? Latency, + bool IsReady, + string? 
Error) + { + public static ReadySnapshot CreateInitial(DateTimeOffset timestamp) + => new ReadySnapshot(timestamp, null, true, null); + } +} diff --git a/src/StellaOps.Scanner.WebService/Domain/ScanId.cs b/src/StellaOps.Scanner.WebService/Domain/ScanId.cs new file mode 100644 index 00000000..a9d6afb3 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Domain/ScanId.cs @@ -0,0 +1,18 @@ +namespace StellaOps.Scanner.WebService.Domain; + +public readonly record struct ScanId(string Value) +{ + public override string ToString() => Value; + + public static bool TryParse(string? value, out ScanId scanId) + { + if (!string.IsNullOrWhiteSpace(value)) + { + scanId = new ScanId(value.Trim()); + return true; + } + + scanId = default; + return false; + } +} diff --git a/src/StellaOps.Scanner.WebService/Domain/ScanProgressEvent.cs b/src/StellaOps.Scanner.WebService/Domain/ScanProgressEvent.cs new file mode 100644 index 00000000..cb6e369a --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Domain/ScanProgressEvent.cs @@ -0,0 +1,12 @@ +using System.Collections.Generic; + +namespace StellaOps.Scanner.WebService.Domain; + +public sealed record ScanProgressEvent( + ScanId ScanId, + int Sequence, + DateTimeOffset Timestamp, + string State, + string? Message, + string CorrelationId, + IReadOnlyDictionary Data); diff --git a/src/StellaOps.Scanner.WebService/Domain/ScanSnapshot.cs b/src/StellaOps.Scanner.WebService/Domain/ScanSnapshot.cs new file mode 100644 index 00000000..8a37a795 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Domain/ScanSnapshot.cs @@ -0,0 +1,9 @@ +namespace StellaOps.Scanner.WebService.Domain; + +public sealed record ScanSnapshot( + ScanId ScanId, + ScanTarget Target, + ScanStatus Status, + DateTimeOffset CreatedAt, + DateTimeOffset UpdatedAt, + string? 
FailureReason); diff --git a/src/StellaOps.Scanner.WebService/Domain/ScanStatus.cs b/src/StellaOps.Scanner.WebService/Domain/ScanStatus.cs new file mode 100644 index 00000000..f3eac868 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Domain/ScanStatus.cs @@ -0,0 +1,10 @@ +namespace StellaOps.Scanner.WebService.Domain; + +public enum ScanStatus +{ + Pending, + Running, + Succeeded, + Failed, + Cancelled +} diff --git a/src/StellaOps.Scanner.WebService/Domain/ScanSubmission.cs b/src/StellaOps.Scanner.WebService/Domain/ScanSubmission.cs new file mode 100644 index 00000000..241d7694 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Domain/ScanSubmission.cs @@ -0,0 +1,13 @@ +using System.Collections.Generic; + +namespace StellaOps.Scanner.WebService.Domain; + +public sealed record ScanSubmission( + ScanTarget Target, + bool Force, + string? ClientRequestId, + IReadOnlyDictionary Metadata); + +public sealed record ScanSubmissionResult( + ScanSnapshot Snapshot, + bool Created); diff --git a/src/StellaOps.Scanner.WebService/Domain/ScanTarget.cs b/src/StellaOps.Scanner.WebService/Domain/ScanTarget.cs new file mode 100644 index 00000000..42bb1508 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Domain/ScanTarget.cs @@ -0,0 +1,11 @@ +namespace StellaOps.Scanner.WebService.Domain; + +public sealed record ScanTarget(string? Reference, string? Digest) +{ + public ScanTarget Normalize() + { + var normalizedReference = string.IsNullOrWhiteSpace(Reference) ? null : Reference.Trim(); + var normalizedDigest = string.IsNullOrWhiteSpace(Digest) ? 
null : Digest.Trim().ToLowerInvariant(); + return new ScanTarget(normalizedReference, normalizedDigest); + } +} diff --git a/src/StellaOps.Scanner.WebService/Endpoints/HealthEndpoints.cs b/src/StellaOps.Scanner.WebService/Endpoints/HealthEndpoints.cs new file mode 100644 index 00000000..d75f236a --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Endpoints/HealthEndpoints.cs @@ -0,0 +1,112 @@ +using System.Diagnostics; +using System.Text; +using System.Text.Json; +using Microsoft.AspNetCore.Builder; +using Microsoft.AspNetCore.Http; +using Microsoft.AspNetCore.Routing; +using Microsoft.Extensions.Options; +using StellaOps.Scanner.WebService.Diagnostics; +using StellaOps.Scanner.WebService.Options; + +namespace StellaOps.Scanner.WebService.Endpoints; + +internal static class HealthEndpoints +{ + private static readonly JsonSerializerOptions JsonOptions = new(JsonSerializerDefaults.Web); + + public static void MapHealthEndpoints(this IEndpointRouteBuilder endpoints) + { + ArgumentNullException.ThrowIfNull(endpoints); + + var group = endpoints.MapGroup("/"); + group.MapGet("/healthz", HandleHealth) + .WithName("scanner.health") + .Produces(StatusCodes.Status200OK) + .AllowAnonymous(); + + group.MapGet("/readyz", HandleReady) + .WithName("scanner.ready") + .Produces(StatusCodes.Status200OK) + .AllowAnonymous(); + } + + private static IResult HandleHealth( + ServiceStatus status, + IOptions options, + HttpContext context) + { + ApplyNoCache(context.Response); + + var snapshot = status.CreateSnapshot(); + var uptimeSeconds = Math.Max((snapshot.CapturedAt - snapshot.StartedAt).TotalSeconds, 0d); + + var telemetry = new TelemetrySnapshot( + Enabled: options.Value.Telemetry.Enabled, + Logging: options.Value.Telemetry.EnableLogging, + Metrics: options.Value.Telemetry.EnableMetrics, + Tracing: options.Value.Telemetry.EnableTracing); + + var document = new HealthDocument( + Status: "healthy", + StartedAt: snapshot.StartedAt, + CapturedAt: snapshot.CapturedAt, + UptimeSeconds: 
uptimeSeconds, + Telemetry: telemetry); + + return Json(document, StatusCodes.Status200OK); + } + + private static async Task HandleReady( + ServiceStatus status, + HttpContext context, + CancellationToken cancellationToken) + { + ApplyNoCache(context.Response); + + await Task.CompletedTask; + + status.RecordReadyCheck(success: true, latency: TimeSpan.Zero, error: null); + var snapshot = status.CreateSnapshot(); + var ready = snapshot.Ready; + + var document = new ReadyDocument( + Status: ready.IsReady ? "ready" : "unready", + CheckedAt: ready.CheckedAt, + LatencyMs: ready.Latency?.TotalMilliseconds, + Error: ready.Error); + + return Json(document, StatusCodes.Status200OK); + } + + private static void ApplyNoCache(HttpResponse response) + { + response.Headers.CacheControl = "no-store, no-cache, max-age=0, must-revalidate"; + response.Headers.Pragma = "no-cache"; + response.Headers["Expires"] = "0"; + } + + private static IResult Json(T value, int statusCode) + { + var payload = JsonSerializer.Serialize(value, JsonOptions); + return Results.Content(payload, "application/json", Encoding.UTF8, statusCode); + } + + internal sealed record TelemetrySnapshot( + bool Enabled, + bool Logging, + bool Metrics, + bool Tracing); + + internal sealed record HealthDocument( + string Status, + DateTimeOffset StartedAt, + DateTimeOffset CapturedAt, + double UptimeSeconds, + TelemetrySnapshot Telemetry); + + internal sealed record ReadyDocument( + string Status, + DateTimeOffset CheckedAt, + double? LatencyMs, + string? 
Error); +} diff --git a/src/StellaOps.Scanner.WebService/Endpoints/ScanEndpoints.cs b/src/StellaOps.Scanner.WebService/Endpoints/ScanEndpoints.cs new file mode 100644 index 00000000..627e42c4 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Endpoints/ScanEndpoints.cs @@ -0,0 +1,298 @@ +using System.Collections.Generic; +using System.IO.Pipelines; +using System.Runtime.CompilerServices; +using System.Text.Json; +using System.Text.Json.Serialization; +using System.Threading.Tasks; +using System.Text; +using Microsoft.AspNetCore.Http; +using Microsoft.AspNetCore.Routing; +using StellaOps.Scanner.WebService.Constants; +using StellaOps.Scanner.WebService.Contracts; +using StellaOps.Scanner.WebService.Domain; +using StellaOps.Scanner.WebService.Infrastructure; +using StellaOps.Scanner.WebService.Security; +using StellaOps.Scanner.WebService.Services; + +namespace StellaOps.Scanner.WebService.Endpoints; + +internal static class ScanEndpoints +{ + private static readonly JsonSerializerOptions SerializerOptions = new(JsonSerializerDefaults.Web) + { + Converters = { new JsonStringEnumConverter() } + }; + + public static void MapScanEndpoints(this RouteGroupBuilder apiGroup) + { + ArgumentNullException.ThrowIfNull(apiGroup); + + var scans = apiGroup.MapGroup("/scans"); + + scans.MapPost("/", HandleSubmitAsync) + .WithName("scanner.scans.submit") + .Produces(StatusCodes.Status202Accepted) + .Produces(StatusCodes.Status400BadRequest) + .Produces(StatusCodes.Status409Conflict) + .RequireAuthorization(ScannerPolicies.ScansEnqueue); + + scans.MapGet("/{scanId}", HandleStatusAsync) + .WithName("scanner.scans.status") + .Produces(StatusCodes.Status200OK) + .Produces(StatusCodes.Status404NotFound) + .RequireAuthorization(ScannerPolicies.ScansRead); + + scans.MapGet("/{scanId}/events", HandleProgressStreamAsync) + .WithName("scanner.scans.events") + .Produces(StatusCodes.Status200OK) + .Produces(StatusCodes.Status404NotFound) + .RequireAuthorization(ScannerPolicies.ScansRead); + } 
+ + private static async Task HandleSubmitAsync( + ScanSubmitRequest request, + IScanCoordinator coordinator, + LinkGenerator links, + HttpContext context, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(request); + ArgumentNullException.ThrowIfNull(coordinator); + ArgumentNullException.ThrowIfNull(links); + + if (request.Image is null) + { + return ProblemResultFactory.Create( + context, + ProblemTypes.Validation, + "Invalid scan submission", + StatusCodes.Status400BadRequest, + detail: "Request image descriptor is required."); + } + + var reference = request.Image.Reference; + var digest = request.Image.Digest; + if (string.IsNullOrWhiteSpace(reference) && string.IsNullOrWhiteSpace(digest)) + { + return ProblemResultFactory.Create( + context, + ProblemTypes.Validation, + "Invalid scan submission", + StatusCodes.Status400BadRequest, + detail: "Either image.reference or image.digest must be provided."); + } + + if (!string.IsNullOrWhiteSpace(digest) && !digest.Contains(':', StringComparison.Ordinal)) + { + return ProblemResultFactory.Create( + context, + ProblemTypes.Validation, + "Invalid scan submission", + StatusCodes.Status400BadRequest, + detail: "Image digest must include algorithm prefix (e.g. 
sha256:...)."); + } + + var target = new ScanTarget(reference, digest).Normalize(); + var metadata = NormalizeMetadata(request.Metadata); + var submission = new ScanSubmission( + Target: target, + Force: request.Force, + ClientRequestId: request.ClientRequestId?.Trim(), + Metadata: metadata); + + ScanSubmissionResult result; + try + { + result = await coordinator.SubmitAsync(submission, context.RequestAborted).ConfigureAwait(false); + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + throw; + } + + var statusText = result.Snapshot.Status.ToString(); + var location = links.GetPathByName( + httpContext: context, + endpointName: "scanner.scans.status", + values: new { scanId = result.Snapshot.ScanId.Value }); + + if (!string.IsNullOrWhiteSpace(location)) + { + context.Response.Headers.Location = location; + } + + var response = new ScanSubmitResponse( + ScanId: result.Snapshot.ScanId.Value, + Status: statusText, + Location: location, + Created: result.Created); + + return Json(response, StatusCodes.Status202Accepted); + } + + private static async Task HandleStatusAsync( + string scanId, + IScanCoordinator coordinator, + HttpContext context, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(coordinator); + + if (!ScanId.TryParse(scanId, out var parsed)) + { + return ProblemResultFactory.Create( + context, + ProblemTypes.Validation, + "Invalid scan identifier", + StatusCodes.Status400BadRequest, + detail: "Scan identifier is required."); + } + + var snapshot = await coordinator.GetAsync(parsed, context.RequestAborted).ConfigureAwait(false); + if (snapshot is null) + { + return ProblemResultFactory.Create( + context, + ProblemTypes.NotFound, + "Scan not found", + StatusCodes.Status404NotFound, + detail: "Requested scan could not be located."); + } + + var response = new ScanStatusResponse( + ScanId: snapshot.ScanId.Value, + Status: snapshot.Status.ToString(), + Image: new 
ScanStatusTarget(snapshot.Target.Reference, snapshot.Target.Digest), + CreatedAt: snapshot.CreatedAt, + UpdatedAt: snapshot.UpdatedAt, + FailureReason: snapshot.FailureReason); + + return Json(response, StatusCodes.Status200OK); + } + + private static async Task HandleProgressStreamAsync( + string scanId, + string? format, + IScanProgressReader progressReader, + HttpContext context, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(progressReader); + + if (!ScanId.TryParse(scanId, out var parsed)) + { + return ProblemResultFactory.Create( + context, + ProblemTypes.Validation, + "Invalid scan identifier", + StatusCodes.Status400BadRequest, + detail: "Scan identifier is required."); + } + + if (!progressReader.Exists(parsed)) + { + return ProblemResultFactory.Create( + context, + ProblemTypes.NotFound, + "Scan not found", + StatusCodes.Status404NotFound, + detail: "Requested scan could not be located."); + } + + var streamFormat = string.Equals(format, "jsonl", StringComparison.OrdinalIgnoreCase) + ? 
"jsonl" + : "sse"; + + context.Response.StatusCode = StatusCodes.Status200OK; + context.Response.Headers.CacheControl = "no-store"; + context.Response.Headers["X-Accel-Buffering"] = "no"; + context.Response.Headers["Connection"] = "keep-alive"; + + if (streamFormat == "jsonl") + { + context.Response.ContentType = "application/x-ndjson"; + } + else + { + context.Response.ContentType = "text/event-stream"; + } + + await foreach (var progressEvent in progressReader.SubscribeAsync(parsed, context.RequestAborted).WithCancellation(context.RequestAborted)) + { + var payload = new + { + scanId = progressEvent.ScanId.Value, + sequence = progressEvent.Sequence, + state = progressEvent.State, + message = progressEvent.Message, + timestamp = progressEvent.Timestamp, + correlationId = progressEvent.CorrelationId, + data = progressEvent.Data + }; + + if (streamFormat == "jsonl") + { + await WriteJsonLineAsync(context.Response.BodyWriter, payload, cancellationToken).ConfigureAwait(false); + } + else + { + await WriteSseAsync(context.Response.BodyWriter, payload, progressEvent, cancellationToken).ConfigureAwait(false); + } + + await context.Response.BodyWriter.FlushAsync(cancellationToken).ConfigureAwait(false); + } + + return Results.Empty; + } + + private static IReadOnlyDictionary NormalizeMetadata(IDictionary metadata) + { + if (metadata is null || metadata.Count == 0) + { + return new Dictionary(); + } + + var normalized = new Dictionary(StringComparer.OrdinalIgnoreCase); + foreach (var pair in metadata) + { + if (string.IsNullOrWhiteSpace(pair.Key)) + { + continue; + } + + var key = pair.Key.Trim(); + var value = pair.Value?.Trim() ?? 
string.Empty; + normalized[key] = value; + } + + return normalized; + } + + private static async Task WriteJsonLineAsync(PipeWriter writer, object payload, CancellationToken cancellationToken) + { + var json = JsonSerializer.Serialize(payload, SerializerOptions); + var jsonBytes = Encoding.UTF8.GetBytes(json); + await writer.WriteAsync(jsonBytes, cancellationToken).ConfigureAwait(false); + await writer.WriteAsync(new[] { (byte)'\n' }, cancellationToken).ConfigureAwait(false); + } + + private static async Task WriteSseAsync(PipeWriter writer, object payload, ScanProgressEvent progressEvent, CancellationToken cancellationToken) + { + var json = JsonSerializer.Serialize(payload, SerializerOptions); + var eventName = progressEvent.State.ToLowerInvariant(); + var builder = new StringBuilder(); + builder.Append("id: ").Append(progressEvent.Sequence).Append('\n'); + builder.Append("event: ").Append(eventName).Append('\n'); + builder.Append("data: ").Append(json).Append('\n'); + builder.Append('\n'); + + var bytes = Encoding.UTF8.GetBytes(builder.ToString()); + await writer.WriteAsync(bytes, cancellationToken).ConfigureAwait(false); + } + + private static IResult Json(T value, int statusCode) + { + var payload = JsonSerializer.Serialize(value, SerializerOptions); + return Results.Content(payload, "application/json", System.Text.Encoding.UTF8, statusCode); + } +} diff --git a/src/StellaOps.Scanner.WebService/Extensions/ConfigurationExtensions.cs b/src/StellaOps.Scanner.WebService/Extensions/ConfigurationExtensions.cs new file mode 100644 index 00000000..dcccfb5a --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Extensions/ConfigurationExtensions.cs @@ -0,0 +1,38 @@ +using System.Text; +using System.Text.Json; +using Microsoft.Extensions.Configuration; +using YamlDotNet.Serialization; +using YamlDotNet.Serialization.NamingConventions; + +namespace StellaOps.Scanner.WebService.Extensions; + +/// +/// Scanner-specific configuration helpers. 
+/// +public static class ConfigurationExtensions +{ + public static IConfigurationBuilder AddScannerYaml(this IConfigurationBuilder builder, string path) + { + ArgumentNullException.ThrowIfNull(builder); + + if (string.IsNullOrWhiteSpace(path) || !File.Exists(path)) + { + return builder; + } + + var deserializer = new DeserializerBuilder() + .WithNamingConvention(CamelCaseNamingConvention.Instance) + .Build(); + + using var reader = File.OpenText(path); + var yamlObject = deserializer.Deserialize(reader); + if (yamlObject is null) + { + return builder; + } + + var payload = JsonSerializer.Serialize(yamlObject); + var stream = new MemoryStream(Encoding.UTF8.GetBytes(payload)); + return builder.AddJsonStream(stream); + } +} diff --git a/src/StellaOps.Scanner.WebService/Hosting/ScannerPluginHostFactory.cs b/src/StellaOps.Scanner.WebService/Hosting/ScannerPluginHostFactory.cs new file mode 100644 index 00000000..0e048f7a --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Hosting/ScannerPluginHostFactory.cs @@ -0,0 +1,55 @@ +using System; +using System.IO; +using StellaOps.Plugin.Hosting; +using StellaOps.Scanner.WebService.Options; + +namespace StellaOps.Scanner.WebService.Hosting; + +internal static class ScannerPluginHostFactory +{ + public static PluginHostOptions Build(ScannerWebServiceOptions options, string contentRootPath) + { + ArgumentNullException.ThrowIfNull(options); + ArgumentNullException.ThrowIfNull(contentRootPath); + + var baseDirectory = options.Plugins.BaseDirectory; + if (string.IsNullOrWhiteSpace(baseDirectory)) + { + baseDirectory = Path.Combine(contentRootPath, ".."); + } + else if (!Path.IsPathRooted(baseDirectory)) + { + baseDirectory = Path.GetFullPath(Path.Combine(contentRootPath, baseDirectory)); + } + + var pluginsDirectory = options.Plugins.Directory; + if (string.IsNullOrWhiteSpace(pluginsDirectory)) + { + pluginsDirectory = Path.Combine("plugins", "scanner"); + } + + if (!Path.IsPathRooted(pluginsDirectory)) + { + pluginsDirectory = 
Path.Combine(baseDirectory, pluginsDirectory); + } + + var hostOptions = new PluginHostOptions + { + BaseDirectory = baseDirectory, + PluginsDirectory = pluginsDirectory, + PrimaryPrefix = "StellaOps.Scanner" + }; + + foreach (var additionalPrefix in options.Plugins.OrderedPlugins) + { + hostOptions.PluginOrder.Add(additionalPrefix); + } + + foreach (var pattern in options.Plugins.SearchPatterns) + { + hostOptions.SearchPatterns.Add(pattern); + } + + return hostOptions; + } +} diff --git a/src/StellaOps.Scanner.WebService/Infrastructure/ProblemResultFactory.cs b/src/StellaOps.Scanner.WebService/Infrastructure/ProblemResultFactory.cs new file mode 100644 index 00000000..28280c55 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Infrastructure/ProblemResultFactory.cs @@ -0,0 +1,53 @@ +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using System.Text.Json; +using System.Text.Json.Serialization; +using Microsoft.AspNetCore.Http; +using Microsoft.AspNetCore.Mvc; + +namespace StellaOps.Scanner.WebService.Infrastructure; + +internal static class ProblemResultFactory +{ + private static readonly JsonSerializerOptions JsonOptions = new(JsonSerializerDefaults.Web) + { + DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull + }; + + public static IResult Create( + HttpContext context, + string type, + string title, + int statusCode, + string? detail = null, + IDictionary? extensions = null) + { + ArgumentNullException.ThrowIfNull(context); + ArgumentException.ThrowIfNullOrWhiteSpace(type); + ArgumentException.ThrowIfNullOrWhiteSpace(title); + + var traceId = Activity.Current?.TraceId.ToString() ?? 
context.TraceIdentifier; + + var problem = new ProblemDetails + { + Type = type, + Title = title, + Detail = detail, + Status = statusCode, + Instance = context.Request.Path + }; + + problem.Extensions["traceId"] = traceId; + if (extensions is not null) + { + foreach (var entry in extensions) + { + problem.Extensions[entry.Key] = entry.Value; + } + } + + var payload = JsonSerializer.Serialize(problem, JsonOptions); + return Results.Content(payload, "application/problem+json", Encoding.UTF8, statusCode); + } +} diff --git a/src/StellaOps.Scanner.WebService/Options/ScannerWebServiceOptions.cs b/src/StellaOps.Scanner.WebService/Options/ScannerWebServiceOptions.cs new file mode 100644 index 00000000..e0e80bff --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Options/ScannerWebServiceOptions.cs @@ -0,0 +1,240 @@ +using System; +using System.Collections.Generic; + +namespace StellaOps.Scanner.WebService.Options; + +/// +/// Strongly typed configuration for the Scanner WebService host. +/// +public sealed class ScannerWebServiceOptions +{ + public const string SectionName = "scanner"; + + /// + /// Schema version for configuration consumers to coordinate breaking changes. + /// + public int SchemaVersion { get; set; } = 1; + + /// + /// Mongo storage configuration used for catalog and job state. + /// + public StorageOptions Storage { get; set; } = new(); + + /// + /// Queue configuration used to enqueue scan jobs. + /// + public QueueOptions Queue { get; set; } = new(); + + /// + /// Object store configuration for SBOM artefacts. + /// + public ArtifactStoreOptions ArtifactStore { get; set; } = new(); + + /// + /// Feature flags toggling optional behaviours. + /// + public FeatureFlagOptions Features { get; set; } = new(); + + /// + /// Plug-in loader configuration. + /// + public PluginOptions Plugins { get; set; } = new(); + + /// + /// Telemetry configuration for logs, metrics, traces. 
+ /// + public TelemetryOptions Telemetry { get; set; } = new(); + + /// + /// Authority / authentication configuration. + /// + public AuthorityOptions Authority { get; set; } = new(); + + /// + /// Signing configuration for report envelopes and attestations. + /// + public SigningOptions Signing { get; set; } = new(); + + /// + /// API-specific settings such as base path. + /// + public ApiOptions Api { get; set; } = new(); + + public sealed class StorageOptions + { + public string Driver { get; set; } = "mongo"; + + public string Dsn { get; set; } = string.Empty; + + public string? Database { get; set; } + + public int CommandTimeoutSeconds { get; set; } = 30; + + public int HealthCheckTimeoutSeconds { get; set; } = 5; + + public IList Migrations { get; set; } = new List(); + } + + public sealed class QueueOptions + { + public string Driver { get; set; } = "redis"; + + public string Dsn { get; set; } = string.Empty; + + public string Namespace { get; set; } = "scanner"; + + public int VisibilityTimeoutSeconds { get; set; } = 300; + + public int LeaseHeartbeatSeconds { get; set; } = 30; + + public int MaxDeliveryAttempts { get; set; } = 5; + + public IDictionary DriverSettings { get; set; } = new Dictionary(StringComparer.OrdinalIgnoreCase); + } + + public sealed class ArtifactStoreOptions + { + public string Driver { get; set; } = "minio"; + + public string Endpoint { get; set; } = string.Empty; + + public bool UseTls { get; set; } = true; + + public string AccessKey { get; set; } = string.Empty; + + public string SecretKey { get; set; } = string.Empty; + + public string? SecretKeyFile { get; set; } + + public string Bucket { get; set; } = "scanner-artifacts"; + + public string? 
Region { get; set; } + + public bool EnableObjectLock { get; set; } = true; + + public int ObjectLockRetentionDays { get; set; } = 30; + + public IDictionary Headers { get; set; } = new Dictionary(StringComparer.OrdinalIgnoreCase); + } + + public sealed class FeatureFlagOptions + { + public bool AllowAnonymousScanSubmission { get; set; } + + public bool EnableSignedReports { get; set; } = true; + + public bool EnablePolicyPreview { get; set; } = true; + + public IDictionary Experimental { get; set; } = new Dictionary(StringComparer.OrdinalIgnoreCase); + } + + public sealed class PluginOptions + { + public string? BaseDirectory { get; set; } + + public string? Directory { get; set; } + + public IList SearchPatterns { get; set; } = new List(); + + public IList OrderedPlugins { get; set; } = new List(); + } + + public sealed class TelemetryOptions + { + public bool Enabled { get; set; } = true; + + public bool EnableTracing { get; set; } = true; + + public bool EnableMetrics { get; set; } = true; + + public bool EnableLogging { get; set; } = true; + + public bool EnableRequestLogging { get; set; } = true; + + public string MinimumLogLevel { get; set; } = "Information"; + + public string? ServiceName { get; set; } + + public string? OtlpEndpoint { get; set; } + + public IDictionary OtlpHeaders { get; set; } = new Dictionary(StringComparer.OrdinalIgnoreCase); + + public IDictionary ResourceAttributes { get; set; } = new Dictionary(StringComparer.OrdinalIgnoreCase); + } + + public sealed class AuthorityOptions + { + public bool Enabled { get; set; } + + public bool AllowAnonymousFallback { get; set; } = true; + + public string Issuer { get; set; } = string.Empty; + + public string? 
MetadataAddress { get; set; } + + public bool RequireHttpsMetadata { get; set; } = true; + + public int BackchannelTimeoutSeconds { get; set; } = 30; + + public int TokenClockSkewSeconds { get; set; } = 60; + + public IList Audiences { get; set; } = new List(); + + public IList RequiredScopes { get; set; } = new List(); + + public IList BypassNetworks { get; set; } = new List(); + + public string? ClientId { get; set; } + + public string? ClientSecret { get; set; } + + public string? ClientSecretFile { get; set; } + + public IList ClientScopes { get; set; } = new List(); + + public ResilienceOptions Resilience { get; set; } = new(); + + public sealed class ResilienceOptions + { + public bool? EnableRetries { get; set; } + + public IList RetryDelays { get; set; } = new List(); + + public bool? AllowOfflineCacheFallback { get; set; } + + public TimeSpan? OfflineCacheTolerance { get; set; } + } + } + + public sealed class SigningOptions + { + public bool Enabled { get; set; } = false; + + public string KeyId { get; set; } = string.Empty; + + public string Algorithm { get; set; } = "ed25519"; + + public string? KeyPem { get; set; } + + public string? KeyPemFile { get; set; } + + public string? CertificatePem { get; set; } + + public string? CertificatePemFile { get; set; } + + public string? CertificateChainPem { get; set; } + + public string? 
CertificateChainPemFile { get; set; } + + public int EnvelopeTtlSeconds { get; set; } = 600; + } + + public sealed class ApiOptions + { + public string BasePath { get; set; } = "/api/v1"; + + public string ScansSegment { get; set; } = "scans"; + + public string ReportsSegment { get; set; } = "reports"; + } +} diff --git a/src/StellaOps.Scanner.WebService/Options/ScannerWebServiceOptionsPostConfigure.cs b/src/StellaOps.Scanner.WebService/Options/ScannerWebServiceOptionsPostConfigure.cs new file mode 100644 index 00000000..476da69a --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Options/ScannerWebServiceOptionsPostConfigure.cs @@ -0,0 +1,91 @@ +using System; +using System.IO; + +namespace StellaOps.Scanner.WebService.Options; + +/// +/// Post-configuration helpers for . +/// +public static class ScannerWebServiceOptionsPostConfigure +{ + public static void Apply(ScannerWebServiceOptions options, string contentRootPath) + { + ArgumentNullException.ThrowIfNull(options); + ArgumentNullException.ThrowIfNull(contentRootPath); + + options.Plugins ??= new ScannerWebServiceOptions.PluginOptions(); + if (string.IsNullOrWhiteSpace(options.Plugins.Directory)) + { + options.Plugins.Directory = Path.Combine("plugins", "scanner"); + } + + options.Authority ??= new ScannerWebServiceOptions.AuthorityOptions(); + var authority = options.Authority; + if (string.IsNullOrWhiteSpace(authority.ClientSecret) + && !string.IsNullOrWhiteSpace(authority.ClientSecretFile)) + { + authority.ClientSecret = ReadSecretFile(authority.ClientSecretFile!, contentRootPath); + } + + options.ArtifactStore ??= new ScannerWebServiceOptions.ArtifactStoreOptions(); + var artifactStore = options.ArtifactStore; + if (string.IsNullOrWhiteSpace(artifactStore.SecretKey) + && !string.IsNullOrWhiteSpace(artifactStore.SecretKeyFile)) + { + artifactStore.SecretKey = ReadSecretFile(artifactStore.SecretKeyFile!, contentRootPath); + } + + options.Signing ??= new ScannerWebServiceOptions.SigningOptions(); + var 
signing = options.Signing; + if (string.IsNullOrWhiteSpace(signing.KeyPem) + && !string.IsNullOrWhiteSpace(signing.KeyPemFile)) + { + signing.KeyPem = ReadAllText(signing.KeyPemFile!, contentRootPath); + } + + if (string.IsNullOrWhiteSpace(signing.CertificatePem) + && !string.IsNullOrWhiteSpace(signing.CertificatePemFile)) + { + signing.CertificatePem = ReadAllText(signing.CertificatePemFile!, contentRootPath); + } + + if (string.IsNullOrWhiteSpace(signing.CertificateChainPem) + && !string.IsNullOrWhiteSpace(signing.CertificateChainPemFile)) + { + signing.CertificateChainPem = ReadAllText(signing.CertificateChainPemFile!, contentRootPath); + } + } + + private static string ReadSecretFile(string path, string contentRootPath) + { + var resolvedPath = ResolvePath(path, contentRootPath); + if (!File.Exists(resolvedPath)) + { + throw new InvalidOperationException($"Secret file '{resolvedPath}' was not found."); + } + + var secret = File.ReadAllText(resolvedPath).Trim(); + if (string.IsNullOrEmpty(secret)) + { + throw new InvalidOperationException($"Secret file '{resolvedPath}' is empty."); + } + + return secret; + } + + private static string ReadAllText(string path, string contentRootPath) + { + var resolvedPath = ResolvePath(path, contentRootPath); + if (!File.Exists(resolvedPath)) + { + throw new InvalidOperationException($"File '{resolvedPath}' was not found."); + } + + return File.ReadAllText(resolvedPath); + } + + private static string ResolvePath(string path, string contentRootPath) + => Path.IsPathRooted(path) + ? 
path + : Path.GetFullPath(Path.Combine(contentRootPath, path)); +} diff --git a/src/StellaOps.Scanner.WebService/Options/ScannerWebServiceOptionsValidator.cs b/src/StellaOps.Scanner.WebService/Options/ScannerWebServiceOptionsValidator.cs new file mode 100644 index 00000000..7025c91e --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Options/ScannerWebServiceOptionsValidator.cs @@ -0,0 +1,332 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using Microsoft.Extensions.Logging; +using StellaOps.Scanner.WebService.Security; + +namespace StellaOps.Scanner.WebService.Options; + +/// +/// Validation helpers for . +/// +public static class ScannerWebServiceOptionsValidator +{ + private static readonly HashSet SupportedStorageDrivers = new(StringComparer.OrdinalIgnoreCase) + { + "mongo" + }; + + private static readonly HashSet SupportedQueueDrivers = new(StringComparer.OrdinalIgnoreCase) + { + "redis", + "nats", + "rabbitmq" + }; + + private static readonly HashSet SupportedArtifactDrivers = new(StringComparer.OrdinalIgnoreCase) + { + "minio" + }; + + public static void Validate(ScannerWebServiceOptions options) + { + ArgumentNullException.ThrowIfNull(options); + + if (options.SchemaVersion <= 0) + { + throw new InvalidOperationException("Scanner configuration requires a positive schemaVersion."); + } + + options.Storage ??= new ScannerWebServiceOptions.StorageOptions(); + ValidateStorage(options.Storage); + + options.Queue ??= new ScannerWebServiceOptions.QueueOptions(); + ValidateQueue(options.Queue); + + options.ArtifactStore ??= new ScannerWebServiceOptions.ArtifactStoreOptions(); + ValidateArtifactStore(options.ArtifactStore); + + options.Features ??= new ScannerWebServiceOptions.FeatureFlagOptions(); + options.Plugins ??= new ScannerWebServiceOptions.PluginOptions(); + options.Telemetry ??= new ScannerWebServiceOptions.TelemetryOptions(); + ValidateTelemetry(options.Telemetry); + + options.Authority ??= new 
ScannerWebServiceOptions.AuthorityOptions(); + ValidateAuthority(options.Authority); + + options.Signing ??= new ScannerWebServiceOptions.SigningOptions(); + ValidateSigning(options.Signing); + + options.Api ??= new ScannerWebServiceOptions.ApiOptions(); + if (string.IsNullOrWhiteSpace(options.Api.BasePath)) + { + throw new InvalidOperationException("API basePath must be configured."); + } + } + + private static void ValidateStorage(ScannerWebServiceOptions.StorageOptions storage) + { + if (!SupportedStorageDrivers.Contains(storage.Driver)) + { + throw new InvalidOperationException($"Unsupported storage driver '{storage.Driver}'. Supported drivers: mongo."); + } + + if (string.IsNullOrWhiteSpace(storage.Dsn)) + { + throw new InvalidOperationException("Storage DSN must be configured."); + } + + if (storage.CommandTimeoutSeconds <= 0) + { + throw new InvalidOperationException("Storage commandTimeoutSeconds must be greater than zero."); + } + + if (storage.HealthCheckTimeoutSeconds <= 0) + { + throw new InvalidOperationException("Storage healthCheckTimeoutSeconds must be greater than zero."); + } + } + + private static void ValidateQueue(ScannerWebServiceOptions.QueueOptions queue) + { + if (!SupportedQueueDrivers.Contains(queue.Driver)) + { + throw new InvalidOperationException($"Unsupported queue driver '{queue.Driver}'. 
Supported drivers: redis, nats, rabbitmq."); + } + + if (string.IsNullOrWhiteSpace(queue.Dsn)) + { + throw new InvalidOperationException("Queue DSN must be configured."); + } + + if (string.IsNullOrWhiteSpace(queue.Namespace)) + { + throw new InvalidOperationException("Queue namespace must be configured."); + } + + if (queue.VisibilityTimeoutSeconds <= 0) + { + throw new InvalidOperationException("Queue visibilityTimeoutSeconds must be greater than zero."); + } + + if (queue.LeaseHeartbeatSeconds <= 0) + { + throw new InvalidOperationException("Queue leaseHeartbeatSeconds must be greater than zero."); + } + + if (queue.MaxDeliveryAttempts <= 0) + { + throw new InvalidOperationException("Queue maxDeliveryAttempts must be greater than zero."); + } + } + + private static void ValidateArtifactStore(ScannerWebServiceOptions.ArtifactStoreOptions artifactStore) + { + if (!SupportedArtifactDrivers.Contains(artifactStore.Driver)) + { + throw new InvalidOperationException($"Unsupported artifact store driver '{artifactStore.Driver}'. 
Supported drivers: minio."); + } + + if (string.IsNullOrWhiteSpace(artifactStore.Endpoint)) + { + throw new InvalidOperationException("Artifact store endpoint must be configured."); + } + + if (string.IsNullOrWhiteSpace(artifactStore.Bucket)) + { + throw new InvalidOperationException("Artifact store bucket must be configured."); + } + + if (artifactStore.EnableObjectLock && artifactStore.ObjectLockRetentionDays <= 0) + { + throw new InvalidOperationException("Artifact store objectLockRetentionDays must be greater than zero when object lock is enabled."); + } + } + + private static void ValidateTelemetry(ScannerWebServiceOptions.TelemetryOptions telemetry) + { + if (string.IsNullOrWhiteSpace(telemetry.MinimumLogLevel)) + { + throw new InvalidOperationException("Telemetry minimumLogLevel must be configured."); + } + + if (!Enum.TryParse(telemetry.MinimumLogLevel, ignoreCase: true, out LogLevel _)) + { + throw new InvalidOperationException($"Telemetry minimumLogLevel '{telemetry.MinimumLogLevel}' is invalid."); + } + + if (!string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint) && !Uri.TryCreate(telemetry.OtlpEndpoint, UriKind.Absolute, out _)) + { + throw new InvalidOperationException("Telemetry OTLP endpoint must be an absolute URI when specified."); + } + + foreach (var attribute in telemetry.ResourceAttributes) + { + if (string.IsNullOrWhiteSpace(attribute.Key)) + { + throw new InvalidOperationException("Telemetry resource attribute keys must be non-empty."); + } + } + + foreach (var header in telemetry.OtlpHeaders) + { + if (string.IsNullOrWhiteSpace(header.Key)) + { + throw new InvalidOperationException("Telemetry OTLP header keys must be non-empty."); + } + } + } + + private static void ValidateAuthority(ScannerWebServiceOptions.AuthorityOptions authority) + { + authority.Resilience ??= new ScannerWebServiceOptions.AuthorityOptions.ResilienceOptions(); + NormalizeList(authority.Audiences, toLower: false); + NormalizeList(authority.RequiredScopes, toLower: true); + 
NormalizeList(authority.BypassNetworks, toLower: false); + NormalizeList(authority.ClientScopes, toLower: true); + NormalizeResilience(authority.Resilience); + + if (authority.RequiredScopes.Count == 0) + { + authority.RequiredScopes.Add(ScannerAuthorityScopes.ScansEnqueue); + } + + if (authority.ClientScopes.Count == 0) + { + foreach (var scope in authority.RequiredScopes) + { + authority.ClientScopes.Add(scope); + } + } + + if (authority.BackchannelTimeoutSeconds <= 0) + { + throw new InvalidOperationException("Authority backchannelTimeoutSeconds must be greater than zero."); + } + + if (authority.TokenClockSkewSeconds < 0 || authority.TokenClockSkewSeconds > 300) + { + throw new InvalidOperationException("Authority tokenClockSkewSeconds must be between 0 and 300 seconds."); + } + + if (!authority.Enabled) + { + return; + } + + if (string.IsNullOrWhiteSpace(authority.Issuer)) + { + throw new InvalidOperationException("Authority issuer must be configured when authority is enabled."); + } + + if (!Uri.TryCreate(authority.Issuer, UriKind.Absolute, out var issuerUri)) + { + throw new InvalidOperationException("Authority issuer must be an absolute URI."); + } + + if (authority.RequireHttpsMetadata && !issuerUri.IsLoopback && !string.Equals(issuerUri.Scheme, Uri.UriSchemeHttps, StringComparison.OrdinalIgnoreCase)) + { + throw new InvalidOperationException("Authority issuer must use HTTPS when requireHttpsMetadata is enabled."); + } + + if (!string.IsNullOrWhiteSpace(authority.MetadataAddress) && !Uri.TryCreate(authority.MetadataAddress, UriKind.Absolute, out _)) + { + throw new InvalidOperationException("Authority metadataAddress must be an absolute URI when specified."); + } + + if (authority.Audiences.Count == 0) + { + throw new InvalidOperationException("Authority audiences must include at least one entry when authority is enabled."); + } + + if (!authority.AllowAnonymousFallback) + { + if (string.IsNullOrWhiteSpace(authority.ClientId)) + { + throw new 
InvalidOperationException("Authority clientId must be configured when anonymous fallback is disabled."); + } + + if (string.IsNullOrWhiteSpace(authority.ClientSecret)) + { + throw new InvalidOperationException("Authority clientSecret must be configured when anonymous fallback is disabled."); + } + } + } + + private static void ValidateSigning(ScannerWebServiceOptions.SigningOptions signing) + { + if (signing.EnvelopeTtlSeconds <= 0) + { + throw new InvalidOperationException("Signing envelopeTtlSeconds must be greater than zero."); + } + + if (!signing.Enabled) + { + return; + } + + if (string.IsNullOrWhiteSpace(signing.KeyId)) + { + throw new InvalidOperationException("Signing keyId must be configured when signing is enabled."); + } + + if (string.IsNullOrWhiteSpace(signing.Algorithm)) + { + throw new InvalidOperationException("Signing algorithm must be configured when signing is enabled."); + } + + if (string.IsNullOrWhiteSpace(signing.KeyPem) && string.IsNullOrWhiteSpace(signing.KeyPemFile)) + { + throw new InvalidOperationException("Signing requires keyPem or keyPemFile when enabled."); + } + } + + private static void NormalizeList(IList values, bool toLower) + { + if (values is null || values.Count == 0) + { + return; + } + + var seen = new HashSet(StringComparer.OrdinalIgnoreCase); + for (var i = values.Count - 1; i >= 0; i--) + { + var entry = values[i]; + if (string.IsNullOrWhiteSpace(entry)) + { + values.RemoveAt(i); + continue; + } + + var normalized = toLower ? 
entry.Trim().ToLowerInvariant() : entry.Trim(); + if (!seen.Add(normalized)) + { + values.RemoveAt(i); + continue; + } + + values[i] = normalized; + } + } + + private static void NormalizeResilience(ScannerWebServiceOptions.AuthorityOptions.ResilienceOptions resilience) + { + if (resilience.RetryDelays is null) + { + return; + } + + foreach (var delay in resilience.RetryDelays.ToArray()) + { + if (delay <= TimeSpan.Zero) + { + throw new InvalidOperationException("Authority resilience retryDelays must be greater than zero."); + } + } + + if (resilience.OfflineCacheTolerance.HasValue && resilience.OfflineCacheTolerance.Value < TimeSpan.Zero) + { + throw new InvalidOperationException("Authority resilience offlineCacheTolerance must be greater than or equal to zero."); + } + } +} diff --git a/src/StellaOps.Scanner.WebService/Program.cs b/src/StellaOps.Scanner.WebService/Program.cs new file mode 100644 index 00000000..6dd41123 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Program.cs @@ -0,0 +1,245 @@ +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using System.Text.Json; +using System.Text.Json.Serialization; +using Microsoft.AspNetCore.Authorization; +using Microsoft.AspNetCore.Diagnostics; +using Microsoft.AspNetCore.Http; +using Microsoft.AspNetCore.Mvc; +using Microsoft.AspNetCore.Authentication; +using Microsoft.Extensions.Options; +using Serilog; +using Serilog.Events; +using StellaOps.Auth.Client; +using StellaOps.Auth.ServerIntegration; +using StellaOps.Configuration; +using StellaOps.Plugin.DependencyInjection; +using StellaOps.Scanner.WebService.Diagnostics; +using StellaOps.Scanner.WebService.Endpoints; +using StellaOps.Scanner.WebService.Extensions; +using StellaOps.Scanner.WebService.Hosting; +using StellaOps.Scanner.WebService.Options; +using StellaOps.Scanner.WebService.Services; +using StellaOps.Scanner.WebService.Security; + +var builder = WebApplication.CreateBuilder(args); + 
+builder.Configuration.AddStellaOpsDefaults(options => +{ + options.BasePath = builder.Environment.ContentRootPath; + options.EnvironmentPrefix = "SCANNER_"; + options.ConfigureBuilder = configurationBuilder => + { + configurationBuilder.AddScannerYaml(Path.Combine(builder.Environment.ContentRootPath, "../etc/scanner.yaml")); + }; +}); + +var contentRoot = builder.Environment.ContentRootPath; + +var bootstrapOptions = builder.Configuration.BindOptions( + ScannerWebServiceOptions.SectionName, + (opts, _) => + { + ScannerWebServiceOptionsPostConfigure.Apply(opts, contentRoot); + ScannerWebServiceOptionsValidator.Validate(opts); + }); + +builder.Services.AddOptions() + .Bind(builder.Configuration.GetSection(ScannerWebServiceOptions.SectionName)) + .PostConfigure(options => + { + ScannerWebServiceOptionsPostConfigure.Apply(options, contentRoot); + ScannerWebServiceOptionsValidator.Validate(options); + }) + .ValidateOnStart(); + +builder.Host.UseSerilog((context, services, loggerConfiguration) => +{ + loggerConfiguration + .MinimumLevel.Information() + .MinimumLevel.Override("Microsoft.AspNetCore", LogEventLevel.Warning) + .Enrich.FromLogContext() + .WriteTo.Console(); +}); + +builder.Services.AddSingleton(TimeProvider.System); +builder.Services.AddSingleton(); +builder.Services.AddHttpContextAccessor(); +builder.Services.AddSingleton(); +builder.Services.AddSingleton(sp => sp.GetRequiredService()); +builder.Services.AddSingleton(sp => sp.GetRequiredService()); +builder.Services.AddSingleton(); + +var pluginHostOptions = ScannerPluginHostFactory.Build(bootstrapOptions, contentRoot); +builder.Services.RegisterPluginRoutines(builder.Configuration, pluginHostOptions); + +builder.Services.AddEndpointsApiExplorer(); + +if (bootstrapOptions.Authority.Enabled) +{ + builder.Services.AddStellaOpsAuthClient(clientOptions => + { + clientOptions.Authority = bootstrapOptions.Authority.Issuer; + clientOptions.ClientId = bootstrapOptions.Authority.ClientId ?? 
string.Empty; + clientOptions.ClientSecret = bootstrapOptions.Authority.ClientSecret; + clientOptions.HttpTimeout = TimeSpan.FromSeconds(bootstrapOptions.Authority.BackchannelTimeoutSeconds); + + clientOptions.DefaultScopes.Clear(); + foreach (var scope in bootstrapOptions.Authority.ClientScopes) + { + clientOptions.DefaultScopes.Add(scope); + } + + var resilience = bootstrapOptions.Authority.Resilience ?? new ScannerWebServiceOptions.AuthorityOptions.ResilienceOptions(); + if (resilience.EnableRetries.HasValue) + { + clientOptions.EnableRetries = resilience.EnableRetries.Value; + } + + if (resilience.RetryDelays is { Count: > 0 }) + { + clientOptions.RetryDelays.Clear(); + foreach (var delay in resilience.RetryDelays) + { + clientOptions.RetryDelays.Add(delay); + } + } + + if (resilience.AllowOfflineCacheFallback.HasValue) + { + clientOptions.AllowOfflineCacheFallback = resilience.AllowOfflineCacheFallback.Value; + } + + if (resilience.OfflineCacheTolerance.HasValue) + { + clientOptions.OfflineCacheTolerance = resilience.OfflineCacheTolerance.Value; + } + }); + + builder.Services.AddStellaOpsResourceServerAuthentication( + builder.Configuration, + configurationSection: null, + configure: resourceOptions => + { + resourceOptions.Authority = bootstrapOptions.Authority.Issuer; + resourceOptions.RequireHttpsMetadata = bootstrapOptions.Authority.RequireHttpsMetadata; + resourceOptions.MetadataAddress = bootstrapOptions.Authority.MetadataAddress; + resourceOptions.BackchannelTimeout = TimeSpan.FromSeconds(bootstrapOptions.Authority.BackchannelTimeoutSeconds); + resourceOptions.TokenClockSkew = TimeSpan.FromSeconds(bootstrapOptions.Authority.TokenClockSkewSeconds); + + resourceOptions.Audiences.Clear(); + foreach (var audience in bootstrapOptions.Authority.Audiences) + { + resourceOptions.Audiences.Add(audience); + } + + resourceOptions.RequiredScopes.Clear(); + foreach (var scope in bootstrapOptions.Authority.RequiredScopes) + { + 
resourceOptions.RequiredScopes.Add(scope); + } + + resourceOptions.BypassNetworks.Clear(); + foreach (var network in bootstrapOptions.Authority.BypassNetworks) + { + resourceOptions.BypassNetworks.Add(network); + } + }); + + builder.Services.AddAuthorization(options => + { + options.AddStellaOpsScopePolicy(ScannerPolicies.ScansEnqueue, bootstrapOptions.Authority.RequiredScopes.ToArray()); + options.AddStellaOpsScopePolicy(ScannerPolicies.ScansRead, ScannerAuthorityScopes.ScansRead); + options.AddStellaOpsScopePolicy(ScannerPolicies.Reports, ScannerAuthorityScopes.ReportsRead); + }); +} +else +{ + builder.Services.AddAuthentication(options => + { + options.DefaultAuthenticateScheme = "Anonymous"; + options.DefaultChallengeScheme = "Anonymous"; + }) + .AddScheme("Anonymous", _ => { }); + + builder.Services.AddAuthorization(options => + { + options.AddPolicy(ScannerPolicies.ScansEnqueue, policy => policy.RequireAssertion(_ => true)); + options.AddPolicy(ScannerPolicies.ScansRead, policy => policy.RequireAssertion(_ => true)); + options.AddPolicy(ScannerPolicies.Reports, policy => policy.RequireAssertion(_ => true)); + }); +} + +var app = builder.Build(); + +var resolvedOptions = app.Services.GetRequiredService>().Value; +var authorityConfigured = resolvedOptions.Authority.Enabled; +if (authorityConfigured && resolvedOptions.Authority.AllowAnonymousFallback) +{ + app.Logger.LogWarning( + "Scanner authority authentication is enabled but anonymous fallback remains allowed. Disable fallback before production rollout."); +} + +if (resolvedOptions.Telemetry.EnableLogging && resolvedOptions.Telemetry.EnableRequestLogging) +{ + app.UseSerilogRequestLogging(options => + { + options.GetLevel = (httpContext, elapsed, exception) => + exception is null ? 
LogEventLevel.Information : LogEventLevel.Error; + options.EnrichDiagnosticContext = (diagnosticContext, httpContext) => + { + diagnosticContext.Set("RequestId", httpContext.TraceIdentifier); + diagnosticContext.Set("UserAgent", httpContext.Request.Headers.UserAgent.ToString()); + if (Activity.Current is { TraceId: var traceId } && traceId != default) + { + diagnosticContext.Set("TraceId", traceId.ToString()); + } + }; + }); +} + +app.UseExceptionHandler(errorApp => +{ + errorApp.Run(async context => + { + context.Response.ContentType = "application/problem+json"; + var feature = context.Features.Get(); + var error = feature?.Error; + + var extensions = new Dictionary(StringComparer.Ordinal) + { + ["traceId"] = Activity.Current?.TraceId.ToString() ?? context.TraceIdentifier, + }; + + var problem = Results.Problem( + detail: error?.Message, + instance: context.Request.Path, + statusCode: StatusCodes.Status500InternalServerError, + title: "Unexpected server error", + type: "https://stellaops.org/problems/internal-error", + extensions: extensions); + + await problem.ExecuteAsync(context).ConfigureAwait(false); + }); +}); + +if (authorityConfigured) +{ + app.UseAuthentication(); + app.UseAuthorization(); +} + +app.MapHealthEndpoints(); + +var apiGroup = app.MapGroup(resolvedOptions.Api.BasePath); + +if (app.Environment.IsEnvironment("Testing")) +{ + apiGroup.MapGet("/__auth-probe", () => Results.Ok("ok")) + .RequireAuthorization(ScannerPolicies.ScansEnqueue) + .WithName("scanner.auth-probe"); +} + +apiGroup.MapScanEndpoints(); +await app.RunAsync().ConfigureAwait(false); diff --git a/src/StellaOps.Scanner.WebService/Security/AnonymousAuthenticationHandler.cs b/src/StellaOps.Scanner.WebService/Security/AnonymousAuthenticationHandler.cs new file mode 100644 index 00000000..25e71caf --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Security/AnonymousAuthenticationHandler.cs @@ -0,0 +1,26 @@ +using System.Security.Claims; +using System.Text.Encodings.Web; +using 
Microsoft.AspNetCore.Authentication; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; + +namespace StellaOps.Scanner.WebService.Security; + +internal sealed class AnonymousAuthenticationHandler : AuthenticationHandler +{ + public AnonymousAuthenticationHandler( + IOptionsMonitor options, + ILoggerFactory logger, + UrlEncoder encoder) + : base(options, logger, encoder) + { + } + + protected override Task HandleAuthenticateAsync() + { + var identity = new ClaimsIdentity(authenticationType: Scheme.Name); + var principal = new ClaimsPrincipal(identity); + var ticket = new AuthenticationTicket(principal, Scheme.Name); + return Task.FromResult(AuthenticateResult.Success(ticket)); + } +} diff --git a/src/StellaOps.Scanner.WebService/Security/ScannerAuthorityScopes.cs b/src/StellaOps.Scanner.WebService/Security/ScannerAuthorityScopes.cs new file mode 100644 index 00000000..9b3bdaee --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Security/ScannerAuthorityScopes.cs @@ -0,0 +1,11 @@ +namespace StellaOps.Scanner.WebService.Security; + +/// +/// Canonical scope names consumed by the Scanner WebService. 
+/// +internal static class ScannerAuthorityScopes +{ + public const string ScansEnqueue = "scanner.scans.enqueue"; + public const string ScansRead = "scanner.scans.read"; + public const string ReportsRead = "scanner.reports.read"; +} diff --git a/src/StellaOps.Scanner.WebService/Security/ScannerPolicies.cs b/src/StellaOps.Scanner.WebService/Security/ScannerPolicies.cs new file mode 100644 index 00000000..f9d312d3 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Security/ScannerPolicies.cs @@ -0,0 +1,8 @@ +namespace StellaOps.Scanner.WebService.Security; + +internal static class ScannerPolicies +{ + public const string ScansEnqueue = "scanner.api"; + public const string ScansRead = "scanner.scans.read"; + public const string Reports = "scanner.reports"; +} diff --git a/src/StellaOps.Scanner.WebService/Services/IScanCoordinator.cs b/src/StellaOps.Scanner.WebService/Services/IScanCoordinator.cs new file mode 100644 index 00000000..e6217f96 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Services/IScanCoordinator.cs @@ -0,0 +1,10 @@ +using StellaOps.Scanner.WebService.Domain; + +namespace StellaOps.Scanner.WebService.Services; + +public interface IScanCoordinator +{ + ValueTask SubmitAsync(ScanSubmission submission, CancellationToken cancellationToken); + + ValueTask GetAsync(ScanId scanId, CancellationToken cancellationToken); +} diff --git a/src/StellaOps.Scanner.WebService/Services/InMemoryScanCoordinator.cs b/src/StellaOps.Scanner.WebService/Services/InMemoryScanCoordinator.cs new file mode 100644 index 00000000..503e1ff7 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Services/InMemoryScanCoordinator.cs @@ -0,0 +1,80 @@ +using System.Collections.Concurrent; +using System.Collections.Generic; +using StellaOps.Scanner.WebService.Domain; +using StellaOps.Scanner.WebService.Utilities; + +namespace StellaOps.Scanner.WebService.Services; + +public sealed class InMemoryScanCoordinator : IScanCoordinator +{ + private sealed record ScanEntry(ScanSnapshot 
Snapshot); + + private readonly ConcurrentDictionary scans = new(StringComparer.OrdinalIgnoreCase); + private readonly TimeProvider timeProvider; + private readonly IScanProgressPublisher progressPublisher; + + public InMemoryScanCoordinator(TimeProvider timeProvider, IScanProgressPublisher progressPublisher) + { + this.timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider)); + this.progressPublisher = progressPublisher ?? throw new ArgumentNullException(nameof(progressPublisher)); + } + + public ValueTask SubmitAsync(ScanSubmission submission, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(submission); + + var normalizedTarget = submission.Target.Normalize(); + var metadata = submission.Metadata ?? new Dictionary(StringComparer.OrdinalIgnoreCase); + var scanId = ScanIdGenerator.Create(normalizedTarget, submission.Force, submission.ClientRequestId, metadata); + var now = timeProvider.GetUtcNow(); + + var eventData = new Dictionary(StringComparer.OrdinalIgnoreCase) + { + ["force"] = submission.Force, + }; + foreach (var pair in metadata) + { + eventData[$"meta.{pair.Key}"] = pair.Value; + } + + ScanEntry entry = scans.AddOrUpdate( + scanId.Value, + _ => new ScanEntry(new ScanSnapshot( + scanId, + normalizedTarget, + ScanStatus.Pending, + now, + now, + null)), + (_, existing) => + { + if (submission.Force) + { + var snapshot = existing.Snapshot with + { + Status = ScanStatus.Pending, + UpdatedAt = now, + FailureReason = null + }; + return new ScanEntry(snapshot); + } + + return existing; + }); + + var created = entry.Snapshot.CreatedAt == now; + var state = entry.Snapshot.Status.ToString(); + progressPublisher.Publish(scanId, state, created ? 
"queued" : "requeued", eventData); + return ValueTask.FromResult(new ScanSubmissionResult(entry.Snapshot, created)); + } + + public ValueTask GetAsync(ScanId scanId, CancellationToken cancellationToken) + { + if (scans.TryGetValue(scanId.Value, out var entry)) + { + return ValueTask.FromResult(entry.Snapshot); + } + + return ValueTask.FromResult(null); + } +} diff --git a/src/StellaOps.Scanner.WebService/Services/ScanProgressStream.cs b/src/StellaOps.Scanner.WebService/Services/ScanProgressStream.cs new file mode 100644 index 00000000..00fcebce --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Services/ScanProgressStream.cs @@ -0,0 +1,136 @@ +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Collections.ObjectModel; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading.Channels; +using StellaOps.Scanner.WebService.Domain; + +namespace StellaOps.Scanner.WebService.Services; + +public interface IScanProgressPublisher +{ + ScanProgressEvent Publish( + ScanId scanId, + string state, + string? message = null, + IReadOnlyDictionary? data = null, + string? correlationId = null); +} + +public interface IScanProgressReader +{ + bool Exists(ScanId scanId); + + IAsyncEnumerable SubscribeAsync(ScanId scanId, CancellationToken cancellationToken); +} + +public sealed class ScanProgressStream : IScanProgressPublisher, IScanProgressReader +{ + private sealed class ProgressChannel + { + private readonly List history = new(); + private readonly Channel channel = Channel.CreateUnbounded(new UnboundedChannelOptions + { + AllowSynchronousContinuations = true, + SingleReader = false, + SingleWriter = false + }); + + public int Sequence { get; private set; } + + public ScanProgressEvent Append(ScanProgressEvent progressEvent) + { + history.Add(progressEvent); + channel.Writer.TryWrite(progressEvent); + return progressEvent; + } + + public IReadOnlyList Snapshot() + { + return history.Count == 0 + ? 
Array.Empty() + : history.ToArray(); + } + + public ChannelReader Reader => channel.Reader; + + public int NextSequence() => ++Sequence; + } + + private static readonly IReadOnlyDictionary EmptyData = new ReadOnlyDictionary(new Dictionary(StringComparer.OrdinalIgnoreCase)); + + private readonly ConcurrentDictionary channels = new(StringComparer.OrdinalIgnoreCase); + private readonly TimeProvider timeProvider; + + public ScanProgressStream(TimeProvider timeProvider) + { + this.timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider)); + } + + public bool Exists(ScanId scanId) + => channels.ContainsKey(scanId.Value); + + public ScanProgressEvent Publish( + ScanId scanId, + string state, + string? message = null, + IReadOnlyDictionary? data = null, + string? correlationId = null) + { + var channel = channels.GetOrAdd(scanId.Value, _ => new ProgressChannel()); + + ScanProgressEvent progressEvent; + lock (channel) + { + var sequence = channel.NextSequence(); + var correlation = correlationId ?? $"{scanId.Value}:{sequence:D4}"; + var payload = data is null || data.Count == 0 + ? 
EmptyData + : new ReadOnlyDictionary(new Dictionary(data, StringComparer.OrdinalIgnoreCase)); + + progressEvent = new ScanProgressEvent( + scanId, + sequence, + timeProvider.GetUtcNow(), + state, + message, + correlation, + payload); + + channel.Append(progressEvent); + } + + return progressEvent; + } + + public async IAsyncEnumerable SubscribeAsync( + ScanId scanId, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + if (!channels.TryGetValue(scanId.Value, out var channel)) + { + yield break; + } + + IReadOnlyList snapshot; + lock (channel) + { + snapshot = channel.Snapshot(); + } + + foreach (var progressEvent in snapshot) + { + yield return progressEvent; + } + + var reader = channel.Reader; + while (await reader.WaitToReadAsync(cancellationToken).ConfigureAwait(false)) + { + while (reader.TryRead(out var progressEvent)) + { + yield return progressEvent; + } + } + } +} diff --git a/src/StellaOps.Scanner.WebService/StellaOps.Scanner.WebService.csproj b/src/StellaOps.Scanner.WebService/StellaOps.Scanner.WebService.csproj new file mode 100644 index 00000000..0737bba6 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/StellaOps.Scanner.WebService.csproj @@ -0,0 +1,24 @@ + + + net10.0 + preview + enable + enable + true + StellaOps.Scanner.WebService + + + + + + + + + + + + + + + + diff --git a/src/StellaOps.Scanner.WebService/TASKS.md b/src/StellaOps.Scanner.WebService/TASKS.md index 4c3463e8..f920168b 100644 --- a/src/StellaOps.Scanner.WebService/TASKS.md +++ b/src/StellaOps.Scanner.WebService/TASKS.md @@ -2,11 +2,11 @@ | ID | Status | Owner(s) | Depends on | Description | Exit Criteria | |----|--------|----------|------------|-------------|---------------| -| SCANNER-WEB-09-101 | TODO | Scanner WebService Guild | SCANNER-CORE-09-501 | Stand up minimal API host with Authority OpTok + DPoP enforcement, health/ready endpoints, and restart-time plug-in loader per architecture §1, §4. 
| Host boots with configuration validation, `/healthz` and `/readyz` return 200, Authority middleware enforced in integration tests. | -| SCANNER-WEB-09-102 | TODO | Scanner WebService Guild | SCANNER-WEB-09-101, SCANNER-QUEUE-09-401 | Implement `/api/v1/scans` submission/status endpoints with deterministic IDs, validation, and cancellation tokens. | Contract documented, e2e test posts scan request and retrieves status, cancellation token honoured. | -| SCANNER-WEB-09-103 | TODO | Scanner WebService Guild | SCANNER-WEB-09-102, SCANNER-CORE-09-502 | Emit scan progress via SSE/JSONL with correlation IDs and deterministic timestamps; document API reference. | Streaming endpoint verified in tests, timestamps formatted ISO-8601 UTC, docs updated in `docs/09_API_CLI_REFERENCE.md`. | -| SCANNER-WEB-09-104 | TODO | Scanner WebService Guild | SCANNER-STORAGE-09-301, SCANNER-QUEUE-09-401 | Bind configuration for Mongo, MinIO, queue, feature flags; add startup diagnostics and fail-fast policy for missing deps. | Misconfiguration fails fast with actionable errors, configuration bound tests pass, diagnostics logged with correlation IDs. | -| SCANNER-POLICY-09-105 | TODO | Scanner WebService Guild | POLICY-CORE-09-001 | Integrate policy schema loader + diagnostics + OpenAPI (YAML ignore rules, VEX include/exclude, vendor precedence). | Policy endpoints documented; validation surfaces actionable errors; OpenAPI schema published. | +| SCANNER-WEB-09-101 | DONE (2025-10-18) | Scanner WebService Guild | SCANNER-CORE-09-501 | Stand up minimal API host with Authority OpTok + DPoP enforcement, health/ready endpoints, and restart-time plug-in loader per architecture §1, §4. | Host boots with configuration validation, `/healthz` and `/readyz` return 200, Authority middleware enforced in integration tests. 
| +| SCANNER-WEB-09-102 | DONE (2025-10-18) | Scanner WebService Guild | SCANNER-WEB-09-101, SCANNER-QUEUE-09-401 | Implement `/api/v1/scans` submission/status endpoints with deterministic IDs, validation, and cancellation tokens. | Contract documented, e2e test posts scan request and retrieves status, cancellation token honoured. | +| SCANNER-WEB-09-103 | DOING | Scanner WebService Guild | SCANNER-WEB-09-102, SCANNER-CORE-09-502 | Emit scan progress via SSE/JSONL with correlation IDs and deterministic timestamps; document API reference. | Streaming endpoint verified in tests, timestamps formatted ISO-8601 UTC, docs updated in `docs/09_API_CLI_REFERENCE.md`. | +| SCANNER-WEB-09-104 | DONE (2025-10-19) | Scanner WebService Guild | SCANNER-STORAGE-09-301, SCANNER-QUEUE-09-401 | Bind configuration for Mongo, MinIO, queue, feature flags; add startup diagnostics and fail-fast policy for missing deps. | Misconfiguration fails fast with actionable errors, configuration bound tests pass, diagnostics logged with correlation IDs. | +| SCANNER-POLICY-09-105 | DOING | Scanner WebService Guild | POLICY-CORE-09-001 | Integrate policy schema loader + diagnostics + OpenAPI (YAML ignore rules, VEX include/exclude, vendor precedence). | Policy endpoints documented; validation surfaces actionable errors; OpenAPI schema published. | | SCANNER-POLICY-09-106 | TODO | Scanner WebService Guild | POLICY-CORE-09-002, SCANNER-POLICY-09-105 | `/reports` verdict assembly (Feedser/Vexer/Policy merge) + signed response envelope. | Aggregated report includes policy metadata; integration test verifies signed response; docs updated. | | SCANNER-POLICY-09-107 | TODO | Scanner WebService Guild | POLICY-CORE-09-005, SCANNER-POLICY-09-106 | Surface score inputs, config version, and `quietedBy` provenance in `/reports` response and signed payload; document schema changes. 
| `/reports` JSON + DSSE contain score, reachability, sourceTrust, confidenceBand, quiet provenance; contract tests updated; docs refreshed. | | SCANNER-RUNTIME-12-301 | TODO | Scanner WebService Guild | ZASTAVA-CORE-12-201 | Implement `/runtime/events` ingestion endpoint with validation, batching, and storage hooks per Zastava contract. | Observer fixtures POST events, data persisted and acked; invalid payloads rejected with deterministic errors. | diff --git a/src/StellaOps.Scanner.WebService/Utilities/ScanIdGenerator.cs b/src/StellaOps.Scanner.WebService/Utilities/ScanIdGenerator.cs new file mode 100644 index 00000000..618ced31 --- /dev/null +++ b/src/StellaOps.Scanner.WebService/Utilities/ScanIdGenerator.cs @@ -0,0 +1,48 @@ +using System.Collections.Generic; +using System.Linq; +using System.Security.Cryptography; +using System.Text; +using StellaOps.Scanner.WebService.Domain; + +namespace StellaOps.Scanner.WebService.Utilities; + +internal static class ScanIdGenerator +{ + public static ScanId Create( + ScanTarget target, + bool force, + string? clientRequestId, + IReadOnlyDictionary? metadata) + { + ArgumentNullException.ThrowIfNull(target); + + var builder = new StringBuilder(); + builder.Append('|'); + builder.Append(target.Reference?.Trim().ToLowerInvariant() ?? string.Empty); + builder.Append('|'); + builder.Append(target.Digest?.Trim().ToLowerInvariant() ?? string.Empty); + builder.Append("|force:"); + builder.Append(force ? '1' : '0'); + builder.Append("|client:"); + builder.Append(clientRequestId?.Trim().ToLowerInvariant() ?? string.Empty); + + if (metadata is not null && metadata.Count > 0) + { + foreach (var pair in metadata.OrderBy(static entry => entry.Key, StringComparer.OrdinalIgnoreCase)) + { + var key = pair.Key?.Trim().ToLowerInvariant() ?? string.Empty; + var value = pair.Value?.Trim() ?? 
string.Empty; + builder.Append('|'); + builder.Append(key); + builder.Append('='); + builder.Append(value); + } + } + + var canonical = builder.ToString(); + var hash = SHA256.HashData(Encoding.UTF8.GetBytes(canonical)); + var hex = Convert.ToHexString(hash).ToLowerInvariant(); + var trimmed = hex.Length > 40 ? hex[..40] : hex; + return new ScanId(trimmed); + } +} diff --git a/src/StellaOps.Scanner.Worker.Tests/StellaOps.Scanner.Worker.Tests.csproj b/src/StellaOps.Scanner.Worker.Tests/StellaOps.Scanner.Worker.Tests.csproj new file mode 100644 index 00000000..29c4feb2 --- /dev/null +++ b/src/StellaOps.Scanner.Worker.Tests/StellaOps.Scanner.Worker.Tests.csproj @@ -0,0 +1,12 @@ + + + net10.0 + preview + enable + enable + false + + + + + diff --git a/src/StellaOps.Scanner.Worker.Tests/WorkerBasicScanScenarioTests.cs b/src/StellaOps.Scanner.Worker.Tests/WorkerBasicScanScenarioTests.cs new file mode 100644 index 00000000..c5d5b3fe --- /dev/null +++ b/src/StellaOps.Scanner.Worker.Tests/WorkerBasicScanScenarioTests.cs @@ -0,0 +1,444 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Diagnostics.Metrics; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using Microsoft.Extensions.Time.Testing; +using StellaOps.Scanner.Worker.Diagnostics; +using StellaOps.Scanner.Worker.Hosting; +using StellaOps.Scanner.Worker.Options; +using StellaOps.Scanner.Worker.Processing; +using Xunit; + +namespace StellaOps.Scanner.Worker.Tests; + +public sealed class WorkerBasicScanScenarioTests +{ + [Fact] + public async Task DelayAsync_CompletesAfterTimeAdvance() + { + var scheduler = new ControlledDelayScheduler(); + var delayTask = scheduler.DelayAsync(TimeSpan.FromSeconds(5), CancellationToken.None); + scheduler.AdvanceBy(TimeSpan.FromSeconds(5)); + await 
delayTask.WaitAsync(TimeSpan.FromSeconds(1)); + } + + [Fact] + public async Task Worker_CompletesJob_RecordsTelemetry_And_Heartbeats() + { + var fakeTime = new FakeTimeProvider(); + fakeTime.SetUtcNow(DateTimeOffset.UtcNow); + + var options = new ScannerWorkerOptions + { + MaxConcurrentJobs = 1, + }; + options.Telemetry.EnableTelemetry = false; + options.Telemetry.EnableMetrics = true; + + var optionsMonitor = new StaticOptionsMonitor(options); + var testLoggerProvider = new TestLoggerProvider(); + var lease = new TestJobLease(fakeTime); + var jobSource = new TestJobSource(lease); + var scheduler = new ControlledDelayScheduler(); + var analyzer = new TestAnalyzerDispatcher(scheduler); + + using var listener = new WorkerMetricsListener(); + listener.Start(); + + using var services = new ServiceCollection() + .AddLogging(builder => + { + builder.ClearProviders(); + builder.AddProvider(testLoggerProvider); + builder.SetMinimumLevel(LogLevel.Debug); + }) + .AddSingleton(fakeTime) + .AddSingleton(fakeTime) + .AddSingleton>(optionsMonitor) + .AddSingleton() + .AddSingleton() + .AddSingleton() + .AddSingleton() + .AddSingleton(scheduler) + .AddSingleton(_ => jobSource) + .AddSingleton(analyzer) + .AddSingleton() + .AddSingleton() + .BuildServiceProvider(); + + var worker = services.GetRequiredService(); + + await worker.StartAsync(CancellationToken.None); + + await jobSource.LeaseIssued.Task.WaitAsync(TimeSpan.FromSeconds(5)); + + scheduler.AdvanceBy(TimeSpan.FromSeconds(30)); + scheduler.AdvanceBy(TimeSpan.FromSeconds(30)); + + try + { + await lease.Completed.Task.WaitAsync(TimeSpan.FromSeconds(30)); + } + catch (TimeoutException ex) + { + var stageLogs = string.Join(Environment.NewLine, testLoggerProvider + .GetEntriesForCategory(typeof(ScanProgressReporter).FullName!) + .Select(entry => entry.ToFormattedString())); + + throw new TimeoutException($"Worker did not complete within timeout. 
Logs:{Environment.NewLine}{stageLogs}", ex); + } + + await worker.StopAsync(CancellationToken.None); + + Assert.True(lease.Completed.Task.IsCompletedSuccessfully, "Job should complete successfully."); + Assert.Single(analyzer.Executions); + Assert.True(lease.RenewalCount >= 1, "Lease should have been renewed at least once."); + + var stageOrder = testLoggerProvider + .GetEntriesForCategory(typeof(ScanProgressReporter).FullName!) + .Where(entry => entry.EventId.Id == 1000) + .Select(entry => entry.GetScopeProperty("Stage")) + .Where(stage => stage is not null) + .Cast() + .ToArray(); + + Assert.Equal(ScanStageNames.Ordered, stageOrder); + + var queueLatency = listener.Measurements.Where(m => m.InstrumentName == "scanner_worker_queue_latency_ms").ToArray(); + Assert.Single(queueLatency); + Assert.True(queueLatency[0].Value > 0, "Queue latency should be positive."); + + var jobDuration = listener.Measurements.Where(m => m.InstrumentName == "scanner_worker_job_duration_ms").ToArray(); + Assert.Single(jobDuration); + Assert.True(jobDuration[0].Value > 0, "Job duration should be positive."); + + var stageDurations = listener.Measurements.Where(m => m.InstrumentName == "scanner_worker_stage_duration_ms").ToArray(); + Assert.Contains(stageDurations, m => m.Tags.TryGetValue("stage", out var stage) && Equals(stage, ScanStageNames.ExecuteAnalyzers)); + } + + private sealed class TestJobSource : IScanJobSource + { + private readonly TestJobLease _lease; + private int _delivered; + + public TestJobSource(TestJobLease lease) + { + _lease = lease; + } + + public TaskCompletionSource LeaseIssued { get; } = new(TaskCreationOptions.RunContinuationsAsynchronously); + + public Task TryAcquireAsync(CancellationToken cancellationToken) + { + if (Interlocked.Exchange(ref _delivered, 1) == 0) + { + LeaseIssued.TrySetResult(); + return Task.FromResult(_lease); + } + + return Task.FromResult(null); + } + } + + private sealed class TestJobLease : IScanJobLease + { + private readonly 
FakeTimeProvider _timeProvider; + private readonly Dictionary _metadata = new() + { + { "queue", "tests" }, + { "job.kind", "basic" }, + }; + + public TestJobLease(FakeTimeProvider timeProvider) + { + _timeProvider = timeProvider; + EnqueuedAtUtc = _timeProvider.GetUtcNow() - TimeSpan.FromSeconds(5); + LeasedAtUtc = _timeProvider.GetUtcNow(); + } + + public string JobId { get; } = Guid.NewGuid().ToString("n"); + + public string ScanId { get; } = $"scan-{Guid.NewGuid():n}"; + + public int Attempt { get; } = 1; + + public DateTimeOffset EnqueuedAtUtc { get; } + + public DateTimeOffset LeasedAtUtc { get; } + + public TimeSpan LeaseDuration { get; } = TimeSpan.FromSeconds(90); + + public IReadOnlyDictionary Metadata => _metadata; + + public TaskCompletionSource Completed { get; } = new(TaskCreationOptions.RunContinuationsAsynchronously); + + public int RenewalCount => _renewalCount; + + public ValueTask RenewAsync(CancellationToken cancellationToken) + { + Interlocked.Increment(ref _renewalCount); + return ValueTask.CompletedTask; + } + + public ValueTask CompleteAsync(CancellationToken cancellationToken) + { + Completed.TrySetResult(); + return ValueTask.CompletedTask; + } + + public ValueTask AbandonAsync(string reason, CancellationToken cancellationToken) + { + Completed.TrySetException(new InvalidOperationException($"Abandoned: {reason}")); + return ValueTask.CompletedTask; + } + + public ValueTask PoisonAsync(string reason, CancellationToken cancellationToken) + { + Completed.TrySetException(new InvalidOperationException($"Poisoned: {reason}")); + return ValueTask.CompletedTask; + } + + public ValueTask DisposeAsync() => ValueTask.CompletedTask; + + private int _renewalCount; + } + + private sealed class TestAnalyzerDispatcher : IScanAnalyzerDispatcher + { + private readonly IDelayScheduler _scheduler; + + public TestAnalyzerDispatcher(IDelayScheduler scheduler) + { + _scheduler = scheduler; + } + + public List Executions { get; } = new(); + + public async 
ValueTask ExecuteAsync(ScanJobContext context, CancellationToken cancellationToken) + { + Executions.Add(context.JobId); + await _scheduler.DelayAsync(TimeSpan.FromSeconds(45), cancellationToken); + } + } + + private sealed class ControlledDelayScheduler : IDelayScheduler + { + private readonly object _lock = new(); + private readonly SortedDictionary> _scheduled = new(); + private double _currentMilliseconds; + + public Task DelayAsync(TimeSpan delay, CancellationToken cancellationToken) + { + if (delay <= TimeSpan.Zero) + { + return Task.CompletedTask; + } + + var tcs = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); + var scheduled = new ScheduledDelay(tcs, cancellationToken); + lock (_lock) + { + var due = _currentMilliseconds + delay.TotalMilliseconds; + if (!_scheduled.TryGetValue(due, out var list)) + { + list = new List(); + _scheduled.Add(due, list); + } + + list.Add(scheduled); + } + + return scheduled.Task; + } + + public void AdvanceBy(TimeSpan delta) + { + lock (_lock) + { + _currentMilliseconds += delta.TotalMilliseconds; + var dueKeys = _scheduled.Keys.Where(key => key <= _currentMilliseconds).ToList(); + foreach (var due in dueKeys) + { + foreach (var scheduled in _scheduled[due]) + { + scheduled.Complete(); + } + + _scheduled.Remove(due); + } + } + } + + private sealed class ScheduledDelay + { + private readonly TaskCompletionSource _tcs; + private readonly CancellationTokenRegistration _registration; + + public ScheduledDelay(TaskCompletionSource tcs, CancellationToken cancellationToken) + { + _tcs = tcs; + if (cancellationToken.CanBeCanceled) + { + _registration = cancellationToken.Register(state => + { + var source = (TaskCompletionSource)state!; + source.TrySetCanceled(cancellationToken); + }, tcs); + } + } + + public Task Task => _tcs.Task; + + public void Complete() + { + _registration.Dispose(); + _tcs.TrySetResult(null); + } + } + } + + private sealed class StaticOptionsMonitor : IOptionsMonitor + where 
TOptions : class + { + private readonly TOptions _value; + + public StaticOptionsMonitor(TOptions value) + { + _value = value; + } + + public TOptions CurrentValue => _value; + + public TOptions Get(string? name) => _value; + + public IDisposable OnChange(Action listener) => NullDisposable.Instance; + + private sealed class NullDisposable : IDisposable + { + public static readonly NullDisposable Instance = new(); + public void Dispose() + { + } + } + } + + private sealed class WorkerMetricsListener : IDisposable + { + private readonly MeterListener _listener; + public ConcurrentBag Measurements { get; } = new(); + + public WorkerMetricsListener() + { + _listener = new MeterListener + { + InstrumentPublished = (instrument, listener) => + { + if (instrument.Meter.Name == ScannerWorkerInstrumentation.MeterName) + { + listener.EnableMeasurementEvents(instrument); + } + } + }; + + _listener.SetMeasurementEventCallback((instrument, measurement, tags, state) => + { + var tagDictionary = new Dictionary(tags.Length, StringComparer.Ordinal); + foreach (var tag in tags) + { + tagDictionary[tag.Key] = tag.Value; + } + + Measurements.Add(new Measurement(instrument.Name, measurement, tagDictionary)); + }); + } + + public void Start() => _listener.Start(); + + public void Dispose() => _listener.Dispose(); + } + + public sealed record Measurement(string InstrumentName, double Value, IReadOnlyDictionary Tags) + { + public object? this[string name] => Tags.TryGetValue(name, out var value) ? 
value : null; + } + + private sealed class TestLoggerProvider : ILoggerProvider + { + private readonly ConcurrentQueue _entries = new(); + + public ILogger CreateLogger(string categoryName) => new TestLogger(categoryName, _entries); + + public void Dispose() + { + } + + public IEnumerable GetEntriesForCategory(string categoryName) + => _entries.Where(entry => entry.Category == categoryName); + + private sealed class TestLogger : ILogger + { + private readonly string _category; + private readonly ConcurrentQueue _entries; + + public TestLogger(string category, ConcurrentQueue entries) + { + _category = category; + _entries = entries; + } + + public IDisposable? BeginScope(TState state) where TState : notnull => NullDisposable.Instance; + + public bool IsEnabled(LogLevel logLevel) => true; + + public void Log(LogLevel logLevel, EventId eventId, TState state, Exception? exception, Func formatter) + { + _entries.Enqueue(new TestLogEntry(_category, logLevel, eventId, state, exception)); + } + } + + private sealed class NullDisposable : IDisposable + { + public static readonly NullDisposable Instance = new(); + public void Dispose() + { + } + } + } + + public sealed record TestLogEntry(string Category, LogLevel Level, EventId EventId, object? State, Exception? Exception) + { + public T? GetScopeProperty(string name) + { + if (State is not IEnumerable> state) + { + return default; + } + + foreach (var kvp in state) + { + if (string.Equals(kvp.Key, name, StringComparison.OrdinalIgnoreCase) && kvp.Value is T value) + { + return value; + } + } + + return default; + } + + public string ToFormattedString() + { + var properties = State is IEnumerable> kvps + ? string.Join(", ", kvps.Select(kvp => $"{kvp.Key}={kvp.Value}")) + : State?.ToString() ?? string.Empty; + + var exceptionPart = Exception is null ? 
string.Empty : $" Exception={Exception.GetType().Name}: {Exception.Message}"; + return $"[{Level}] {Category} ({EventId.Id}) {properties}{exceptionPart}"; + } + } +} diff --git a/src/StellaOps.Scanner.Worker/AGENTS.md b/src/StellaOps.Scanner.Worker/AGENTS.md new file mode 100644 index 00000000..f816382d --- /dev/null +++ b/src/StellaOps.Scanner.Worker/AGENTS.md @@ -0,0 +1,26 @@ +# AGENTS +## Role +Scanner.Worker engineers own the queue-driven execution host that turns scan jobs into SBOM artefacts with deterministic progress reporting. +## Scope +- Host bootstrap: configuration binding, Authority client wiring, graceful shutdown, restart-time plug-in discovery hooks. +- Job acquisition & lease renewal semantics backed by the Scanner queue abstraction. +- Analyzer orchestration skeleton: stage pipeline, cancellation awareness, deterministic progress emissions. +- Telemetry: structured logging, OpenTelemetry metrics/traces, health counters for offline diagnostics. +## Participants +- Consumes jobs from `StellaOps.Scanner.Queue`. +- Persists progress/artifacts via `StellaOps.Scanner.Storage` once those modules land. +- Emits metrics and structured logs consumed by Observability stack & WebService status endpoints. +## Interfaces & contracts +- Queue lease abstraction (`IScanJobLease`, `IScanJobSource`) with deterministic identifiers and attempt counters. +- Analyzer dispatcher contracts for OS/lang/native analyzers and emitters. +- Telemetry resource attributes shared with Scanner.WebService and Scheduler. +## In/Out of scope +In scope: worker host, concurrency orchestration, lease renewal, cancellation wiring, deterministic logging/metrics. +Out of scope: queue provider implementations, analyzer business logic, Mongo/object-store repositories. +## Observability expectations +- Meter `StellaOps.Scanner.Worker` with queue latency, stage duration, failure counters. +- Activity source `StellaOps.Scanner.Worker.Job` for per-job tracing. 
+- Log correlation IDs (`jobId`, `leaseId`, `scanId`) with structured payloads; avoid dumping secrets or full manifests. +## Tests +- Integration fixture `WorkerBasicScanScenario` verifying acquisition → heartbeat → analyzer stages → completion. +- Unit tests around retry/jitter calculators as they are introduced. diff --git a/src/StellaOps.Scanner.Worker/Diagnostics/ScannerWorkerInstrumentation.cs b/src/StellaOps.Scanner.Worker/Diagnostics/ScannerWorkerInstrumentation.cs new file mode 100644 index 00000000..81762e5b --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Diagnostics/ScannerWorkerInstrumentation.cs @@ -0,0 +1,15 @@ +using System.Diagnostics; +using System.Diagnostics.Metrics; + +namespace StellaOps.Scanner.Worker.Diagnostics; + +public static class ScannerWorkerInstrumentation +{ + public const string ActivitySourceName = "StellaOps.Scanner.Worker.Job"; + + public const string MeterName = "StellaOps.Scanner.Worker"; + + public static ActivitySource ActivitySource { get; } = new(ActivitySourceName); + + public static Meter Meter { get; } = new(MeterName, version: "1.0.0"); +} diff --git a/src/StellaOps.Scanner.Worker/Diagnostics/ScannerWorkerMetrics.cs b/src/StellaOps.Scanner.Worker/Diagnostics/ScannerWorkerMetrics.cs new file mode 100644 index 00000000..9af0164b --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Diagnostics/ScannerWorkerMetrics.cs @@ -0,0 +1,109 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.Metrics; +using StellaOps.Scanner.Worker.Processing; + +namespace StellaOps.Scanner.Worker.Diagnostics; + +public sealed class ScannerWorkerMetrics +{ + private readonly Histogram _queueLatencyMs; + private readonly Histogram _jobDurationMs; + private readonly Histogram _stageDurationMs; + private readonly Counter _jobsCompleted; + private readonly Counter _jobsFailed; + + public ScannerWorkerMetrics() + { + _queueLatencyMs = ScannerWorkerInstrumentation.Meter.CreateHistogram( + "scanner_worker_queue_latency_ms", + unit: 
"ms", + description: "Time from job enqueue to lease acquisition."); + _jobDurationMs = ScannerWorkerInstrumentation.Meter.CreateHistogram( + "scanner_worker_job_duration_ms", + unit: "ms", + description: "Total processing duration per job."); + _stageDurationMs = ScannerWorkerInstrumentation.Meter.CreateHistogram( + "scanner_worker_stage_duration_ms", + unit: "ms", + description: "Stage execution duration per job."); + _jobsCompleted = ScannerWorkerInstrumentation.Meter.CreateCounter( + "scanner_worker_jobs_completed_total", + description: "Number of successfully completed scan jobs."); + _jobsFailed = ScannerWorkerInstrumentation.Meter.CreateCounter( + "scanner_worker_jobs_failed_total", + description: "Number of scan jobs that failed permanently."); + } + + public void RecordQueueLatency(ScanJobContext context, TimeSpan latency) + { + if (latency <= TimeSpan.Zero) + { + return; + } + + _queueLatencyMs.Record(latency.TotalMilliseconds, CreateTags(context)); + } + + public void RecordJobDuration(ScanJobContext context, TimeSpan duration) + { + if (duration <= TimeSpan.Zero) + { + return; + } + + _jobDurationMs.Record(duration.TotalMilliseconds, CreateTags(context)); + } + + public void RecordStageDuration(ScanJobContext context, string stage, TimeSpan duration) + { + if (duration <= TimeSpan.Zero) + { + return; + } + + _stageDurationMs.Record(duration.TotalMilliseconds, CreateTags(context, stage: stage)); + } + + public void IncrementJobCompleted(ScanJobContext context) + { + _jobsCompleted.Add(1, CreateTags(context)); + } + + public void IncrementJobFailed(ScanJobContext context, string failureReason) + { + _jobsFailed.Add(1, CreateTags(context, failureReason: failureReason)); + } + + private static KeyValuePair[] CreateTags(ScanJobContext context, string? stage = null, string? failureReason = null) + { + var tags = new List>(stage is null ? 
5 : 6) + { + new("job.id", context.JobId), + new("scan.id", context.ScanId), + new("attempt", context.Lease.Attempt), + }; + + if (context.Lease.Metadata.TryGetValue("queue", out var queueName) && !string.IsNullOrWhiteSpace(queueName)) + { + tags.Add(new KeyValuePair("queue", queueName)); + } + + if (context.Lease.Metadata.TryGetValue("job.kind", out var jobKind) && !string.IsNullOrWhiteSpace(jobKind)) + { + tags.Add(new KeyValuePair("job.kind", jobKind)); + } + + if (!string.IsNullOrWhiteSpace(stage)) + { + tags.Add(new KeyValuePair("stage", stage)); + } + + if (!string.IsNullOrWhiteSpace(failureReason)) + { + tags.Add(new KeyValuePair("reason", failureReason)); + } + + return tags.ToArray(); + } +} diff --git a/src/StellaOps.Scanner.Worker/Diagnostics/TelemetryExtensions.cs b/src/StellaOps.Scanner.Worker/Diagnostics/TelemetryExtensions.cs new file mode 100644 index 00000000..e1db1b89 --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Diagnostics/TelemetryExtensions.cs @@ -0,0 +1,102 @@ +using System; +using System.Collections.Generic; +using System.Reflection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using OpenTelemetry.Metrics; +using OpenTelemetry.Resources; +using OpenTelemetry.Trace; +using StellaOps.Scanner.Worker.Options; + +namespace StellaOps.Scanner.Worker.Diagnostics; + +public static class TelemetryExtensions +{ + public static void ConfigureScannerWorkerTelemetry(this IHostApplicationBuilder builder, ScannerWorkerOptions options) + { + ArgumentNullException.ThrowIfNull(builder); + ArgumentNullException.ThrowIfNull(options); + + var telemetry = options.Telemetry; + if (!telemetry.EnableTelemetry) + { + return; + } + + var openTelemetry = builder.Services.AddOpenTelemetry(); + + openTelemetry.ConfigureResource(resource => + { + var version = Assembly.GetExecutingAssembly().GetName().Version?.ToString() ?? 
"unknown"; + resource.AddService(telemetry.ServiceName, serviceVersion: version, serviceInstanceId: Environment.MachineName); + resource.AddAttributes(new[] + { + new KeyValuePair("deployment.environment", builder.Environment.EnvironmentName), + }); + + foreach (var kvp in telemetry.ResourceAttributes) + { + if (string.IsNullOrWhiteSpace(kvp.Key) || kvp.Value is null) + { + continue; + } + + resource.AddAttributes(new[] { new KeyValuePair(kvp.Key, kvp.Value) }); + } + }); + + if (telemetry.EnableTracing) + { + openTelemetry.WithTracing(tracing => + { + tracing.AddSource(ScannerWorkerInstrumentation.ActivitySourceName); + ConfigureExporter(tracing, telemetry); + }); + } + + if (telemetry.EnableMetrics) + { + openTelemetry.WithMetrics(metrics => + { + metrics + .AddMeter(ScannerWorkerInstrumentation.MeterName) + .AddRuntimeInstrumentation() + .AddProcessInstrumentation(); + + ConfigureExporter(metrics, telemetry); + }); + } + } + + private static void ConfigureExporter(TracerProviderBuilder tracing, ScannerWorkerOptions.TelemetryOptions telemetry) + { + if (!string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint)) + { + tracing.AddOtlpExporter(options => + { + options.Endpoint = new Uri(telemetry.OtlpEndpoint); + }); + } + + if (telemetry.ExportConsole || string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint)) + { + tracing.AddConsoleExporter(); + } + } + + private static void ConfigureExporter(MeterProviderBuilder metrics, ScannerWorkerOptions.TelemetryOptions telemetry) + { + if (!string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint)) + { + metrics.AddOtlpExporter(options => + { + options.Endpoint = new Uri(telemetry.OtlpEndpoint); + }); + } + + if (telemetry.ExportConsole || string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint)) + { + metrics.AddConsoleExporter(); + } + } +} diff --git a/src/StellaOps.Scanner.Worker/Hosting/ScannerWorkerHostedService.cs b/src/StellaOps.Scanner.Worker/Hosting/ScannerWorkerHostedService.cs new file mode 100644 index 00000000..2f22294a --- /dev/null 
+++ b/src/StellaOps.Scanner.Worker/Hosting/ScannerWorkerHostedService.cs @@ -0,0 +1,201 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using StellaOps.Scanner.Worker.Diagnostics; +using StellaOps.Scanner.Worker.Options; +using StellaOps.Scanner.Worker.Processing; + +namespace StellaOps.Scanner.Worker.Hosting; + +public sealed partial class ScannerWorkerHostedService : BackgroundService +{ + private readonly IScanJobSource _jobSource; + private readonly ScanJobProcessor _processor; + private readonly LeaseHeartbeatService _heartbeatService; + private readonly ScannerWorkerMetrics _metrics; + private readonly TimeProvider _timeProvider; + private readonly IOptionsMonitor _options; + private readonly ILogger _logger; + private readonly IDelayScheduler _delayScheduler; + + public ScannerWorkerHostedService( + IScanJobSource jobSource, + ScanJobProcessor processor, + LeaseHeartbeatService heartbeatService, + ScannerWorkerMetrics metrics, + TimeProvider timeProvider, + IDelayScheduler delayScheduler, + IOptionsMonitor options, + ILogger logger) + { + _jobSource = jobSource ?? throw new ArgumentNullException(nameof(jobSource)); + _processor = processor ?? throw new ArgumentNullException(nameof(processor)); + _heartbeatService = heartbeatService ?? throw new ArgumentNullException(nameof(heartbeatService)); + _metrics = metrics ?? throw new ArgumentNullException(nameof(metrics)); + _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider)); + _delayScheduler = delayScheduler ?? throw new ArgumentNullException(nameof(delayScheduler)); + _options = options ?? throw new ArgumentNullException(nameof(options)); + _logger = logger ?? 
throw new ArgumentNullException(nameof(logger)); + } + + protected override async Task ExecuteAsync(CancellationToken stoppingToken) + { + var runningJobs = new HashSet(); + var delayStrategy = new PollDelayStrategy(_options.CurrentValue.Polling); + + WorkerStarted(_logger); + + while (!stoppingToken.IsCancellationRequested) + { + runningJobs.RemoveWhere(static task => task.IsCompleted); + + var options = _options.CurrentValue; + if (runningJobs.Count >= options.MaxConcurrentJobs) + { + var completed = await Task.WhenAny(runningJobs).ConfigureAwait(false); + runningJobs.Remove(completed); + continue; + } + + IScanJobLease? lease = null; + try + { + lease = await _jobSource.TryAcquireAsync(stoppingToken).ConfigureAwait(false); + } + catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested) + { + break; + } + catch (Exception ex) + { + _logger.LogError(ex, "Scanner worker failed to acquire job lease; backing off."); + } + + if (lease is null) + { + var delay = delayStrategy.NextDelay(); + await _delayScheduler.DelayAsync(delay, stoppingToken).ConfigureAwait(false); + continue; + } + + delayStrategy.Reset(); + runningJobs.Add(RunJobAsync(lease, stoppingToken)); + } + + if (runningJobs.Count > 0) + { + await Task.WhenAll(runningJobs).ConfigureAwait(false); + } + + WorkerStopping(_logger); + } + + private async Task RunJobAsync(IScanJobLease lease, CancellationToken stoppingToken) + { + var options = _options.CurrentValue; + var jobStart = _timeProvider.GetUtcNow(); + var queueLatency = jobStart - lease.EnqueuedAtUtc; + var jobCts = CancellationTokenSource.CreateLinkedTokenSource(stoppingToken); + var jobToken = jobCts.Token; + var context = new ScanJobContext(lease, _timeProvider, jobStart, jobToken); + + _metrics.RecordQueueLatency(context, queueLatency); + JobAcquired(_logger, lease.JobId, lease.ScanId, lease.Attempt, queueLatency.TotalMilliseconds); + + var heartbeatTask = _heartbeatService.RunAsync(lease, jobToken); + Exception? 
processingException = null; + + try + { + await _processor.ExecuteAsync(context, jobToken).ConfigureAwait(false); + jobCts.Cancel(); + await heartbeatTask.ConfigureAwait(false); + await lease.CompleteAsync(stoppingToken).ConfigureAwait(false); + var duration = _timeProvider.GetUtcNow() - jobStart; + _metrics.RecordJobDuration(context, duration); + _metrics.IncrementJobCompleted(context); + JobCompleted(_logger, lease.JobId, lease.ScanId, duration.TotalMilliseconds); + } + catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested) + { + processingException = null; + await lease.AbandonAsync("host-stopping", CancellationToken.None).ConfigureAwait(false); + JobAbandoned(_logger, lease.JobId, lease.ScanId); + } + catch (Exception ex) + { + processingException = ex; + var duration = _timeProvider.GetUtcNow() - jobStart; + _metrics.RecordJobDuration(context, duration); + + var reason = ex.GetType().Name; + var maxAttempts = options.Queue.MaxAttempts; + if (lease.Attempt >= maxAttempts) + { + await lease.PoisonAsync(reason, CancellationToken.None).ConfigureAwait(false); + _metrics.IncrementJobFailed(context, reason); + JobPoisoned(_logger, lease.JobId, lease.ScanId, lease.Attempt, maxAttempts, ex); + } + else + { + await lease.AbandonAsync(reason, CancellationToken.None).ConfigureAwait(false); + JobAbandonedWithError(_logger, lease.JobId, lease.ScanId, lease.Attempt, maxAttempts, ex); + } + } + finally + { + jobCts.Cancel(); + try + { + await heartbeatTask.ConfigureAwait(false); + } + catch (Exception ex) when (processingException is null && ex is not OperationCanceledException) + { + _logger.LogWarning(ex, "Heartbeat loop ended with an exception for job {JobId}.", lease.JobId); + } + + await lease.DisposeAsync().ConfigureAwait(false); + jobCts.Dispose(); + } + } + + [LoggerMessage(EventId = 2000, Level = LogLevel.Information, Message = "Scanner worker host started.")] + private static partial void WorkerStarted(ILogger logger); + + 
[LoggerMessage(EventId = 2001, Level = LogLevel.Information, Message = "Scanner worker host stopping.")] + private static partial void WorkerStopping(ILogger logger); + + [LoggerMessage( + EventId = 2002, + Level = LogLevel.Information, + Message = "Leased job {JobId} (scan {ScanId}) attempt {Attempt}; queue latency {LatencyMs:F0} ms.")] + private static partial void JobAcquired(ILogger logger, string jobId, string scanId, int attempt, double latencyMs); + + [LoggerMessage( + EventId = 2003, + Level = LogLevel.Information, + Message = "Job {JobId} (scan {ScanId}) completed in {DurationMs:F0} ms.")] + private static partial void JobCompleted(ILogger logger, string jobId, string scanId, double durationMs); + + [LoggerMessage( + EventId = 2004, + Level = LogLevel.Warning, + Message = "Job {JobId} (scan {ScanId}) abandoned due to host shutdown.")] + private static partial void JobAbandoned(ILogger logger, string jobId, string scanId); + + [LoggerMessage( + EventId = 2005, + Level = LogLevel.Warning, + Message = "Job {JobId} (scan {ScanId}) attempt {Attempt}/{MaxAttempts} abandoned after failure; job will be retried.")] + private static partial void JobAbandonedWithError(ILogger logger, string jobId, string scanId, int attempt, int maxAttempts, Exception exception); + + [LoggerMessage( + EventId = 2006, + Level = LogLevel.Error, + Message = "Job {JobId} (scan {ScanId}) attempt {Attempt}/{MaxAttempts} exceeded retry budget; quarantining job.")] + private static partial void JobPoisoned(ILogger logger, string jobId, string scanId, int attempt, int maxAttempts, Exception exception); +} diff --git a/src/StellaOps.Scanner.Worker/Options/ScannerWorkerOptions.cs b/src/StellaOps.Scanner.Worker/Options/ScannerWorkerOptions.cs new file mode 100644 index 00000000..7a99bc2b --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Options/ScannerWorkerOptions.cs @@ -0,0 +1,142 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using 
System.Collections.ObjectModel; + +namespace StellaOps.Scanner.Worker.Options; + +public sealed class ScannerWorkerOptions +{ + public const string SectionName = "Scanner:Worker"; + + public int MaxConcurrentJobs { get; set; } = 2; + + public QueueOptions Queue { get; } = new(); + + public PollingOptions Polling { get; } = new(); + + public AuthorityOptions Authority { get; } = new(); + + public TelemetryOptions Telemetry { get; } = new(); + + public ShutdownOptions Shutdown { get; } = new(); + + public sealed class QueueOptions + { + public int MaxAttempts { get; set; } = 5; + + public double HeartbeatSafetyFactor { get; set; } = 3.0; + + public int MaxHeartbeatJitterMilliseconds { get; set; } = 750; + + public IReadOnlyList HeartbeatRetryDelays => _heartbeatRetryDelays; + + public TimeSpan MinHeartbeatInterval { get; set; } = TimeSpan.FromSeconds(10); + + public TimeSpan MaxHeartbeatInterval { get; set; } = TimeSpan.FromSeconds(30); + + public void SetHeartbeatRetryDelays(IEnumerable delays) + { + _heartbeatRetryDelays = NormalizeDelays(delays); + } + + internal IReadOnlyList NormalizedHeartbeatRetryDelays => _heartbeatRetryDelays; + + private static IReadOnlyList NormalizeDelays(IEnumerable delays) + { + var buffer = new List(); + foreach (var delay in delays) + { + if (delay <= TimeSpan.Zero) + { + continue; + } + + buffer.Add(delay); + } + + buffer.Sort(); + return new ReadOnlyCollection(buffer); + } + + private IReadOnlyList _heartbeatRetryDelays = new ReadOnlyCollection(new TimeSpan[] + { + TimeSpan.FromSeconds(2), + TimeSpan.FromSeconds(5), + TimeSpan.FromSeconds(10), + }); + } + + public sealed class PollingOptions + { + public TimeSpan InitialDelay { get; set; } = TimeSpan.FromMilliseconds(200); + + public TimeSpan MaxDelay { get; set; } = TimeSpan.FromSeconds(5); + + public double JitterRatio { get; set; } = 0.2; + } + + public sealed class AuthorityOptions + { + public bool Enabled { get; set; } + + public string? Issuer { get; set; } + + public string? 
ClientId { get; set; }
+
+        public string? ClientSecret { get; set; }
+
+        public bool RequireHttpsMetadata { get; set; } = true;
+
+        public string? MetadataAddress { get; set; }
+
+        public int BackchannelTimeoutSeconds { get; set; } = 20;
+
+        public int TokenClockSkewSeconds { get; set; } = 30;
+
+        public IList<string> Scopes { get; } = new List<string> { "scanner.scan" };
+
+        public ResilienceOptions Resilience { get; } = new();
+    }
+
+    public sealed class ResilienceOptions
+    {
+        public bool? EnableRetries { get; set; }
+
+        public IList<TimeSpan> RetryDelays { get; } = new List<TimeSpan>
+        {
+            TimeSpan.FromMilliseconds(250),
+            TimeSpan.FromMilliseconds(500),
+            TimeSpan.FromSeconds(1),
+            TimeSpan.FromSeconds(5),
+        };
+
+        public bool? AllowOfflineCacheFallback { get; set; }
+
+        public TimeSpan? OfflineCacheTolerance { get; set; }
+    }
+
+    public sealed class TelemetryOptions
+    {
+        public bool EnableLogging { get; set; } = true;
+
+        public bool EnableTelemetry { get; set; } = true;
+
+        public bool EnableTracing { get; set; }
+
+        public bool EnableMetrics { get; set; } = true;
+
+        public string ServiceName { get; set; } = "stellaops-scanner-worker";
+
+        public string?
OtlpEndpoint { get; set; } + + public bool ExportConsole { get; set; } + + public IDictionary ResourceAttributes { get; } = new ConcurrentDictionary(StringComparer.OrdinalIgnoreCase); + } + + public sealed class ShutdownOptions + { + public TimeSpan Timeout { get; set; } = TimeSpan.FromSeconds(30); + } +} diff --git a/src/StellaOps.Scanner.Worker/Options/ScannerWorkerOptionsValidator.cs b/src/StellaOps.Scanner.Worker/Options/ScannerWorkerOptionsValidator.cs new file mode 100644 index 00000000..2b302c54 --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Options/ScannerWorkerOptionsValidator.cs @@ -0,0 +1,99 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using Microsoft.Extensions.Options; + +namespace StellaOps.Scanner.Worker.Options; + +public sealed class ScannerWorkerOptionsValidator : IValidateOptions +{ + public ValidateOptionsResult Validate(string? name, ScannerWorkerOptions options) + { + ArgumentNullException.ThrowIfNull(options); + + var failures = new List(); + + if (options.MaxConcurrentJobs <= 0) + { + failures.Add("Scanner.Worker:MaxConcurrentJobs must be greater than zero."); + } + + if (options.Queue.HeartbeatSafetyFactor < 2.0) + { + failures.Add("Scanner.Worker:Queue:HeartbeatSafetyFactor must be at least 2."); + } + + if (options.Queue.MaxAttempts <= 0) + { + failures.Add("Scanner.Worker:Queue:MaxAttempts must be greater than zero."); + } + + if (options.Queue.MinHeartbeatInterval <= TimeSpan.Zero) + { + failures.Add("Scanner.Worker:Queue:MinHeartbeatInterval must be greater than zero."); + } + + if (options.Queue.MaxHeartbeatInterval <= options.Queue.MinHeartbeatInterval) + { + failures.Add("Scanner.Worker:Queue:MaxHeartbeatInterval must be greater than MinHeartbeatInterval."); + } + + if (options.Polling.InitialDelay <= TimeSpan.Zero) + { + failures.Add("Scanner.Worker:Polling:InitialDelay must be greater than zero."); + } + + if (options.Polling.MaxDelay < options.Polling.InitialDelay) + { + 
failures.Add("Scanner.Worker:Polling:MaxDelay must be greater than or equal to InitialDelay."); + } + + if (options.Polling.JitterRatio is < 0 or > 1) + { + failures.Add("Scanner.Worker:Polling:JitterRatio must be between 0 and 1."); + } + + if (options.Authority.Enabled) + { + if (string.IsNullOrWhiteSpace(options.Authority.Issuer)) + { + failures.Add("Scanner.Worker:Authority requires Issuer when Enabled is true."); + } + + if (string.IsNullOrWhiteSpace(options.Authority.ClientId)) + { + failures.Add("Scanner.Worker:Authority requires ClientId when Enabled is true."); + } + + if (options.Authority.BackchannelTimeoutSeconds <= 0) + { + failures.Add("Scanner.Worker:Authority:BackchannelTimeoutSeconds must be greater than zero."); + } + + if (options.Authority.TokenClockSkewSeconds < 0) + { + failures.Add("Scanner.Worker:Authority:TokenClockSkewSeconds cannot be negative."); + } + + if (options.Authority.Resilience.RetryDelays.Any(delay => delay <= TimeSpan.Zero)) + { + failures.Add("Scanner.Worker:Authority:Resilience:RetryDelays must be positive durations."); + } + } + + if (options.Shutdown.Timeout < TimeSpan.FromSeconds(5)) + { + failures.Add("Scanner.Worker:Shutdown:Timeout must be at least 5 seconds to allow lease completion."); + } + + if (options.Telemetry.EnableTelemetry) + { + if (!options.Telemetry.EnableMetrics && !options.Telemetry.EnableTracing) + { + failures.Add("Scanner.Worker:Telemetry:EnableTelemetry requires metrics or tracing to be enabled."); + } + } + + return failures.Count == 0 ? 
ValidateOptionsResult.Success : ValidateOptionsResult.Fail(failures); + } +} diff --git a/src/StellaOps.Scanner.Worker/Processing/AnalyzerStageExecutor.cs b/src/StellaOps.Scanner.Worker/Processing/AnalyzerStageExecutor.cs new file mode 100644 index 00000000..1b7d6b78 --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Processing/AnalyzerStageExecutor.cs @@ -0,0 +1,20 @@ +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Worker.Processing; + +public sealed class AnalyzerStageExecutor : IScanStageExecutor +{ + private readonly IScanAnalyzerDispatcher _dispatcher; + + public AnalyzerStageExecutor(IScanAnalyzerDispatcher dispatcher) + { + _dispatcher = dispatcher ?? throw new ArgumentNullException(nameof(dispatcher)); + } + + public string StageName => ScanStageNames.ExecuteAnalyzers; + + public ValueTask ExecuteAsync(ScanJobContext context, CancellationToken cancellationToken) + => _dispatcher.ExecuteAsync(context, cancellationToken); +} diff --git a/src/StellaOps.Scanner.Worker/Processing/IDelayScheduler.cs b/src/StellaOps.Scanner.Worker/Processing/IDelayScheduler.cs new file mode 100644 index 00000000..2be99eba --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Processing/IDelayScheduler.cs @@ -0,0 +1,10 @@ +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Worker.Processing; + +public interface IDelayScheduler +{ + Task DelayAsync(TimeSpan delay, CancellationToken cancellationToken); +} diff --git a/src/StellaOps.Scanner.Worker/Processing/IScanAnalyzerDispatcher.cs b/src/StellaOps.Scanner.Worker/Processing/IScanAnalyzerDispatcher.cs new file mode 100644 index 00000000..e6677fc9 --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Processing/IScanAnalyzerDispatcher.cs @@ -0,0 +1,15 @@ +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Worker.Processing; + +public interface IScanAnalyzerDispatcher +{ + ValueTask ExecuteAsync(ScanJobContext 
context, CancellationToken cancellationToken);
+}
+
+public sealed class NullScanAnalyzerDispatcher : IScanAnalyzerDispatcher
+{
+    public ValueTask ExecuteAsync(ScanJobContext context, CancellationToken cancellationToken)
+        => ValueTask.CompletedTask;
+}
diff --git a/src/StellaOps.Scanner.Worker/Processing/IScanJobLease.cs b/src/StellaOps.Scanner.Worker/Processing/IScanJobLease.cs
new file mode 100644
index 00000000..705c77d4
--- /dev/null
+++ b/src/StellaOps.Scanner.Worker/Processing/IScanJobLease.cs
@@ -0,0 +1,31 @@
+using System;
+using System.Collections.Generic;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace StellaOps.Scanner.Worker.Processing;
+
+public interface IScanJobLease : IAsyncDisposable
+{
+    string JobId { get; }
+
+    string ScanId { get; }
+
+    int Attempt { get; }
+
+    DateTimeOffset EnqueuedAtUtc { get; }
+
+    DateTimeOffset LeasedAtUtc { get; }
+
+    TimeSpan LeaseDuration { get; }
+
+    IReadOnlyDictionary<string, string> Metadata { get; }
+
+    ValueTask RenewAsync(CancellationToken cancellationToken);
+
+    ValueTask CompleteAsync(CancellationToken cancellationToken);
+
+    ValueTask AbandonAsync(string reason, CancellationToken cancellationToken);
+
+    ValueTask PoisonAsync(string reason, CancellationToken cancellationToken);
+}
diff --git a/src/StellaOps.Scanner.Worker/Processing/IScanJobSource.cs b/src/StellaOps.Scanner.Worker/Processing/IScanJobSource.cs
new file mode 100644
index 00000000..b37dba2b
--- /dev/null
+++ b/src/StellaOps.Scanner.Worker/Processing/IScanJobSource.cs
@@ -0,0 +1,9 @@
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace StellaOps.Scanner.Worker.Processing;
+
+public interface IScanJobSource
+{
+    Task<IScanJobLease?> TryAcquireAsync(CancellationToken cancellationToken);
+}
diff --git a/src/StellaOps.Scanner.Worker/Processing/IScanStageExecutor.cs b/src/StellaOps.Scanner.Worker/Processing/IScanStageExecutor.cs
new file mode 100644
index 00000000..bf93169b
--- /dev/null
+++
b/src/StellaOps.Scanner.Worker/Processing/IScanStageExecutor.cs @@ -0,0 +1,11 @@ +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Worker.Processing; + +public interface IScanStageExecutor +{ + string StageName { get; } + + ValueTask ExecuteAsync(ScanJobContext context, CancellationToken cancellationToken); +} diff --git a/src/StellaOps.Scanner.Worker/Processing/LeaseHeartbeatService.cs b/src/StellaOps.Scanner.Worker/Processing/LeaseHeartbeatService.cs new file mode 100644 index 00000000..6dae7c3b --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Processing/LeaseHeartbeatService.cs @@ -0,0 +1,148 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using StellaOps.Scanner.Worker.Options; + +namespace StellaOps.Scanner.Worker.Processing; + +public sealed class LeaseHeartbeatService +{ + private readonly TimeProvider _timeProvider; + private readonly IOptionsMonitor _options; + private readonly IDelayScheduler _delayScheduler; + private readonly ILogger _logger; + + public LeaseHeartbeatService(TimeProvider timeProvider, IDelayScheduler delayScheduler, IOptionsMonitor options, ILogger logger) + { + _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider)); + _delayScheduler = delayScheduler ?? throw new ArgumentNullException(nameof(delayScheduler)); + _options = options ?? throw new ArgumentNullException(nameof(options)); + _logger = logger ?? 
throw new ArgumentNullException(nameof(logger)); + } + + public async Task RunAsync(IScanJobLease lease, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(lease); + + var options = _options.CurrentValue; + var interval = ComputeInterval(options, lease); + + while (!cancellationToken.IsCancellationRequested) + { + options = _options.CurrentValue; + var delay = ApplyJitter(interval, options.Queue.MaxHeartbeatJitterMilliseconds); + try + { + await _delayScheduler.DelayAsync(delay, cancellationToken).ConfigureAwait(false); + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + break; + } + + if (cancellationToken.IsCancellationRequested) + { + break; + } + + if (await TryRenewAsync(options, lease, cancellationToken).ConfigureAwait(false)) + { + continue; + } + + _logger.LogError( + "Job {JobId} (scan {ScanId}) lease renewal exhausted retries; cancelling processing.", + lease.JobId, + lease.ScanId); + throw new InvalidOperationException("Lease renewal retries exhausted."); + } + } + + private static TimeSpan ComputeInterval(ScannerWorkerOptions options, IScanJobLease lease) + { + var divisor = options.Queue.HeartbeatSafetyFactor <= 0 ? 
3.0 : options.Queue.HeartbeatSafetyFactor; + var recommended = TimeSpan.FromTicks((long)(lease.LeaseDuration.Ticks / Math.Max(2.0, divisor))); + if (recommended < options.Queue.MinHeartbeatInterval) + { + recommended = options.Queue.MinHeartbeatInterval; + } + else if (recommended > options.Queue.MaxHeartbeatInterval) + { + recommended = options.Queue.MaxHeartbeatInterval; + } + + return recommended; + } + + private static TimeSpan ApplyJitter(TimeSpan duration, int maxJitterMilliseconds) + { + if (maxJitterMilliseconds <= 0) + { + return duration; + } + + var offset = Random.Shared.NextDouble() * maxJitterMilliseconds; + return duration + TimeSpan.FromMilliseconds(offset); + } + + private async Task TryRenewAsync(ScannerWorkerOptions options, IScanJobLease lease, CancellationToken cancellationToken) + { + try + { + await lease.RenewAsync(cancellationToken).ConfigureAwait(false); + return true; + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + return false; + } + catch (Exception ex) + { + _logger.LogWarning( + ex, + "Job {JobId} (scan {ScanId}) heartbeat failed; retrying.", + lease.JobId, + lease.ScanId); + } + + foreach (var delay in options.Queue.NormalizedHeartbeatRetryDelays) + { + if (cancellationToken.IsCancellationRequested) + { + return false; + } + + try + { + await _delayScheduler.DelayAsync(delay, cancellationToken).ConfigureAwait(false); + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + return false; + } + + try + { + await lease.RenewAsync(cancellationToken).ConfigureAwait(false); + return true; + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + return false; + } + catch (Exception ex) + { + _logger.LogWarning( + ex, + "Job {JobId} (scan {ScanId}) heartbeat retry failed; will retry after {Delay}.", + lease.JobId, + lease.ScanId, + delay); + } + } + + return false; + } +} diff --git 
a/src/StellaOps.Scanner.Worker/Processing/NoOpStageExecutor.cs b/src/StellaOps.Scanner.Worker/Processing/NoOpStageExecutor.cs new file mode 100644 index 00000000..c9ec93a6 --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Processing/NoOpStageExecutor.cs @@ -0,0 +1,18 @@ +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Worker.Processing; + +public sealed class NoOpStageExecutor : IScanStageExecutor +{ + public NoOpStageExecutor(string stageName) + { + StageName = stageName ?? throw new ArgumentNullException(nameof(stageName)); + } + + public string StageName { get; } + + public ValueTask ExecuteAsync(ScanJobContext context, CancellationToken cancellationToken) + => ValueTask.CompletedTask; +} diff --git a/src/StellaOps.Scanner.Worker/Processing/NullScanJobSource.cs b/src/StellaOps.Scanner.Worker/Processing/NullScanJobSource.cs new file mode 100644 index 00000000..2f972f88 --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Processing/NullScanJobSource.cs @@ -0,0 +1,26 @@ +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; + +namespace StellaOps.Scanner.Worker.Processing; + +public sealed class NullScanJobSource : IScanJobSource +{ + private readonly ILogger _logger; + private int _logged; + + public NullScanJobSource(ILogger logger) + { + _logger = logger; + } + + public Task TryAcquireAsync(CancellationToken cancellationToken) + { + if (Interlocked.Exchange(ref _logged, 1) == 0) + { + _logger.LogWarning("No queue provider registered. 
Scanner worker will idle until a queue adapter is configured.");
+        }
+
+        return Task.FromResult<IScanJobLease?>(null);
+    }
+}
diff --git a/src/StellaOps.Scanner.Worker/Processing/PollDelayStrategy.cs b/src/StellaOps.Scanner.Worker/Processing/PollDelayStrategy.cs
new file mode 100644
index 00000000..cf4a386e
--- /dev/null
+++ b/src/StellaOps.Scanner.Worker/Processing/PollDelayStrategy.cs
@@ -0,0 +1,49 @@
+using System;
+
+using StellaOps.Scanner.Worker.Options;
+
+namespace StellaOps.Scanner.Worker.Processing;
+
+public sealed class PollDelayStrategy
+{
+    private readonly ScannerWorkerOptions.PollingOptions _options;
+    private TimeSpan _currentDelay;
+
+    public PollDelayStrategy(ScannerWorkerOptions.PollingOptions options)
+    {
+        _options = options ?? throw new ArgumentNullException(nameof(options));
+    }
+
+    public TimeSpan NextDelay()
+    {
+        if (_currentDelay == TimeSpan.Zero)
+        {
+            _currentDelay = _options.InitialDelay;
+            return ApplyJitter(_currentDelay);
+        }
+
+        var doubled = _currentDelay + _currentDelay;
+        _currentDelay = doubled < _options.MaxDelay ?
doubled : _options.MaxDelay; + return ApplyJitter(_currentDelay); + } + + public void Reset() => _currentDelay = TimeSpan.Zero; + + private TimeSpan ApplyJitter(TimeSpan duration) + { + if (_options.JitterRatio <= 0) + { + return duration; + } + + var maxOffset = duration.TotalMilliseconds * _options.JitterRatio; + if (maxOffset <= 0) + { + return duration; + } + + var offset = (Random.Shared.NextDouble() * 2.0 - 1.0) * maxOffset; + var adjustedMs = Math.Max(0, duration.TotalMilliseconds + offset); + return TimeSpan.FromMilliseconds(adjustedMs); + } +} diff --git a/src/StellaOps.Scanner.Worker/Processing/ScanJobContext.cs b/src/StellaOps.Scanner.Worker/Processing/ScanJobContext.cs new file mode 100644 index 00000000..8b985eeb --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Processing/ScanJobContext.cs @@ -0,0 +1,27 @@ +using System; +using System.Threading; + +namespace StellaOps.Scanner.Worker.Processing; + +public sealed class ScanJobContext +{ + public ScanJobContext(IScanJobLease lease, TimeProvider timeProvider, DateTimeOffset startUtc, CancellationToken cancellationToken) + { + Lease = lease ?? throw new ArgumentNullException(nameof(lease)); + TimeProvider = timeProvider ?? 
throw new ArgumentNullException(nameof(timeProvider));
+        StartUtc = startUtc;
+        CancellationToken = cancellationToken;
+    }
+
+    public IScanJobLease Lease { get; }
+
+    public TimeProvider TimeProvider { get; }
+
+    public DateTimeOffset StartUtc { get; }
+
+    public CancellationToken CancellationToken { get; }
+
+    public string JobId => Lease.JobId;
+
+    public string ScanId => Lease.ScanId;
+}
diff --git a/src/StellaOps.Scanner.Worker/Processing/ScanJobProcessor.cs b/src/StellaOps.Scanner.Worker/Processing/ScanJobProcessor.cs
new file mode 100644
index 00000000..5263d555
--- /dev/null
+++ b/src/StellaOps.Scanner.Worker/Processing/ScanJobProcessor.cs
@@ -0,0 +1,65 @@
+using System;
+using System.Collections.Generic;
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Logging;
+
+namespace StellaOps.Scanner.Worker.Processing;
+
+public sealed class ScanJobProcessor
+{
+    private readonly IReadOnlyDictionary<string, IScanStageExecutor> _executors;
+    private readonly ScanProgressReporter _progressReporter;
+    private readonly ILogger<ScanJobProcessor> _logger;
+
+    public ScanJobProcessor(IEnumerable<IScanStageExecutor> executors, ScanProgressReporter progressReporter, ILogger<ScanJobProcessor> logger)
+    {
+        _progressReporter = progressReporter ?? throw new ArgumentNullException(nameof(progressReporter));
+        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
+
+        var map = new Dictionary<string, IScanStageExecutor>(StringComparer.OrdinalIgnoreCase);
+        foreach (var executor in executors ?? Array.Empty<IScanStageExecutor>())
+        {
+            if (executor is null || string.IsNullOrWhiteSpace(executor.StageName))
+            {
+                continue;
+            }
+
+            map[executor.StageName] = executor;
+        }
+
+        foreach (var stage in ScanStageNames.Ordered)
+        {
+            if (map.ContainsKey(stage))
+            {
+                continue;
+            }
+
+            map[stage] = new NoOpStageExecutor(stage);
+            _logger.LogDebug("No executor registered for stage {Stage}; using no-op placeholder.", stage);
+        }
+
+        _executors = map;
+    }
+
+    public async ValueTask ExecuteAsync(ScanJobContext context, CancellationToken cancellationToken)
+    {
+        ArgumentNullException.ThrowIfNull(context);
+
+        foreach (var stage in ScanStageNames.Ordered)
+        {
+            cancellationToken.ThrowIfCancellationRequested();
+
+            if (!_executors.TryGetValue(stage, out var executor))
+            {
+                continue;
+            }
+
+            await _progressReporter.ExecuteStageAsync(
+                context,
+                stage,
+                executor.ExecuteAsync,
+                cancellationToken).ConfigureAwait(false);
+        }
+    }
+}
diff --git a/src/StellaOps.Scanner.Worker/Processing/ScanProgressReporter.cs b/src/StellaOps.Scanner.Worker/Processing/ScanProgressReporter.cs
new file mode 100644
index 00000000..228a02ac
--- /dev/null
+++ b/src/StellaOps.Scanner.Worker/Processing/ScanProgressReporter.cs
@@ -0,0 +1,86 @@
+using System;
+using System.Diagnostics;
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Logging;
+using StellaOps.Scanner.Worker.Diagnostics;
+
+namespace StellaOps.Scanner.Worker.Processing;
+
+public sealed partial class ScanProgressReporter
+{
+    private readonly ScannerWorkerMetrics _metrics;
+    private readonly ILogger<ScanProgressReporter> _logger;
+
+    public ScanProgressReporter(ScannerWorkerMetrics metrics, ILogger<ScanProgressReporter> logger)
+    {
+        _metrics = metrics ?? throw new ArgumentNullException(nameof(metrics));
+        _logger = logger ??
throw new ArgumentNullException(nameof(logger)); + } + + public async ValueTask ExecuteStageAsync( + ScanJobContext context, + string stageName, + Func stageWork, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(context); + ArgumentException.ThrowIfNullOrWhiteSpace(stageName); + ArgumentNullException.ThrowIfNull(stageWork); + + StageStarting(_logger, context.JobId, context.ScanId, stageName, context.Lease.Attempt); + + var start = context.TimeProvider.GetUtcNow(); + using var activity = ScannerWorkerInstrumentation.ActivitySource.StartActivity( + $"scanner.worker.{stageName}", + ActivityKind.Internal); + + activity?.SetTag("scanner.worker.job_id", context.JobId); + activity?.SetTag("scanner.worker.scan_id", context.ScanId); + activity?.SetTag("scanner.worker.stage", stageName); + + try + { + await stageWork(context, cancellationToken).ConfigureAwait(false); + var duration = context.TimeProvider.GetUtcNow() - start; + _metrics.RecordStageDuration(context, stageName, duration); + StageCompleted(_logger, context.JobId, context.ScanId, stageName, duration.TotalMilliseconds); + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + StageCancelled(_logger, context.JobId, context.ScanId, stageName); + throw; + } + catch (Exception ex) + { + var duration = context.TimeProvider.GetUtcNow() - start; + _metrics.RecordStageDuration(context, stageName, duration); + StageFailed(_logger, context.JobId, context.ScanId, stageName, ex); + throw; + } + } + + [LoggerMessage( + EventId = 1000, + Level = LogLevel.Information, + Message = "Job {JobId} (scan {ScanId}) entering stage {Stage} (attempt {Attempt}).")] + private static partial void StageStarting(ILogger logger, string jobId, string scanId, string stage, int attempt); + + [LoggerMessage( + EventId = 1001, + Level = LogLevel.Information, + Message = "Job {JobId} (scan {ScanId}) finished stage {Stage} in {ElapsedMs:F0} ms.")] + private static partial void 
StageCompleted(ILogger logger, string jobId, string scanId, string stage, double elapsedMs); + + [LoggerMessage( + EventId = 1002, + Level = LogLevel.Warning, + Message = "Job {JobId} (scan {ScanId}) stage {Stage} cancelled by request.")] + private static partial void StageCancelled(ILogger logger, string jobId, string scanId, string stage); + + [LoggerMessage( + EventId = 1003, + Level = LogLevel.Error, + Message = "Job {JobId} (scan {ScanId}) stage {Stage} failed.")] + private static partial void StageFailed(ILogger logger, string jobId, string scanId, string stage, Exception exception); +} diff --git a/src/StellaOps.Scanner.Worker/Processing/ScanStageNames.cs b/src/StellaOps.Scanner.Worker/Processing/ScanStageNames.cs new file mode 100644 index 00000000..f4aea671 --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Processing/ScanStageNames.cs @@ -0,0 +1,23 @@ +using System.Collections.Generic; + +namespace StellaOps.Scanner.Worker.Processing; + +public static class ScanStageNames +{ + public const string ResolveImage = "resolve-image"; + public const string PullLayers = "pull-layers"; + public const string BuildFilesystem = "build-filesystem"; + public const string ExecuteAnalyzers = "execute-analyzers"; + public const string ComposeArtifacts = "compose-artifacts"; + public const string EmitReports = "emit-reports"; + + public static readonly IReadOnlyList Ordered = new[] + { + ResolveImage, + PullLayers, + BuildFilesystem, + ExecuteAnalyzers, + ComposeArtifacts, + EmitReports, + }; +} diff --git a/src/StellaOps.Scanner.Worker/Processing/SystemDelayScheduler.cs b/src/StellaOps.Scanner.Worker/Processing/SystemDelayScheduler.cs new file mode 100644 index 00000000..cf5f3b9e --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Processing/SystemDelayScheduler.cs @@ -0,0 +1,18 @@ +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace StellaOps.Scanner.Worker.Processing; + +public sealed class SystemDelayScheduler : IDelayScheduler +{ + public 
Task DelayAsync(TimeSpan delay, CancellationToken cancellationToken) + { + if (delay <= TimeSpan.Zero) + { + return Task.CompletedTask; + } + + return Task.Delay(delay, cancellationToken); + } +} diff --git a/src/StellaOps.Scanner.Worker/Program.cs b/src/StellaOps.Scanner.Worker/Program.cs new file mode 100644 index 00000000..d2275074 --- /dev/null +++ b/src/StellaOps.Scanner.Worker/Program.cs @@ -0,0 +1,98 @@ +using System.Diagnostics; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using Microsoft.Extensions.DependencyInjection.Extensions; +using StellaOps.Auth.Client; +using StellaOps.Scanner.Worker.Diagnostics; +using StellaOps.Scanner.Worker.Hosting; +using StellaOps.Scanner.Worker.Options; +using StellaOps.Scanner.Worker.Processing; + +var builder = Host.CreateApplicationBuilder(args); + +builder.Services.AddOptions() + .BindConfiguration(ScannerWorkerOptions.SectionName) + .ValidateOnStart(); + +builder.Services.AddSingleton, ScannerWorkerOptionsValidator>(); +builder.Services.AddSingleton(TimeProvider.System); +builder.Services.AddSingleton(); +builder.Services.AddSingleton(); +builder.Services.AddSingleton(); +builder.Services.AddSingleton(); +builder.Services.AddSingleton(); + +builder.Services.TryAddSingleton(); +builder.Services.TryAddSingleton(); +builder.Services.AddSingleton(); + +builder.Services.AddSingleton(); +builder.Services.AddHostedService(sp => sp.GetRequiredService()); + +var workerOptions = builder.Configuration.GetSection(ScannerWorkerOptions.SectionName).Get() ?? 
new ScannerWorkerOptions(); + +builder.Services.Configure(options => +{ + options.ShutdownTimeout = workerOptions.Shutdown.Timeout; +}); + +builder.ConfigureScannerWorkerTelemetry(workerOptions); + +if (workerOptions.Authority.Enabled) +{ + builder.Services.AddStellaOpsAuthClient(clientOptions => + { + clientOptions.Authority = workerOptions.Authority.Issuer?.Trim() ?? string.Empty; + clientOptions.ClientId = workerOptions.Authority.ClientId?.Trim() ?? string.Empty; + clientOptions.ClientSecret = workerOptions.Authority.ClientSecret; + clientOptions.EnableRetries = workerOptions.Authority.Resilience.EnableRetries ?? true; + clientOptions.HttpTimeout = TimeSpan.FromSeconds(workerOptions.Authority.BackchannelTimeoutSeconds); + + clientOptions.DefaultScopes.Clear(); + foreach (var scope in workerOptions.Authority.Scopes) + { + if (string.IsNullOrWhiteSpace(scope)) + { + continue; + } + + clientOptions.DefaultScopes.Add(scope); + } + + clientOptions.RetryDelays.Clear(); + foreach (var delay in workerOptions.Authority.Resilience.RetryDelays) + { + if (delay <= TimeSpan.Zero) + { + continue; + } + + clientOptions.RetryDelays.Add(delay); + } + + if (workerOptions.Authority.Resilience.AllowOfflineCacheFallback is bool allowOffline) + { + clientOptions.AllowOfflineCacheFallback = allowOffline; + } + + if (workerOptions.Authority.Resilience.OfflineCacheTolerance is { } tolerance && tolerance > TimeSpan.Zero) + { + clientOptions.OfflineCacheTolerance = tolerance; + } + }); +} + +builder.Logging.Configure(options => +{ + options.ActivityTrackingOptions = ActivityTrackingOptions.SpanId + | ActivityTrackingOptions.TraceId + | ActivityTrackingOptions.ParentId; +}); + +var host = builder.Build(); + +await host.RunAsync(); + +public partial class Program; diff --git a/src/StellaOps.Scanner.Worker/StellaOps.Scanner.Worker.csproj b/src/StellaOps.Scanner.Worker/StellaOps.Scanner.Worker.csproj new file mode 100644 index 00000000..7d9307e3 --- /dev/null +++ 
b/src/StellaOps.Scanner.Worker/StellaOps.Scanner.Worker.csproj @@ -0,0 +1,20 @@ + + + net10.0 + preview + enable + enable + true + + + + + + + + + + + + + diff --git a/src/StellaOps.Scanner.Worker/TASKS.md b/src/StellaOps.Scanner.Worker/TASKS.md new file mode 100644 index 00000000..c7be4b7b --- /dev/null +++ b/src/StellaOps.Scanner.Worker/TASKS.md @@ -0,0 +1,8 @@ +# Scanner Worker Task Board + +| ID | Status | Owner(s) | Depends on | Description | Exit Criteria | +|----|--------|----------|------------|-------------|---------------| +| SCANNER-WORKER-09-201 | DONE (2025-10-19) | Scanner Worker Guild | SCANNER-CORE-09-501 | Worker host bootstrap with Authority auth, hosted services, and graceful shutdown semantics. | `Program.cs` binds `Scanner:Worker` options, registers delay scheduler, configures telemetry + Authority client, and enforces shutdown timeout. | +| SCANNER-WORKER-09-202 | DONE (2025-10-19) | Scanner Worker Guild | SCANNER-WORKER-09-201, SCANNER-QUEUE-09-401 | Lease/heartbeat loop with retry+jitter, poison-job quarantine, structured logging. | `ScannerWorkerHostedService` + `LeaseHeartbeatService` manage concurrency, renewal margins, poison handling, and structured logs exercised by integration fixture. | +| SCANNER-WORKER-09-203 | DONE (2025-10-19) | Scanner Worker Guild | SCANNER-WORKER-09-202, SCANNER-STORAGE-09-301 | Analyzer dispatch skeleton emitting deterministic stage progress and honoring cancellation tokens. | Deterministic stage list + `ScanProgressReporter`; `WorkerBasicScanScenario` validates ordering and cancellation propagation. | +| SCANNER-WORKER-09-204 | DONE (2025-10-19) | Scanner Worker Guild | SCANNER-WORKER-09-203 | Worker metrics (queue latency, stage duration, failure counts) with OpenTelemetry resource wiring. | `ScannerWorkerMetrics` records queue/job/stage metrics; integration test asserts analyzer stage histogram entries. 
| diff --git a/src/StellaOps.sln b/src/StellaOps.sln index abc2bb3c..007759e7 100644 --- a/src/StellaOps.sln +++ b/src/StellaOps.sln @@ -237,6 +237,28 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Excititor.Connect EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Excititor.Connectors.Ubuntu.CSAF.Tests", "StellaOps.Excititor.Connectors.Ubuntu.CSAF.Tests\StellaOps.Excititor.Connectors.Ubuntu.CSAF.Tests.csproj", "{CADA1364-8EB1-479E-AB6F-4105C26335C8}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Scanner.Core", "StellaOps.Scanner.Core\StellaOps.Scanner.Core.csproj", "{8CC4441E-9D1A-4E00-831B-34828A3F9446}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Scanner.Core.Tests", "StellaOps.Scanner.Core.Tests\StellaOps.Scanner.Core.Tests.csproj", "{01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "StellaOps.Authority", "StellaOps.Authority", "{BDB24B64-FE4E-C4BD-9F80-9428F98EDF6F}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Policy", "StellaOps.Policy\StellaOps.Policy.csproj", "{37BB9502-CCD1-425A-BF45-D56968B0C2F9}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Policy.Tests", "StellaOps.Policy.Tests\StellaOps.Policy.Tests.csproj", "{015A7A95-2C07-4C7F-8048-DB591AAC5FE5}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Scanner.WebService", "StellaOps.Scanner.WebService\StellaOps.Scanner.WebService.csproj", "{EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Scanner.WebService.Tests", "StellaOps.Scanner.WebService.Tests\StellaOps.Scanner.WebService.Tests.csproj", "{27D951AD-696D-4330-B4F5-F8F81344C191}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Scanner.Storage", "StellaOps.Scanner.Storage\StellaOps.Scanner.Storage.csproj", 
"{31277AFF-9BFF-4C17-8593-B562A385058E}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Scanner.Storage.Tests", "StellaOps.Scanner.Storage.Tests\StellaOps.Scanner.Storage.Tests.csproj", "{3A8F090F-678D-46E2-8899-67402129749C}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Scanner.Worker", "StellaOps.Scanner.Worker\StellaOps.Scanner.Worker.csproj", "{19FACEC7-D6D4-40F5-84AD-14E2983F18F7}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Scanner.Worker.Tests", "StellaOps.Scanner.Worker.Tests\StellaOps.Scanner.Worker.Tests.csproj", "{8342286A-BE36-4ACA-87FF-EBEB4E268498}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -1627,6 +1649,126 @@ Global {CADA1364-8EB1-479E-AB6F-4105C26335C8}.Release|x64.Build.0 = Release|Any CPU {CADA1364-8EB1-479E-AB6F-4105C26335C8}.Release|x86.ActiveCfg = Release|Any CPU {CADA1364-8EB1-479E-AB6F-4105C26335C8}.Release|x86.Build.0 = Release|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Debug|Any CPU.Build.0 = Debug|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Debug|x64.ActiveCfg = Debug|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Debug|x64.Build.0 = Debug|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Debug|x86.ActiveCfg = Debug|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Debug|x86.Build.0 = Debug|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Release|Any CPU.ActiveCfg = Release|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Release|Any CPU.Build.0 = Release|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Release|x64.ActiveCfg = Release|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Release|x64.Build.0 = Release|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Release|x86.ActiveCfg = Release|Any CPU + {8CC4441E-9D1A-4E00-831B-34828A3F9446}.Release|x86.Build.0 = Release|Any CPU + 
{01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Debug|Any CPU.Build.0 = Debug|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Debug|x64.ActiveCfg = Debug|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Debug|x64.Build.0 = Debug|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Debug|x86.ActiveCfg = Debug|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Debug|x86.Build.0 = Debug|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Release|Any CPU.ActiveCfg = Release|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Release|Any CPU.Build.0 = Release|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Release|x64.ActiveCfg = Release|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Release|x64.Build.0 = Release|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Release|x86.ActiveCfg = Release|Any CPU + {01B8AC3F-1B97-4F79-93C6-BE1CBA26FE17}.Release|x86.Build.0 = Release|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Debug|Any CPU.Build.0 = Debug|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Debug|x64.ActiveCfg = Debug|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Debug|x64.Build.0 = Debug|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Debug|x86.ActiveCfg = Debug|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Debug|x86.Build.0 = Debug|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Release|Any CPU.ActiveCfg = Release|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Release|Any CPU.Build.0 = Release|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Release|x64.ActiveCfg = Release|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Release|x64.Build.0 = Release|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Release|x86.ActiveCfg = Release|Any CPU + {37BB9502-CCD1-425A-BF45-D56968B0C2F9}.Release|x86.Build.0 = Release|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + 
{015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Debug|Any CPU.Build.0 = Debug|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Debug|x64.ActiveCfg = Debug|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Debug|x64.Build.0 = Debug|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Debug|x86.ActiveCfg = Debug|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Debug|x86.Build.0 = Debug|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Release|Any CPU.ActiveCfg = Release|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Release|Any CPU.Build.0 = Release|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Release|x64.ActiveCfg = Release|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Release|x64.Build.0 = Release|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Release|x86.ActiveCfg = Release|Any CPU + {015A7A95-2C07-4C7F-8048-DB591AAC5FE5}.Release|x86.Build.0 = Release|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Debug|Any CPU.Build.0 = Debug|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Debug|x64.ActiveCfg = Debug|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Debug|x64.Build.0 = Debug|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Debug|x86.ActiveCfg = Debug|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Debug|x86.Build.0 = Debug|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Release|Any CPU.ActiveCfg = Release|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Release|Any CPU.Build.0 = Release|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Release|x64.ActiveCfg = Release|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Release|x64.Build.0 = Release|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Release|x86.ActiveCfg = Release|Any CPU + {EF59DAD6-30CE-47CB-862A-DD79F31BFDE4}.Release|x86.Build.0 = Release|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Debug|Any CPU.Build.0 = Debug|Any CPU + 
{27D951AD-696D-4330-B4F5-F8F81344C191}.Debug|x64.ActiveCfg = Debug|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Debug|x64.Build.0 = Debug|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Debug|x86.ActiveCfg = Debug|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Debug|x86.Build.0 = Debug|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Release|Any CPU.ActiveCfg = Release|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Release|Any CPU.Build.0 = Release|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Release|x64.ActiveCfg = Release|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Release|x64.Build.0 = Release|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Release|x86.ActiveCfg = Release|Any CPU + {27D951AD-696D-4330-B4F5-F8F81344C191}.Release|x86.Build.0 = Release|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Debug|x64.ActiveCfg = Debug|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Debug|x64.Build.0 = Debug|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Debug|x86.ActiveCfg = Debug|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Debug|x86.Build.0 = Debug|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Release|Any CPU.Build.0 = Release|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Release|x64.ActiveCfg = Release|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Release|x64.Build.0 = Release|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Release|x86.ActiveCfg = Release|Any CPU + {31277AFF-9BFF-4C17-8593-B562A385058E}.Release|x86.Build.0 = Release|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{3A8F090F-678D-46E2-8899-67402129749C}.Debug|x64.Build.0 = Debug|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Debug|x86.ActiveCfg = Debug|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Debug|x86.Build.0 = Debug|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Release|Any CPU.Build.0 = Release|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Release|x64.ActiveCfg = Release|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Release|x64.Build.0 = Release|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Release|x86.ActiveCfg = Release|Any CPU + {3A8F090F-678D-46E2-8899-67402129749C}.Release|x86.Build.0 = Release|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Debug|x64.ActiveCfg = Debug|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Debug|x64.Build.0 = Debug|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Debug|x86.ActiveCfg = Debug|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Debug|x86.Build.0 = Debug|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Release|Any CPU.Build.0 = Release|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Release|x64.ActiveCfg = Release|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Release|x64.Build.0 = Release|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Release|x86.ActiveCfg = Release|Any CPU + {19FACEC7-D6D4-40F5-84AD-14E2983F18F7}.Release|x86.Build.0 = Release|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Debug|Any CPU.Build.0 = Debug|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Debug|x64.ActiveCfg = Debug|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Debug|x64.Build.0 = Debug|Any CPU + 
{8342286A-BE36-4ACA-87FF-EBEB4E268498}.Debug|x86.ActiveCfg = Debug|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Debug|x86.Build.0 = Debug|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Release|Any CPU.ActiveCfg = Release|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Release|Any CPU.Build.0 = Release|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Release|x64.ActiveCfg = Release|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Release|x64.Build.0 = Release|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Release|x86.ActiveCfg = Release|Any CPU + {8342286A-BE36-4ACA-87FF-EBEB4E268498}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -1722,5 +1864,7 @@ Global {A2E3F03A-0CAD-4E2A-8C71-DDEBB1B7E4F7} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B} {3A1AF0AD-4DAE-4D82-9CCF-2DCB83CC3679} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B} {F1DF0F07-1BCB-4B55-8353-07BF8A4B2A67} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B} + {31277AFF-9BFF-4C17-8593-B562A385058E} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B} + {3A8F090F-678D-46E2-8899-67402129749C} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B} EndGlobalSection EndGlobal