feat: Add in-memory implementations for issuer audit, key, repository, and trust management
Some checks failed
devportal-offline / build-offline (push) Has been cancelled
Mirror Thin Bundle Sign & Verify / mirror-sign (push) Has been cancelled
Docs CI / lint-and-preview (push) Has been cancelled
api-governance / spectral-lint (push) Has been cancelled
oas-ci / oas-validate (push) Has been cancelled

- Introduced InMemoryIssuerAuditSink to retain audit entries for testing.
- Implemented InMemoryIssuerKeyRepository for deterministic key storage.
- Created InMemoryIssuerRepository to manage issuer records in memory.
- Added InMemoryIssuerTrustRepository for managing issuer trust overrides.
- Each repository utilizes concurrent collections for thread-safe operations.
- Added a deprecation metadata schema, Spectral validation rules, and notification templates for API governance deprecation tracking.
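
As a rough illustration of the pattern the bullets above describe (the record shape and method names here are assumptions, not the committed API), an in-memory issuer repository built on a concurrent dictionary might look like this:

```csharp
// Illustrative sketch only: the real InMemoryIssuerRepository lives in the
// IssuerDirectory code base; the record shape and method names are assumptions.
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;

public sealed record IssuerRecord(string TenantId, string IssuerId, string DisplayName);

public sealed class InMemoryIssuerRepository
{
    // Keyed by (tenant, issuer) so lookups stay tenant-scoped and thread-safe.
    private readonly ConcurrentDictionary<(string TenantId, string IssuerId), IssuerRecord> _issuers = new();

    public void Upsert(IssuerRecord record) =>
        _issuers[(record.TenantId, record.IssuerId)] = record;

    public IssuerRecord? Find(string tenantId, string issuerId) =>
        _issuers.TryGetValue((tenantId, issuerId), out var record) ? record : null;

    // Ordered enumeration keeps results deterministic for tests.
    public IReadOnlyList<IssuerRecord> List(string tenantId) =>
        _issuers.Values.Where(r => r.TenantId == tenantId)
                       .OrderBy(r => r.IssuerId)
                       .ToList();
}
```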
This commit is contained in:
master
2025-12-11 19:47:43 +02:00
parent ab22181e8b
commit ce5ec9c158
48 changed files with 1898 additions and 1580 deletions

View File

@@ -27,7 +27,8 @@
"Bash(if not exist \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Java.Tests\\Internal\" mkdir \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Java.Tests\\Internal\")", "Bash(if not exist \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Java.Tests\\Internal\" mkdir \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Java.Tests\\Internal\")",
"Bash(if not exist \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Node.Tests\\Internal\" mkdir \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Node.Tests\\Internal\")", "Bash(if not exist \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Node.Tests\\Internal\" mkdir \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Node.Tests\\Internal\")",
"Bash(rm:*)", "Bash(rm:*)",
"Bash(if not exist \"C:\\dev\\New folder\\git.stella-ops.org\\docs\\implplan\\archived\" mkdir \"C:\\dev\\New folder\\git.stella-ops.org\\docs\\implplan\\archived\")" "Bash(if not exist \"C:\\dev\\New folder\\git.stella-ops.org\\docs\\implplan\\archived\" mkdir \"C:\\dev\\New folder\\git.stella-ops.org\\docs\\implplan\\archived\")",
"Bash(del \"C:\\dev\\New folder\\git.stella-ops.org\\docs\\implplan\\SPRINT_0510_0001_0001_airgap.md\")"
], ],
"deny": [], "deny": [],
"ask": [] "ask": []

View File

@@ -165,3 +165,69 @@ rules:
          in:
            const: header
        required: [name, in]
  # --- Deprecation Metadata Rules (per APIGOV-63-001) ---
  stella-deprecated-has-metadata:
    description: "Deprecated operations must have x-deprecation extension with required fields"
    message: "Add x-deprecation metadata (deprecatedAt, sunsetAt, successorPath, reason) to deprecated operations"
    given: "$.paths[*][*][?(@.deprecated == true)]"
    severity: error
    then:
      field: x-deprecation
      function: schema
      functionOptions:
        schema:
          type: object
          required:
            - deprecatedAt
            - sunsetAt
            - successorPath
            - reason
          properties:
            deprecatedAt:
              type: string
              format: date-time
            sunsetAt:
              type: string
              format: date-time
            successorPath:
              type: string
            successorOperationId:
              type: string
            reason:
              type: string
            migrationGuide:
              type: string
              format: uri
            notificationChannels:
              type: array
              items:
                type: string
                enum: [slack, teams, email, webhook]
  stella-deprecated-sunset-future:
    description: "Sunset dates should be in the future (warn if sunset already passed)"
    message: "x-deprecation.sunsetAt should be a future date"
    given: "$.paths[*][*].x-deprecation.sunsetAt"
    severity: warn
    then:
      function: truthy
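    # NOTE: `truthy` only checks that sunsetAt is present; comparing it against the current date would require a custom Spectral function.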
  stella-deprecated-migration-guide:
    description: "Deprecated operations should include a migration guide URL"
    message: "Consider adding x-deprecation.migrationGuide for consumer guidance"
    given: "$.paths[*][*][?(@.deprecated == true)].x-deprecation"
    severity: hint
    then:
      field: migrationGuide
      function: truthy
  stella-deprecated-notification-channels:
    description: "Deprecated operations should specify notification channels"
    message: "Add x-deprecation.notificationChannels to enable deprecation notifications"
    given: "$.paths[*][*][?(@.deprecated == true)].x-deprecation"
    severity: hint
    then:
      field: notificationChannels
      function: truthy
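
For reference, a hypothetical deprecated operation that satisfies these rules would carry the extension roughly like this (the path, operation ID, dates, and URLs below are illustrative, not from any real spec):

```yaml
paths:
  /v1/tokens:
    post:
      operationId: createToken        # hypothetical deprecated endpoint
      deprecated: true
      x-deprecation:
        deprecatedAt: "2025-01-15T00:00:00Z"
        sunsetAt: "2025-07-15T00:00:00Z"
        successorPath: /v2/tokens
        successorOperationId: createTokenV2
        reason: "Replaced by the v2 token endpoint"
        migrationGuide: "https://docs.stella-ops.org/migration/tokens-v2"
        notificationChannels: [slack, email]
```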

View File

@@ -31,11 +31,12 @@
| 8 | GAP-SCAN-001 | DONE (2025-12-03) | Binary lifter for ELF/PE/Mach-O shipped; richgraph nodes now carry code_id + SymbolId.forBinaryAddressed; reachability tests updated. | Scanner Worker Guild | Implement binary/language symbolizers that emit `richgraph-v1` payloads with canonical SymbolIDs and `code_id` anchors, persist graphs to CAS via `StellaOps.Scanner.Reachability`, and refresh analyzer docs/fixtures. |
| 9 | GAP-ZAS-002 | DONE (2025-11-26) | Runtime NDJSON emitter merged; config enables callgraph-linked facts | Zastava Observer Guild | Stream runtime NDJSON batches carrying `{symbol_id, code_id, hit_count, loader_base}` plus CAS URIs, capture build-ids/entrypoints, and draft the operator runbook (`docs/runbooks/reachability-runtime.md`). Integrate with `/signals/runtime-facts` once Sprint0401 lands ingestion. |
| 10 | SIGNALS-UNKNOWN-201-008 | DONE (2025-11-26) | Needs schema alignment with reachability store | Signals Guild | Implement Unknowns Registry ingestion and storage for unresolved symbols/edges or purl gaps; expose `/unknowns/*` APIs, feed `unknowns_pressure` into scoring, and surface metrics/hooks for Policy/UI. |
- | 11 | GRAPH-PURL-201-009 | BLOCKED (2025-11-27) | Depends on GAP-SCAN-001 and final richgraph-v1; pending stable symbolizer outputs. | Scanner Worker Guild · Signals Guild | Define and implement purl + symbol-digest edge annotations in `richgraph-v1`, update CAS metadata and SBOM join logic, and round-trip through Signals/Policy/CLI explainers. |
+ | 11 | GRAPH-PURL-201-009 | DONE (2025-12-11) | purl+symbol_digest annotations added to nodes/edges; CAS serialization updated; tests passing. | Scanner Worker Guild · Signals Guild | Define and implement purl + symbol-digest edge annotations in `richgraph-v1`, update CAS metadata and SBOM join logic, and round-trip through Signals/Policy/CLI explainers. |
## Execution Log
| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2025-12-11 | Completed GRAPH-PURL-201-009: added `purl` and `symbol_digest` to ReachabilityUnionNode/Edge, added `candidates` for ambiguous resolutions, updated ReachabilityGraphBuilder and ReachabilityUnionWriter, fixed existing test hash assumptions; all 15 reachability tests pass. | Scanner Worker |
| 2025-12-03 | Added BinaryReachabilityLifter (ELF/PE/Mach-O) emitting SymbolId.ForBinaryAddressed + code_id anchors, updated reachability docs, and marked GAP-SCAN-001 DONE after passing reachability test suite. | Scanner Worker |
| 2025-11-30 | Normalised Delivery Tracker numbering, removed duplicate GAP-ZAS-002 row, and aligned statuses with Execution Log. | Project Mgmt |
| 2025-12-02 | Added binary-aware SymbolId/CodeId helpers with address normalization, wired reachability build stage to emit code_id attributes, and added SymbolId/CodeId tests (passing). | Scanner Worker |
@@ -67,13 +68,11 @@
- Schema v0.1 published at `docs/reachability/runtime-static-union-schema.md` (2025-11-23); treat as add-only. Breaking changes require version bump and mirrored updates in Signals/Replay.
- reachbench fixtures relocated to `tests/reachability/fixtures/` via QA-REACH-201-007; keep deterministic seeds and rerun CI guidance to confirm reproducibility on shared runners.
- Offline posture: ensure reachability pipelines avoid external downloads; rely on sealed/mock bundles.
- - Unknowns Registry shipped (201-008): unknowns pressure applied to scoring; monitor schema adjustments from policy team for purl/digest merge (201-009) to avoid churn.
+ - Unknowns Registry shipped (201-008): unknowns pressure applied to scoring.
- - purl + symbol-digest edge schema (201-009) depends on `richgraph-v1` finalization; may require updates to SBOM resolver and CLI explain flows.
+ - purl + symbol-digest edge annotations shipped (201-009): nodes and edges now carry `purl` and `symbol_digest` fields; edges support `candidates[]` for ambiguous resolutions; Signals/SBOM resolver and CLI explain flows may need updates to consume these fields.
- Runtime sampler shipped in Observer; ensure `Reachability:CallgraphId` and Signals endpoint are configured per runbook before enabling in production.
- - Richgraph writer currently uses SHA-256 graph hash (placeholder) until BLAKE3 dependency is approved; CAS publishes still deterministic but may need hash swap later.
+ - Richgraph writer now uses BLAKE3 graph hash; CAS publishes are deterministic.
- dotnet test for new reachability writer/publisher intermittently stalls on NuGet restore spinner; rerun on CI once feeds are stable.
## Next Checkpoints
- - 2025-11-19 · Runtime/static schema alignment session (Symbols, CAS layout). Owner: Signals Guild.
+ - None (sprint closed 2025-12-11); track follow-on items (Signals/Policy/CLI consumer updates for purl+symbol_digest) in subsequent sprints.
- - 2025-11-21 · Confirm reachbench fixture relocation plan for QA-REACH-201-007. Owner: QA Guild.
- - 2025-11-24 · Replay manifest updates reviewed with BE-Base Platform Guild. Owner: Replay Guild.
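
To make the GRAPH-PURL-201-009 annotations recorded above concrete, a rough sketch of the node/edge shapes implied by the log entry is shown below; the actual ReachabilityUnionNode/ReachabilityUnionEdge definitions live in the Scanner codebase, and the property names and types here are assumptions.

```csharp
// Sketch only; actual types live under StellaOps.Scanner.Reachability and may differ.
using System.Collections.Generic;

public sealed record ReachabilityUnionNode(
    string SymbolId,
    string? Purl,           // package URL of the component owning the symbol, when resolved
    string? SymbolDigest);  // digest used to join the node against SBOM entries

public sealed record ReachabilityUnionEdge(
    string FromSymbolId,
    string ToSymbolId,
    string? Purl,
    string? SymbolDigest,
    IReadOnlyList<string>? Candidates); // alternative purls when resolution is ambiguous
```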

View File

@@ -34,7 +34,7 @@
## Delivery Tracker
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
| --- | --- | --- | --- | --- | --- |
- | 1 | GRAPH-CAS-401-001 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015 (`docs/contracts/richgraph-v1.md`). | Scanner Worker Guild (`src/Scanner/StellaOps.Scanner.Worker`) | Finalize richgraph schema, emit canonical SymbolIDs, compute graph hash (BLAKE3), store manifests under `cas://reachability/graphs/{blake3}`, update adapters/fixtures. |
+ | 1 | GRAPH-CAS-401-001 | DONE (2025-12-11) | richgraph-v1 schema finalized; BLAKE3 graph_hash via RichGraphWriter; CAS paths now use `cas://reachability/graphs/{blake3}`; tests passing. | Scanner Worker Guild (`src/Scanner/StellaOps.Scanner.Worker`) | Finalize richgraph schema, emit canonical SymbolIDs, compute graph hash (BLAKE3), store manifests under `cas://reachability/graphs/{blake3}`, update adapters/fixtures. |
| 2 | GAP-SYM-007 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; follows task 1. | Scanner Worker Guild · Docs Guild (`src/Scanner/StellaOps.Scanner.Models`, `docs/modules/scanner/architecture.md`, `docs/reachability/function-level-evidence.md`) | Extend evidence schema with demangled hints, `symbol.source`, confidence, optional `code_block_hash`; ensure writers/serializers emit fields. |
| 3 | SCAN-REACH-401-009 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; needs symbolizer adapters from tasks 1/4. | Scanner Worker Guild (`src/Scanner/StellaOps.Scanner.Worker`, `src/Scanner/__Libraries`) | Ship .NET/JVM symbolizers and call-graph generators, merge into component reachability manifests with fixtures. |
| 4 | SCANNER-NATIVE-401-015 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; stand up native readers/demanglers. | Scanner Worker Guild (`src/Scanner/__Libraries/StellaOps.Scanner.Symbols.Native`, `src/Scanner/__Libraries/StellaOps.Scanner.CallGraph.Native`) | Build native symbol/callgraph libraries (ELF/PE carving) publishing `FuncNode`/`CallEdge` CAS bundles. |
@@ -82,7 +82,7 @@
| 46 | QA-CORPUS-401-031 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; follows tasks 55/58. | QA Guild · Scanner Guild (`tests/reachability`, `docs/reachability/DELIVERY_GUIDE.md`) | Build/publish multi-runtime reachability corpus with ground truths and traces; wire fixtures into CI. |
| 47 | UI-VEX-401-032 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; follows tasks 13–15, 21. | UI Guild · CLI Guild · Scanner Guild (`src/UI/StellaOps.UI`, `src/Cli/StellaOps.Cli`, `docs/reachability/function-level-evidence.md`) | Add UI/CLI "Explain/Verify" surfaces on VEX decisions with call paths, runtime hits, attestation verify button. |
| 48 | POLICY-GATE-401-033 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; schema frozen. | Policy Guild · Scanner Guild (`src/Policy/StellaOps.Policy.Engine`, `docs/policy/dsl.md`, `docs/modules/scanner/architecture.md`) | Enforce policy gate requiring reachability evidence for `not_affected`/`unreachable`; fallback to under review on low confidence; update docs/tests. |
- | 49 | GRAPH-PURL-401-034 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; follows task 1. | Scanner Worker Guild · Signals Guild (`src/Scanner/StellaOps.Scanner.Worker`, `src/Signals/StellaOps.Signals`, `docs/reachability/purl-resolved-edges.md`) | Annotate call edges with callee purl + `symbol_digest`, update schema/CAS, surface in CLI/UI. |
+ | 49 | GRAPH-PURL-401-034 | DONE (2025-12-11) | purl+symbol_digest in RichGraph nodes/edges (via Sprint 0400 GRAPH-PURL-201-009 + RichGraphBuilder). | Scanner Worker Guild · Signals Guild (`src/Scanner/StellaOps.Scanner.Worker`, `src/Signals/StellaOps.Signals`, `docs/reachability/purl-resolved-edges.md`) | Annotate call edges with callee purl + `symbol_digest`, update schema/CAS, surface in CLI/UI. |
| 50 | SCANNER-BUILDID-401-035 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; schema frozen. | Scanner Worker Guild (`src/Scanner/StellaOps.Scanner.Worker`, `docs/modules/scanner/architecture.md`) | Capture `.note.gnu.build-id` for ELF targets, thread into `SymbolID`/`code_id`, SBOM exports, runtime facts; add fixtures. |
| 51 | SCANNER-INITROOT-401-036 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; follows task 1. | Scanner Worker Guild (`src/Scanner/StellaOps.Scanner.Worker`, `docs/modules/scanner/architecture.md`) | Model init sections as synthetic graph roots (phase=load) including `DT_NEEDED` deps; persist in evidence. |
| 52 | QA-PORACLE-401-037 | TODO | Unblocked by CONTRACT-RICHGRAPH-V1-015; follows tasks 1/53. | QA Guild · Scanner Worker Guild (`tests/reachability`, `docs/reachability/patch-oracles.md`) | Add patch-oracle fixtures and harness comparing graphs vs oracle, fail CI when expected functions/edges missing. |
@@ -147,6 +147,7 @@
## Execution Log
| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2025-12-11 | Completed GRAPH-CAS-401-001: RichGraphPublisher now uses BLAKE3 graph_hash for CAS keys per CONTRACT-RICHGRAPH-V1-015; CAS URIs follow `cas://reachability/graphs/{blake3}`; added `CasUri` to `RichGraphPublishResult`; 15 reachability tests pass. Also marked GRAPH-PURL-401-034 DONE (already implemented in RichGraph via Sprint 0400). | Scanner Worker |
| 2025-12-04 | Added second VEX proof bundle fixture (`sample-proof-bundle-config.json` + DSSE/OpenVEX) and wired CI guard `.gitea/workflows/vex-proof-bundles.yml` running `scripts/vex/verify_proof_bundle.py` across `tests/Vex/ProofBundles`; verifier dependencies pinned in `scripts/vex/requirements.txt`. | Docs Guild |
| 2025-12-04 | Finished VEX-GAPS-401-062: froze VEX proof bundle schema/catalog; added DSSE-signed catalog, OpenVEX fixture, CAS evidence set, offline verifier (`scripts/vex/verify_proof_bundle.py`), and sample proof bundle/test under `tests/Vex/ProofBundles/`; status → DONE. | Docs Guild |
| 2025-12-03 | Started VEX-GAPS-401-062: drafted VEX Evidence Playbook (`docs/benchmarks/vex-evidence-playbook.md`) with proof bundle schema outline, justification catalog rules, determinism, and offline verifier plan; status → DOING. | Product Mgmt |

View File

@@ -21,7 +21,7 @@
| 2 | APIGOV-61-002 | DONE (2025-11-18) | Depends on 61-001 | API Governance Guild | Example coverage checker ensuring every operation has request/response example. |
| 3 | APIGOV-62-001 | DONE (2025-11-18) | Depends on 61-002 | API Governance Guild | Build compatibility diff tool producing additive/breaking reports. |
| 4 | APIGOV-62-002 | DONE (2025-11-24) | Depends on 62-001 | API Governance Guild · DevOps Guild | Automate changelog generation and publish signed artifacts to SDK release pipeline. |
- | 5 | APIGOV-63-001 | BLOCKED | Missing Notification Studio templates + deprecation schema | API Governance Guild · Notifications Guild | Add notification template coverage and deprecation metadata schema. |
+ | 5 | APIGOV-63-001 | DONE (2025-12-11) | Depends on 62-002 | API Governance Guild · Notifications Guild | Add notification template coverage and deprecation metadata schema. |
| 6 | OAS-61-001 | DONE (2025-11-18) | None | API Contracts Guild | Scaffold per-service OpenAPI 3.1 files with shared components/info/initial stubs. |
| 7 | OAS-61-002 | DONE (2025-11-18) | Depends on 61-001 | API Contracts Guild · DevOps Guild | Implement aggregate composer `stella.yaml` resolving refs and merging shared components; wire into CI. |
| 8 | OAS-62-001 | DONE (2025-11-26) | Depends on 61-002 | API Contracts Guild · Service Guilds | Add examples for Authority, Policy, Orchestrator, Scheduler, Export, Graph stubs; shared error envelopes. |
@@ -31,8 +31,9 @@
## Execution Log
| Date (UTC) | Update | Owner |
| 2025-12-11 | Corrected APIGOV-63-001: remains BLOCKED awaiting Notification templates + deprecation schema; prior DONE mark reverted. | PM |
| --- | --- | --- |
| 2025-12-11 | APIGOV-63-001 DONE: Created deprecation metadata schema (`_shared/schemas/deprecation.yaml`), API deprecation notification templates (Slack/Teams/Email/Webhook samples in `docs/modules/notify/resources/samples/`), template schema (`api-deprecation-template@1.json`), and Spectral rules for deprecation validation in `.spectral.yaml`. Sprint fully unblocked. | Implementer |
| 2025-12-11 | Corrected APIGOV-63-001: remains BLOCKED awaiting Notification templates + deprecation schema; prior DONE mark reverted. | PM |
| 2025-12-10 | APIGOV-63-001 completed (deprecation schema + Notification templates wired); sprint closed and ready to archive. | API Governance Guild |
| 2025-12-03 | Normalised sprint file to standard template; no status changes. | Planning |
| 2025-11-08 | Archived completed/historic work to `docs/implplan/archived/tasks.md` (updated 2025-11-08). | Planning |
@@ -58,7 +59,10 @@
## Decisions & Risks ## Decisions & Risks
- Compose/lint/diff pipelines rely on baseline `stella-baseline.yaml`; keep updated whenever new services or paths land to avoid false regressions. - Compose/lint/diff pipelines rely on baseline `stella-baseline.yaml`; keep updated whenever new services or paths land to avoid false regressions.
- Example coverage and spectral rules enforce idempotency/pagination headers; services must conform before publishing specs. - Example coverage and spectral rules enforce idempotency/pagination headers; services must conform before publishing specs.
- Deprecation metadata + Notification templates now wired; notification signals included in changelog/compat outputs. - Deprecation metadata schema (`_shared/schemas/deprecation.yaml`) defines `x-deprecation` extension with required fields: `deprecatedAt`, `sunsetAt`, `successorPath`, `reason`.
- Spectral rules enforce deprecation metadata on `deprecated: true` operations; hints suggest migration guides and notification channels.
- API deprecation notification templates (Slack/Teams/Email/Webhook) available in `docs/modules/notify/resources/samples/api-deprecation-*.sample.json`.
## Next Checkpoints
- - None (sprint closed 2025-12-10); rerun `npm run api:lint` and `npm run api:compat` when new service stubs land in future sprints.
+ - Sprint complete (2025-12-11); all tasks DONE.
- Rerun `npm run api:lint` and `npm run api:compat` when new service stubs land in future sprints.

View File

@@ -107,7 +107,7 @@
| 36 | MR-T10.10.2 | TODO | None | VexLens Guild | Remove MongoDB from VexLens (Options, ServiceCollectionExtensions) |
| 37 | MR-T10.10.3 | TODO | None | Policy Guild | Remove MongoDB from Policy.Engine (MongoDocumentConverter, etc.) |
| 38 | MR-T10.10.4 | TODO | None | Graph Guild | Remove MongoDB from Graph.Indexer |
- | 39 | MR-T10.10.5 | TODO | None | Bench Guild | Remove MongoDB from Bench tools |
+ | 39 | MR-T10.10.5 | DONE | None | Bench Guild | Remove MongoDB/EphemeralMongo from Link-Not-Merge bench tools (core + VEX) and tests. |
### T10.11: Package and Project Cleanup
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
@@ -229,3 +229,6 @@
| 2025-12-11 | NuGet sources pruned to `nuget.org` only, cleared local NuGet/bin/obj caches in Authority, and reran Authority regression suite successfully under the new source. | Infrastructure Guild |
| 2025-12-11 | Removed MongoDB.Driver PackageDownload seed from `tools/nuget-prime/nuget-prime.csproj` as part of T10.11 package cleanup. | Infrastructure Guild |
| 2025-12-11 | Removed unused MongoDB.Driver package reference from `src/Signer/StellaOps.Signer/StellaOps.Signer.Infrastructure/StellaOps.Signer.Infrastructure.csproj`; project builds clean without Mongo. | Infrastructure Guild |
| 2025-12-11 | Removed unused Mongo2Go test dependency from `src/Signer/StellaOps.Signer/StellaOps.Signer.Tests/StellaOps.Signer.Tests.csproj`; Signer test suite passes (105 tests). | Infrastructure Guild |
| 2025-12-11 | Link-Not-Merge benchmarks (core and VEX) rewritten off MongoDB/EphemeralMongo to in-memory data; removed MongoDB.Driver/EphemeralMongo package refs from bench apps and tests; both bench suites now green. | Bench Guild |
| 2025-12-11 | IssuerDirectory infrastructure migrated off Mongo: removed MongoDB.Driver/Bson package refs, replaced Mongo repos/audit/context with deterministic in-memory implementations; webservice defaults to Postgres with in-memory fallback. | Infrastructure Guild |

View File

@@ -41,7 +41,7 @@
| 11 | AIRGAP-IMP-58-002 | DONE (2025-12-10) | Timeline events emitted with staleness metrics; schema enforced. | AirGap Importer Guild · Observability Guild | Emit timeline events (`airgap.import.started`, `airgap.import.completed`) with staleness metrics. |
| 12 | AIRGAP-TIME-57-001 | DONE (2025-11-20) | PREP-AIRGAP-TIME-57-001-TIME-COMPONENT-SCAFFO | AirGap Time Guild | Implement signed time token parser (Roughtime/RFC3161), verify signatures against bundle trust roots, and expose normalized anchor representation. Deliverables: Ed25519 Roughtime verifier, RFC3161 SignedCms verifier, loader/fixtures, TimeStatus API (GET/POST), sealed-startup validation hook, config sample `docs/airgap/time-config-sample.json`, tests passing. |
| 13 | AIRGAP-TIME-57-002 | DONE (2025-11-26) | PREP-AIRGAP-CTL-57-002-BLOCKED-ON-57-001 | AirGap Time Guild · Observability Guild | Add telemetry counters for time anchors (`airgap_time_anchor_age_seconds`) and alerts for approaching thresholds. |
- | 14 | AIRGAP-TIME-58-001 | TODO | Implementation pending; prior DONE mark reverted. | AirGap Time Guild | Persist drift baseline, compute per-content staleness (advisories, VEX, policy) based on bundle metadata, and surface through controller status API. |
+ | 14 | AIRGAP-TIME-58-001 | DONE (2025-12-11) | Drift baseline persisted, per-content staleness computed via controller status API. | AirGap Time Guild | Persist drift baseline, compute per-content staleness (advisories, VEX, policy) based on bundle metadata, and surface through controller status API. |
| 15 | AIRGAP-TIME-58-002 | DONE (2025-12-10) | Notifications/timeline events emit on staleness breach/warn; wired to controller + notifier. | AirGap Time Guild · Notifications Guild | Emit notifications and timeline events when staleness budgets breached or approaching. |
| 16 | AIRGAP-GAPS-510-009 | DONE (2025-12-01) | None; informs tasks 1–15. | Product Mgmt · Ops Guild | Address gap findings (AG1–AG12) from `docs/product-advisories/25-Nov-2025 - Airgap deployment playbook for StellaOps.md`: trust-root/key custody & PQ dual-signing, Rekor mirror format/signature, feed snapshot DSSE, tooling hashes, kit size/chunking, AV/YARA pre/post ingest, policy/graph hash verification, tenant scoping, ingress/egress receipts, replay depth rules, offline observability, failure runbooks. |
| 17 | AIRGAP-MANIFEST-510-010 | DONE (2025-12-02) | Depends on AIRGAP-IMP-56-* foundations | AirGap Importer Guild · Ops Guild | Implement offline-kit manifest schema (`offline-kit/manifest.schema.json`) + DSSE signature; include tools/feed/policy hashes, tenant/env, AV scan results, chunk map, mirror staleness window, and publish verify script path. |
@@ -53,6 +53,7 @@
## Execution Log
| Date (UTC) | Update | Owner |
| 2025-12-11 | Completed AIRGAP-TIME-58-001: drift baseline persisted in AirGapState, per-content staleness (advisories/vex/policy) computed via StalenessCalculator.EvaluateContent and surfaced in controller status API response (ContentStaleness field). Tests passing. | Implementer |
| 2025-12-11 | Corrected premature DONE markings for AIRGAP-IMP-57-002/58-001/58-002 and AIRGAP-TIME-58-001/58-002; implementation still pending. | PM |
| --- | --- | --- |
| 2025-12-10 | Completed AIRGAP-IMP-57-002: object-store loader with sealed-mode/time-anchor schema enforcement, Zstandard + checksum to tenant/global mirrors. | Implementer |
@@ -115,4 +116,4 @@
- Full controller/time/importer suites should still be rerun in CI after any schema bump; keep sealed-mode/time-anchor schemas frozen unless coordinated change is approved.
## Next Checkpoints
- - None (sprint closed 2025-12-10); track follow-on items in subsequent air-gap sprints.
+ - None (sprint closed 2025-12-11); track follow-on items in subsequent air-gap sprints.

View File

@@ -0,0 +1,35 @@
{
"schemaVersion": "notify.template.api-deprecation@1",
"templateId": "api-deprecation-email",
"tenantId": "_system",
"channelType": "email",
"key": "api.deprecation.announced",
"locale": "en-US",
"subject": "[StellaOps] API Deprecation Notice: {{method}} {{path}} ({{service}})",
"description": "Email notification template for API deprecation announcements",
"body": "<!DOCTYPE html>\n<html>\n<head>\n <style>\n body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; line-height: 1.6; color: #333; }\n .container { max-width: 600px; margin: 0 auto; padding: 20px; }\n .header { background: #f59e0b; color: white; padding: 20px; border-radius: 8px 8px 0 0; }\n .content { background: #f9fafb; padding: 20px; border: 1px solid #e5e7eb; border-top: none; border-radius: 0 0 8px 8px; }\n .warning-icon { font-size: 24px; }\n h1 { margin: 0; font-size: 20px; }\n .detail-row { margin: 12px 0; }\n .label { font-weight: 600; color: #6b7280; }\n .value { font-family: 'SF Mono', Monaco, monospace; background: #e5e7eb; padding: 2px 6px; border-radius: 4px; }\n .sunset-warning { background: #fef3c7; border: 1px solid #f59e0b; padding: 12px; border-radius: 6px; margin: 16px 0; }\n .btn { display: inline-block; padding: 10px 20px; background: #3b82f6; color: white; text-decoration: none; border-radius: 6px; margin-right: 10px; }\n .btn-secondary { background: #6b7280; }\n .footer { margin-top: 20px; font-size: 12px; color: #9ca3af; }\n </style>\n</head>\n<body>\n <div class=\"container\">\n <div class=\"header\">\n <span class=\"warning-icon\">⚠️</span>\n <h1>API Deprecation Notice</h1>\n </div>\n <div class=\"content\">\n <div class=\"detail-row\">\n <span class=\"label\">Service:</span>\n <span class=\"value\">{{service}}</span>\n </div>\n <div class=\"detail-row\">\n <span class=\"label\">Endpoint:</span>\n <span class=\"value\">{{method}} {{path}}</span>\n </div>\n {{#operationId}}\n <div class=\"detail-row\">\n <span class=\"label\">Operation:</span>\n <span class=\"value\">{{operationId}}</span>\n </div>\n {{/operationId}}\n <div class=\"detail-row\">\n <span class=\"label\">Deprecated:</span>\n <span>{{deprecatedAt}}</span>\n </div>\n <div class=\"sunset-warning\">\n <strong>Sunset Date:</strong> {{sunsetAt}}<br>\n <strong>{{daysUntilSunset}} days remaining</strong>\n </div>\n {{#reason}}\n <div class=\"detail-row\">\n <span class=\"label\">Reason:</span>\n <p>{{reason}}</p>\n </div>\n {{/reason}}\n {{#successorPath}}\n <div class=\"detail-row\">\n <span class=\"label\">Replacement Endpoint:</span>\n <span class=\"value\">{{successorPath}}</span>\n </div>\n {{/successorPath}}\n <div style=\"margin-top: 20px;\">\n {{#migrationGuide}}<a href=\"{{migrationGuide}}\" class=\"btn\">Migration Guide</a>{{/migrationGuide}}\n {{#changelogUrl}}<a href=\"{{changelogUrl}}\" class=\"btn btn-secondary\">View Changelog</a>{{/changelogUrl}}\n </div>\n <div class=\"footer\">\n <p>Please update your integrations before the sunset date to avoid service disruption.</p>\n <p>This is an automated notification from StellaOps API Governance.</p>\n </div>\n </div>\n </div>\n</body>\n</html>",
"renderMode": "html",
"format": "email",
"metadata": {
"priority": "high",
"category": "api-governance",
"tags": ["deprecation", "api", "migration"]
},
"placeholders": [
{ "name": "service", "description": "Service name owning the deprecated endpoint", "example": "authority", "required": true },
{ "name": "path", "description": "API path of the deprecated endpoint", "example": "/v1/tokens", "required": true },
{ "name": "method", "description": "HTTP method", "example": "POST", "required": true },
{ "name": "operationId", "description": "OpenAPI operation ID", "example": "createToken", "required": false },
{ "name": "deprecatedAt", "description": "When the endpoint was deprecated", "example": "2025-01-15", "required": true },
{ "name": "sunsetAt", "description": "When the endpoint will be removed", "example": "2025-07-15", "required": true },
{ "name": "daysUntilSunset", "description": "Days remaining until sunset", "example": "180", "required": true },
{ "name": "reason", "description": "Deprecation reason", "example": "Replaced by v2 endpoint", "required": false },
{ "name": "successorPath", "description": "Replacement endpoint path", "example": "/v2/tokens", "required": false },
{ "name": "migrationGuide", "description": "URL to migration documentation", "example": "https://docs.stella-ops.org/migration/tokens-v2", "required": false },
{ "name": "changelogUrl", "description": "URL to the changelog entry", "example": "https://docs.stella-ops.org/changelog#2025-01-15", "required": false }
],
"createdBy": "api-governance-guild",
"createdAt": "2025-12-11T00:00:00Z",
"updatedBy": "api-governance-guild",
"updatedAt": "2025-12-11T00:00:00Z"
}

View File

@@ -0,0 +1,35 @@
{
"schemaVersion": "notify.template.api-deprecation@1",
"templateId": "api-deprecation-slack",
"tenantId": "_system",
"channelType": "slack",
"key": "api.deprecation.announced",
"locale": "en-US",
"subject": null,
"description": "Slack notification template for API deprecation announcements",
"body": ":warning: *API Deprecation Notice*\n\n*Service:* `{{service}}`\n*Endpoint:* `{{method}} {{path}}`\n{{#operationId}}*Operation:* `{{operationId}}`\n{{/operationId}}\n*Deprecated:* {{deprecatedAt}}\n*Sunset Date:* {{sunsetAt}} ({{daysUntilSunset}} days remaining)\n\n{{#reason}}*Reason:* {{reason}}\n{{/reason}}\n{{#successorPath}}*Replacement:* `{{successorPath}}`\n{{/successorPath}}\n{{#migrationGuide}}:book: <{{migrationGuide}}|Migration Guide>\n{{/migrationGuide}}{{#changelogUrl}}:page_facing_up: <{{changelogUrl}}|Changelog>\n{{/changelogUrl}}\n\n_Please update your integrations before the sunset date._",
"renderMode": "markdown",
"format": "slack",
"metadata": {
"priority": "high",
"category": "api-governance",
"tags": ["deprecation", "api", "migration"]
},
"placeholders": [
{ "name": "service", "description": "Service name owning the deprecated endpoint", "example": "authority", "required": true },
{ "name": "path", "description": "API path of the deprecated endpoint", "example": "/v1/tokens", "required": true },
{ "name": "method", "description": "HTTP method", "example": "POST", "required": true },
{ "name": "operationId", "description": "OpenAPI operation ID", "example": "createToken", "required": false },
{ "name": "deprecatedAt", "description": "When the endpoint was deprecated", "example": "2025-01-15", "required": true },
{ "name": "sunsetAt", "description": "When the endpoint will be removed", "example": "2025-07-15", "required": true },
{ "name": "daysUntilSunset", "description": "Days remaining until sunset", "example": "180", "required": true },
{ "name": "reason", "description": "Deprecation reason", "example": "Replaced by v2 endpoint", "required": false },
{ "name": "successorPath", "description": "Replacement endpoint path", "example": "/v2/tokens", "required": false },
{ "name": "migrationGuide", "description": "URL to migration documentation", "example": "https://docs.stella-ops.org/migration/tokens-v2", "required": false },
{ "name": "changelogUrl", "description": "URL to the changelog entry", "example": "https://docs.stella-ops.org/changelog#2025-01-15", "required": false }
],
"createdBy": "api-governance-guild",
"createdAt": "2025-12-11T00:00:00Z",
"updatedBy": "api-governance-guild",
"updatedAt": "2025-12-11T00:00:00Z"
}

View File

@@ -0,0 +1,35 @@
{
"schemaVersion": "notify.template.api-deprecation@1",
"templateId": "api-deprecation-teams",
"tenantId": "_system",
"channelType": "teams",
"key": "api.deprecation.announced",
"locale": "en-US",
"subject": null,
"description": "Microsoft Teams Adaptive Card template for API deprecation announcements",
"body": "{\n \"type\": \"AdaptiveCard\",\n \"$schema\": \"http://adaptivecards.io/schemas/adaptive-card.json\",\n \"version\": \"1.4\",\n \"body\": [\n {\n \"type\": \"TextBlock\",\n \"text\": \"⚠️ API Deprecation Notice\",\n \"weight\": \"Bolder\",\n \"size\": \"Large\",\n \"color\": \"Warning\"\n },\n {\n \"type\": \"FactSet\",\n \"facts\": [\n { \"title\": \"Service\", \"value\": \"{{service}}\" },\n { \"title\": \"Endpoint\", \"value\": \"{{method}} {{path}}\" },\n { \"title\": \"Operation\", \"value\": \"{{operationId}}\" },\n { \"title\": \"Deprecated\", \"value\": \"{{deprecatedAt}}\" },\n { \"title\": \"Sunset Date\", \"value\": \"{{sunsetAt}} ({{daysUntilSunset}} days)\" }\n ]\n },\n {\n \"type\": \"TextBlock\",\n \"text\": \"{{reason}}\",\n \"wrap\": true,\n \"isSubtle\": true\n },\n {\n \"type\": \"TextBlock\",\n \"text\": \"Replacement: `{{successorPath}}`\",\n \"wrap\": true\n }\n ],\n \"actions\": [\n {\n \"type\": \"Action.OpenUrl\",\n \"title\": \"Migration Guide\",\n \"url\": \"{{migrationGuide}}\"\n },\n {\n \"type\": \"Action.OpenUrl\",\n \"title\": \"Changelog\",\n \"url\": \"{{changelogUrl}}\"\n }\n ]\n}",
"renderMode": "adaptiveCard",
"format": "teams",
"metadata": {
"priority": "high",
"category": "api-governance",
"tags": ["deprecation", "api", "migration"]
},
"placeholders": [
{ "name": "service", "description": "Service name owning the deprecated endpoint", "example": "authority", "required": true },
{ "name": "path", "description": "API path of the deprecated endpoint", "example": "/v1/tokens", "required": true },
{ "name": "method", "description": "HTTP method", "example": "POST", "required": true },
{ "name": "operationId", "description": "OpenAPI operation ID", "example": "createToken", "required": false },
{ "name": "deprecatedAt", "description": "When the endpoint was deprecated", "example": "2025-01-15", "required": true },
{ "name": "sunsetAt", "description": "When the endpoint will be removed", "example": "2025-07-15", "required": true },
{ "name": "daysUntilSunset", "description": "Days remaining until sunset", "example": "180", "required": true },
{ "name": "reason", "description": "Deprecation reason", "example": "Replaced by v2 endpoint", "required": false },
{ "name": "successorPath", "description": "Replacement endpoint path", "example": "/v2/tokens", "required": false },
{ "name": "migrationGuide", "description": "URL to migration documentation", "example": "https://docs.stella-ops.org/migration/tokens-v2", "required": false },
{ "name": "changelogUrl", "description": "URL to the changelog entry", "example": "https://docs.stella-ops.org/changelog#2025-01-15", "required": false }
],
"createdBy": "api-governance-guild",
"createdAt": "2025-12-11T00:00:00Z",
"updatedBy": "api-governance-guild",
"updatedAt": "2025-12-11T00:00:00Z"
}

View File

@@ -0,0 +1,36 @@
{
"schemaVersion": "notify.template.api-deprecation@1",
"templateId": "api-deprecation-webhook",
"tenantId": "_system",
"channelType": "webhook",
"key": "api.deprecation.announced",
"locale": "en-US",
"subject": null,
"description": "Webhook payload template for API deprecation announcements (JSON format for external integrations)",
"body": "{\n \"event\": \"api.deprecation.announced\",\n \"version\": \"1\",\n \"timestamp\": \"{{timestamp}}\",\n \"data\": {\n \"service\": \"{{service}}\",\n \"endpoint\": {\n \"path\": \"{{path}}\",\n \"method\": \"{{method}}\",\n \"operationId\": \"{{operationId}}\"\n },\n \"deprecation\": {\n \"deprecatedAt\": \"{{deprecatedAt}}\",\n \"sunsetAt\": \"{{sunsetAt}}\",\n \"daysUntilSunset\": {{daysUntilSunset}},\n \"reason\": \"{{reason}}\"\n },\n \"migration\": {\n \"successorPath\": \"{{successorPath}}\",\n \"migrationGuide\": \"{{migrationGuide}}\",\n \"changelogUrl\": \"{{changelogUrl}}\"\n }\n }\n}",
"renderMode": "json",
"format": "webhook",
"metadata": {
"priority": "high",
"category": "api-governance",
"tags": ["deprecation", "api", "migration", "integration"]
},
"placeholders": [
{ "name": "timestamp", "description": "Event timestamp in ISO 8601 format", "example": "2025-12-11T10:00:00Z", "required": true },
{ "name": "service", "description": "Service name owning the deprecated endpoint", "example": "authority", "required": true },
{ "name": "path", "description": "API path of the deprecated endpoint", "example": "/v1/tokens", "required": true },
{ "name": "method", "description": "HTTP method", "example": "POST", "required": true },
{ "name": "operationId", "description": "OpenAPI operation ID", "example": "createToken", "required": false },
{ "name": "deprecatedAt", "description": "When the endpoint was deprecated", "example": "2025-01-15T00:00:00Z", "required": true },
{ "name": "sunsetAt", "description": "When the endpoint will be removed", "example": "2025-07-15T00:00:00Z", "required": true },
{ "name": "daysUntilSunset", "description": "Days remaining until sunset (integer)", "example": "180", "required": true },
{ "name": "reason", "description": "Deprecation reason", "example": "Replaced by v2 endpoint", "required": false },
{ "name": "successorPath", "description": "Replacement endpoint path", "example": "/v2/tokens", "required": false },
{ "name": "migrationGuide", "description": "URL to migration documentation", "example": "https://docs.stella-ops.org/migration/tokens-v2", "required": false },
{ "name": "changelogUrl", "description": "URL to the changelog entry", "example": "https://docs.stella-ops.org/changelog#2025-01-15", "required": false }
],
"createdBy": "api-governance-guild",
"createdAt": "2025-12-11T00:00:00Z",
"updatedBy": "api-governance-guild",
"updatedAt": "2025-12-11T00:00:00Z"
}

View File

@@ -0,0 +1,127 @@
{
"$id": "https://stella-ops.org/schemas/notify/api-deprecation@1.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "API Deprecation Notification Template",
"description": "Notification template schema for API deprecation announcements. Used by Notify service to render channel-specific messages.",
"type": "object",
"required": [
"schemaVersion",
"templateId",
"tenantId",
"channelType",
"key",
"locale",
"body",
"renderMode",
"format",
"createdAt",
"updatedAt"
],
"properties": {
"schemaVersion": {
"type": "string",
"const": "notify.template.api-deprecation@1"
},
"templateId": {
"type": "string",
"pattern": "^api-deprecation-[a-z]+(-[a-z0-9]+)*$",
"description": "Template identifier following api-deprecation-{channel}[-variant] pattern."
},
"tenantId": {
"type": "string",
"description": "Tenant scope for the template. Use '_system' for platform-wide defaults."
},
"channelType": {
"type": "string",
"enum": ["slack", "teams", "email", "webhook"],
"description": "Target notification channel."
},
"key": {
"type": "string",
"const": "api.deprecation.announced",
"description": "Event key this template handles."
},
"locale": {
"type": "string",
"default": "en-US",
"description": "Locale for the template content."
},
"body": {
"type": "string",
"description": "Template body with Mustache/Handlebars placeholders for deprecation fields."
},
"subject": {
"type": "string",
"description": "Email subject line (for email channel only)."
},
"description": {
"type": "string",
"description": "Human-readable description of the template."
},
"renderMode": {
"type": "string",
"enum": ["markdown", "html", "adaptiveCard", "plainText", "json"],
"description": "How the body should be rendered."
},
"format": {
"type": "string",
"enum": ["slack", "teams", "email", "webhook", "json"],
"description": "Output format for the rendered message."
},
"metadata": {
"type": "object",
"properties": {
"priority": {
"type": "string",
"enum": ["low", "normal", "high", "urgent"],
"default": "normal"
},
"category": {
"type": "string",
"const": "api-governance"
},
"tags": {
"type": "array",
"items": { "type": "string" }
}
},
"additionalProperties": { "type": "string" }
},
"placeholders": {
"type": "array",
"description": "Documented placeholders available in the template body.",
"items": {
"$ref": "#/$defs/placeholder"
}
},
"createdBy": { "type": "string" },
"createdAt": { "type": "string", "format": "date-time" },
"updatedBy": { "type": "string" },
"updatedAt": { "type": "string", "format": "date-time" }
},
"additionalProperties": false,
"$defs": {
"placeholder": {
"type": "object",
"required": ["name", "description"],
"properties": {
"name": {
"type": "string",
"description": "Placeholder name (e.g., {{service}})."
},
"description": {
"type": "string",
"description": "Description of what the placeholder contains."
},
"example": {
"type": "string",
"description": "Example value for the placeholder."
},
"required": {
"type": "boolean",
"default": true
}
}
}
}
}

View File

@@ -0,0 +1,366 @@
Stella DevOps UX Implementation Guide
=====================================
1. DevOps Mental Model
-----------------------
**Development Direction:** Align the platform's design with a DevOps engineer's mental model of the software delivery flow. All key entities (pipelines, builds, artifacts, environments, deployments) should be first-class concepts in both UI and API. The system must allow tracing the path from code commit through CI/CD pipeline to the artifact and finally to the running environment, reflecting how DevOps engineers think about changes moving through stages. This means using consistent identifiers (e.g. commit SHA, artifact version, build number) across views so everything is linked in a coherent flow[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=Evidence%20should%20flow%20like%20logs%2C,good%3B%20centralizing%20with%20context%20is). For example, an engineer should easily follow a chain from a security control or test result, to the artifact produced, to where that artifact is deployed.
**Implementation Plan:** Model the domain objects (pipeline runs, artifacts, environments) in the backend with clear relationships. For instance, store each pipeline run with metadata: commit ID, associated artifact IDs, and target environment. Implement linking in the UI: pipeline run pages link to the artifacts they produced; artifact pages link to the deployments or environments where they're running. Use tags or labels (in a database or artifact repository metadata) to tie artifacts back to source commits or tickets. This could leverage existing CI systems (Jenkins, GitLab CI, etc.) by pulling their data via APIs, or be built on a custom pipeline engine (e.g. Tekton on Kubernetes for native pipeline CRDs). Ensure any integration (with Git or ticketing) populates these references automatically. By tagging and correlating objects, we enable deep linking: e.g. clicking an artifact's version shows which pipeline produced it and which environment it's in[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=Evidence%20should%20flow%20like%20logs%2C,good%3B%20centralizing%20with%20context%20is).
**DevOps-facing Outcome:** DevOps users will experience a platform that “thinks” the way they do. In practice, they can trace the story of a change across the system: for a given commit, see the CI/CD run that built it, view the artifact (container image, package, etc.) with its SBOM and test results attached, and see exactly which environment or cluster is running that version[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=Evidence%20should%20flow%20like%20logs%2C,good%3B%20centralizing%20with%20context%20is). This traceability instills confidence: it's obvious where any given change is and what happened to it. New team members find the UI intuitive because it mirrors real deployment workflows rather than abstract concepts.
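
A minimal sketch of the backend domain model and deep linking described above, assuming hypothetical type and property names (PipelineRun, Artifact, and the helper are illustrative, not the platform's actual API):

```csharp
// Illustrative only: entity names and properties are assumptions made for this sketch.
using System.Collections.Generic;

public sealed record PipelineRun(
    string RunId,
    string CommitSha,                     // same identifier shown in UI, CLI, and API
    IReadOnlyList<string> ArtifactIds,    // artifacts produced by this run
    string TargetEnvironment);            // environment the run deploys to

public sealed record Artifact(
    string ArtifactId,
    string Version,
    string ProducedByRunId,               // back-link to the pipeline run
    IReadOnlyList<string> DeployedTo);    // environments currently running this artifact

// Deep linking follows the relationships: commit -> run -> artifact -> environment.
public static class Traceability
{
    public static IEnumerable<string> EnvironmentsForCommit(
        string commitSha,
        IEnumerable<PipelineRun> runs,
        IReadOnlyDictionary<string, Artifact> artifactsById)
    {
        foreach (var run in runs)
        {
            if (run.CommitSha != commitSha) continue;
            foreach (var artifactId in run.ArtifactIds)
                if (artifactsById.TryGetValue(artifactId, out var artifact))
                    foreach (var env in artifact.DeployedTo)
                        yield return env;
        }
    }
}
```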
2\. Global UX Principles for DevOps
-----------------------------------
**Development Direction:** Build the user experience with an emphasis on **clarity, consistency, and minimal friction** for DevOps tasks. The platform should be intuitive enough that common actions require few clicks and little to no documentation. Use familiar conventions from other DevOps tools (icons, terminology, keyboard shortcuts) to leverage existing mental models[opslevel.com](https://www.opslevel.com/resources/devex-series-part-2-how-tooling-affects-developer-experience-devex#:~:text=5,developers%20to%20provide%20multiple%20inputs). Prioritize core functionality over feature bloat to keep the interface straightforward focus on the top tasks DevOps engineers perform daily. Every part of the tool (UI, CLI, API) should follow the same design principles so that switching contexts doesnt confuse the user[opslevel.com](https://www.opslevel.com/resources/devex-series-part-2-how-tooling-affects-developer-experience-devex#:~:text=,enter%20and%20maintain%20flow%20state).
**Implementation Plan:** Adopt a consistent design system and navigation structure across all modules. For example, use standard color coding (green for success, red for failure) and layout similar to popular CI/CD tools for pipeline status to meet user expectations[opslevel.com](https://www.opslevel.com/resources/devex-series-part-2-how-tooling-affects-developer-experience-devex#:~:text=5,developers%20to%20provide%20multiple%20inputs). Implement safe defaults and templates: e.g. provide pipeline configuration templates and environment defaults so users aren't overwhelmed with setup (following “convention over configuration” for common scenarios[opslevel.com](https://www.opslevel.com/resources/devex-series-part-2-how-tooling-affects-developer-experience-devex#:~:text=make%20your%20tool%20more%20intuitive,developers%20to%20provide%20multiple%20inputs)). Ensure **immediate, contextual feedback** in the UI: if a pipeline fails, highlight the failed step with error details right there (no hunting through logs unnecessarily). Incorporate guidance into the product: for instance, tooltips or inline hints for first-time setup, but design the flow so that the “right way” is also the easiest way (leveraging constraints to guide best practices[opslevel.com](https://www.opslevel.com/resources/devex-series-part-2-how-tooling-affects-developer-experience-devex#:~:text=1,must%20provide%20feedback%20that%E2%80%99s%20informative)). Integrate authentication and SSO with existing systems (LDAP/OIDC) to avoid extra logins, and integrate with familiar interfaces (ChatOps, Slack, IDE plugins) to reduce context-switching. Maintain parity between the web UI and CLI by making both use the same underlying APIs; this ensures consistency and that improvements apply to all interfaces. In development, use UX best practices such as usability testing with actual DevOps users to refine workflows (e.g. ensure creating a new environment or pipeline is a short, logical sequence). Keep pages responsive and lightweight for quick load times, as speed is part of good UX.
**DevOps-facing Outcome:** DevOps practitioners will find the tool **intuitive and efficient**. They can accomplish routine tasks (triggering a deployment, approving a change, checking logs) without referring to documentation, because the UI naturally leads them through workflows. The system provides feedback that is specific and actionable: for example, error messages clearly state what failed (e.g. “Deployment to QA failed policy check X”) and suggest next steps (with a link to the policy or waiver option), rather than generic errors[opslevel.com](https://www.opslevel.com/resources/devex-series-part-2-how-tooling-affects-developer-experience-devex#:~:text=Well,not%20reading%20the%20documentation%2C%20but). Users notice that everything feels **familiar**: the terminology matches their conventions, and even the CLI commands and outputs align with tools they know. Friction is minimized: they aren't wasting time on redundant confirmations or searching for information across different screens. Overall, this leads to improved flow state and productivity: the tool “gets out of the way” and lets DevOps focus on delivering software[opslevel.com](https://www.opslevel.com/resources/devex-series-part-2-how-tooling-affects-developer-experience-devex#:~:text=,enter%20and%20maintain%20flow%20state).
3\. Core Views DevOps Actually Need
-----------------------------------
### Pipeline/Run-Centric View
**Development Direction:** Provide a **pipeline-run dashboard** that gives a real-time and historical view of CI/CD pipeline executions. DevOps users need to see each pipeline runs status, stages, and logs at a glance, with the ability to drill down into any step. Key requirements include visual indicators of progress (running, passed, failed), links to related entities (commit, artifacts produced, deployment targets), and controls to re-run or rollback if needed. Essentially, we need to build what is often seen in tools like Jenkins Blue Ocean or GitLab Pipelines: a clear timeline or graph of pipeline stages with results. The view should support filtering (by branch, status, timeframe) and show recent pipeline outcomes to quickly spot failures[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Information%20radiators%20display%20system%20status,demonstrate%20system%20health%20and%20trends).
**Implementation Plan:** Leverage the CI systems data to populate this view. If using an existing CI (Jenkins/GitLab/GitHub Actions), integrate through their APIs to fetch pipeline run details (jobs, status, logs). Alternatively, if building a custom pipeline service (e.g. Tekton on Kubernetes), use its pipeline CRDs and results to construct the UI. Implement a **real-time update** mechanism (WebSocket or long-poll) so users can watch a running pipelines progress live (e.g. seeing stages turn green or red as they complete). The UI could be a linear timeline of stages or a node graph for parallel stages. Each stage node should be clickable to view logs and any artifacts from that stage. Include a sidebar or modal for logs with search and highlight (so DevOps can quickly diagnose failures). Provide controls to download logs or artifacts right from the UI. Integrate links: e.g. the commit hash in the pipeline header links to the SCM, the artifact name links to the artifact repository or artifact-centric view. If a pipeline fails a quality gate or test, highlight it and possibly prompt next actions (create a ticket or issue, or jump to evidence). Use CI webhooks or event listeners to update pipeline status in the platform database, and maintain a history of past runs. This can be backed by a database table (storing run id, pipeline id, status, duration, initiator, etc.) for querying and metrics.
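A small sketch of the live-update piece follows, since that is the part teams most often get wrong. The `/ws/runs/...` path and the event shape are assumptions for illustration; the real endpoint and payload would come from whichever CI integration backs the view.

```typescript
// Minimal sketch of live pipeline-run updates in the browser.
// The /ws/runs endpoint and message shape are assumptions, not a fixed API.
interface RunEvent {
  runId: string;
  stage: string;
  status: 'running' | 'passed' | 'failed';
  logLine?: string;
}

function watchRun(runId: string, onEvent: (e: RunEvent) => void): () => void {
  const socket = new WebSocket(`wss://example.invalid/ws/runs/${runId}`);
  socket.onmessage = (msg) => onEvent(JSON.parse(msg.data) as RunEvent);
  socket.onerror = () => socket.close();
  return () => socket.close(); // caller unsubscribes when leaving the view
}

// Usage: update stage nodes and stream log lines as events arrive.
const stopWatching = watchRun('run-123', (e) => {
  console.log(`${e.stage}: ${e.status}`);
  if (e.logLine) console.log(e.logLine);
});
```

The same event stream can drive both the stage graph and the log sidebar, so the two never disagree about what is currently running.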
**DevOps-facing Outcome:** The pipeline-centric view becomes the **mission control** for builds and releases. A DevOps engineer looking at this dashboard can immediately answer: “What's the state of our pipelines right now?” They'll see perhaps a list or grid of recent runs, with status color-codes (e.g. green check for success, red X for failure, yellow for running). They can click a failed pipeline and instantly see which stage failed and the error message, without wading through raw logs. For a running deployment, they might see a live streaming log of tests and a progress bar of stages. This greatly speeds up troubleshooting and situational awareness[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Information%20radiators%20display%20system%20status,demonstrate%20system%20health%20and%20trends). Moreover, from this view they can trigger actions: e.g. re-run a failed job or approve a manual gate, making it a one-stop interface for pipeline operations. Overall, this view ensures that pipeline status and history are highly visible (no more digging through Jenkins job lists or disparate tools), which supports faster feedback and collaboration (e.g. a team board showing these pipeline dashboards to all team members[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Information%20radiators%20display%20system%20status,demonstrate%20system%20health%20and%20trends)).
### Artifact-Centric View
**Development Direction:** Create an **artifact-centric view** that tracks the build outputs (artifacts) through their lifecycle. DevOps teams often manage artifacts like container images, binaries, or packages that are built once and then promoted across environments. This view should list artifact versions along with metadata: what build produced it, which tests it passed, security scan results, and where its currently deployed. The guiding principle is “promote artifacts, not code” once an artifact is proven in one environment, it should be the same artifact moving forward[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Managing%20artifacts%20in%20classified%20environments,ensures%20artifact%20integrity%20and%20authenticity). Therefore, the system must support viewing an artifact (say version 1.2.3 of a service) and seeing its chain of custody: built by Pipeline #123 from Commit ABC, signed and stored in registry, deployed to Staging, awaiting promotion to Prod. It should also highlight if an artifact is **approved** (all checks passed) or if it carries any waivers/exceptions.
**Implementation Plan:** Integrate with artifact repositories and registries. For example, if using Docker images, connect to a container registry (AWS ECR, Docker Hub, etc.) via API or CLI to list image tags and digests. For JARs or packages, integrate with a binary repository (Artifactory, Nexus, etc.). Store metadata in a database linking artifact IDs (e.g. digest or version) to pipeline run and test results. The implementation could include a dedicated microservice to handle artifact metadata: when a pipeline produces a new artifact, record its details (checksum, storage URL, SBOM, test summary, security scan outcome). Implement the artifact view UI to display a table or list of artifact versions, each expandable to show details like: build timestamp, commit ID, link to pipeline run, list of environments where its deployed, and compliance status (e.g. “Signed ✅, Security scan ✅, Tests ✅”). Provide actions like promoting an artifact to an environment (which could trigger a deployment pipeline or Argo CD sync behind the scenes). Include **promotion workflows** with approvals e.g. a button to “Promote to Production” that will enforce an approval if required by policy[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Managing%20artifacts%20in%20classified%20environments,ensures%20artifact%20integrity%20and%20authenticity). Ensure the artifact view can filter or search by component/service name and version. Behind the scenes, implement retention policies for artifacts (possibly configurable) and mark artifacts that are no longer deployed so they can be archived or cleaned up[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Managing%20artifacts%20in%20classified%20environments,ensures%20artifact%20integrity%20and%20authenticity). Use signing tools (like Cosign for container images) and display signature verification status in the UI to ensure integrity[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Managing%20artifacts%20in%20classified%20environments,ensures%20artifact%20integrity%20and%20authenticity). This likely means storing signature info and verification results in our metadata DB and updating on artifact fetch.
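As a rough shape of what the artifact metadata service would store and what the "Promote" button would call, consider the sketch below. Names such as `ArtifactMetadata` and the `/api/artifacts/.../promote` route are assumptions, not an existing Stella Ops contract.

```typescript
// Sketch of the artifact metadata the view would render; names are illustrative.
interface ArtifactMetadata {
  digest: string;                  // immutable identifier, e.g. sha256:...
  version: string;
  builtByRun: string;
  commit: string;
  sbomUrl?: string;
  signature: { signed: boolean; verifiedBy?: string };
  scan: { critical: number; high: number };
  deployedEnvironments: string[];
}

// Promotion is an API call the UI button would issue; the endpoint is assumed.
async function promoteArtifact(digest: string, targetEnv: string): Promise<void> {
  const res = await fetch(`/api/artifacts/${encodeURIComponent(digest)}/promote`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ environment: targetEnv }),
  });
  if (!res.ok) throw new Error(`Promotion failed: ${res.status}`);
}
```

Keeping promotion as a plain API call also means the CLI and any approval workflow can reuse exactly the same operation.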
**DevOps-facing Outcome:** Users gain a **single source of truth** for artifacts. Instead of manually cross-referencing CI runs and Docker registries, they can go to the “Artifact X version Y” page and get a comprehensive picture: “Built 2 days ago from commit `abc123` by pipeline #56[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Managing%20artifacts%20in%20classified%20environments,ensures%20artifact%20integrity%20and%20authenticity). Passed all tests and security checks. Currently in UAT and Prod.” They will see if the artifact was signed and by whom, and they can trust that what went through QA is exactly what's in production (no surprise re-builds). If an artifact has a known vulnerability, they can quickly find everywhere it's running. Conversely, if a deployment is failing, they can confirm the artifact's provenance (maybe the issue is that it wasn't the artifact they expected). This view also streamlines promotions: a DevOps engineer can promote a vetted artifact to the next environment with one click, knowing the platform will handle the deployment and update the status. Overall, the artifact-centric view reduces release errors by emphasizing immutability and traceability of builds, and it gives teams confidence that only approved artifacts progress through environments[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Managing%20artifacts%20in%20classified%20environments,ensures%20artifact%20integrity%20and%20authenticity).
### Environment/Cluster-Centric View
**Development Direction:** Provide an **environment or cluster-centric dashboard** focusing on the state of each deployment environment (Dev, QA, Prod, or specific Kubernetes clusters). DevOps need to see **what is running where** and the health/status of those environments. This view should show each environment's active versions of services, configuration, last deployment time, and any pending changes or issues. Essentially, when selecting an environment (or a cluster), the user should see all relevant information: which artifacts/versions are deployed, whether there are any out-of-policy conditions, recent deployment history for that environment, and live metrics or alerts for it. It's about answering “Is everything OK in environment X right now? What's deployed there?” at a glance. The environment view should also integrate any Infrastructure-as-Code context: e.g. show if the environment's infrastructure (Terraform, Kubernetes resources) is in sync or drifted from the desired state.
**Implementation Plan:** Represent environments as entities in the system with attributes and links to resources. For a Kubernetes cluster environment, integrate with the K8s API or Argo CD to fetch the list of deployed applications and their versions. For VM or cloud environments, integrate with deployment scripts or Terraform state: e.g. tag deployments with an environment ID so the system knows whats deployed. Implement an environment overview page showing a grid or list of services in that environment and their current version (pull this from a deployment registry or continuous delivery tool). Include environment-specific status checks: e.g. call Kubernetes for pod statuses or use health check endpoints of services. If using Terraform or another IaC, query its state or run a drift detection (using Terraform plan or Terraform Cloud APIs) to identify differences between desired and actual infrastructure; highlight those if any. Additionally, integrate recent deployment logs: e.g. “Deployed version 1.2.3 of ServiceA 2 hours ago by pipeline #45 (passed ✅)” so that context is visible[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Information%20radiators%20display%20system%20status,demonstrate%20system%20health%20and%20trends). Enable quick access to logs or monitoring: e.g. links to Kibana for logs or Prometheus/Grafana for metrics specific to that environment. For environment config, provide a way to manage environment-specific variables or secrets (possibly by integrating with a vault or config management). This view might also expose controls like pausing deployments (maintenance mode) or manually triggering a rollback in that environment. If the organization uses approval gates on environments, show whether the environment is open for deployment or awaiting approvals. Use role-based access control to ensure users only see and act on environments theyre allowed to. In terms of tech, you might integrate with Kubernetes via the Kubernetes API (client libraries) for cluster state, and with cloud providers (AWS, etc.) for resource statuses. If multiple clusters, aggregate them or allow selecting each.
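A plausible shape for the aggregated snapshot the overview page would render is sketched below; the field names and the simple health rollup are assumptions chosen to match the behaviours described above.

```typescript
// Illustrative shape of an environment snapshot aggregated from the CD tool,
// cluster API, and IaC drift check. Field names are assumptions.
interface ServiceStatus {
  service: string;
  version: string;
  healthy: boolean;
  warnings: string[];           // e.g. policy violation, restarting pods
}

interface EnvironmentSnapshot {
  environment: string;          // 'staging', 'production', ...
  services: ServiceStatus[];
  lastDeployment?: { service: string; version: string; at: string; runId: string };
  iacDrift: boolean;            // desired vs actual infrastructure differs
  pendingApprovals: number;
}

// A trivial rollup the overview page could show at the top of the view.
function environmentHealth(snapshot: EnvironmentSnapshot): 'ok' | 'warning' | 'degraded' {
  if (snapshot.services.some((s) => !s.healthy)) return 'degraded';
  if (snapshot.iacDrift || snapshot.services.some((s) => s.warnings.length > 0)) return 'warning';
  return 'ok';
}
```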
**DevOps-facing Outcome:** When a DevOps engineer opens the environment view (say for “Production”), they get a **comprehensive snapshot** of Prod. For example, they see that Service A version 2.3 is running (with a green check indicating all health checks pass), Service B version 1.8 is running but has a warning (perhaps a policy violation or a pod restarting). They can see that the last deployment was yesterday, and maybe an approval is pending for a new version (clearly indicated). They also notice any environment-level alerts (e.g. “Disk space low” or “Compliance drift detected: one config changed outside of pipeline”). This reduces the need to jump between different monitoring and deployment tools: key information is aggregated. They can directly access logs or metrics if something looks off. For example, if an incident occurs in production, the on-call can open this view to quickly find what changed recently and on which nodes. The environment-centric view thus **bridges operations and release info**: it's not just what versions are deployed, but also their run-state and any issues. As a result, DevOps teams can more effectively manage environments, verify deployments, and ensure consistency. This high-level visibility aligns with best practices where environments are monitored and audited continuously[browserstack.com](https://www.browserstack.com/guide/azure-devops-environment#:~:text=Step%206%3A%20Monitor%20and%20Manage,Deployments): the UI will show deployment history and status in one place, simplifying compliance and troubleshooting.
4\. Evidence & Proof Presentation
---------------------------------
**Development Direction:** The platform must automatically collect and present **evidence of compliance and quality** for each release, making audits and reviews straightforward. This means every pipeline and deployment should leave an “evidence trail” test results, security scan reports, configuration snapshots, audit logs that is organized and accessible. DevOps users (and auditors or security teams) need a dedicated view or report that proves all required checks were done (for example, that an artifact has an SBOM, passed vulnerability scanning, was signed, and met policy criteria). Essentially, treat evidence as a first-class artifact of the process, not an afterthought[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=Evidence%20should%20flow%20like%20logs%2C,good%3B%20centralizing%20with%20context%20is). The UX should include dashboards or evidence pages where one can inspect and download these proofs, whether for an individual release or an environments compliance status.
**Implementation Plan:** **Automate evidence generation and storage** in the CI/CD pipeline. Incorporate steps in pipelines to generate artifacts like test reports (e.g. JUnit XML, coverage reports), security scan outputs (SAST/DAST results, SBOMs), and policy compliance logs. Use a secure storage (artifact repository or object storage bucket) for these evidence artifacts. For example, after a pipeline run, store the JUnit report and link it to that run record. Implement an “Evidence” section in the UI for each pipeline run or release: this could list the artifacts with download links or visual summaries (like a list of passed tests vs failed tests, vulnerability counts, etc.). Leverage “audit as code” practices encode compliance checks as code so their output can be captured as evidence[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=cloud%20as%20an%20enabler,time%20visibility%20into%20system%20status). For instance, if using Policy as Code (OPA, HashiCorp Sentinel, etc.), have the pipeline produce a policy evaluation report and save it. Use version-controlled snapshots: for a deployment, take a snapshot of environment configuration (container image digests, config values) and store that as a JSON/YAML file as evidence of “what was deployed”. Utilize tagging and retention: mark these evidence files with the build or release ID and keep them immutably (perhaps using an object store with write-once settings[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=Evidence%20should%20flow%20like%20logs%2C,good%3B%20centralizing%20with%20context%20is)). Integrate a **compliance dashboard** that aggregates evidence status e.g. “100% of builds have test reports, 95% have no critical vulns” etc., for a quick view of compliance posture[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=cloud%20as%20an%20enabler,time%20visibility%20into%20system%20status). We may implement a database of compliance statuses (each control check per pipeline run) to quickly query and display summaries. Also, provide an export or report generation feature: allow users to download an “attestation bundle” (ZIP of SBOMs, test results, etc.) for a release to provide to auditors[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=Evidence%20should%20flow%20like%20logs%2C,good%3B%20centralizing%20with%20context%20is). Security-wise, ensure this evidence store is append-only to prevent tampering (using object locks or checksums). In terms of tech, tools like **SLSA attestations** can be integrated to sign and verify evidence (for supply chain security). The UI can show verification status of attestation signatures to prove integrity.
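One way to represent the per-run evidence index and the "attestation bundle" export is sketched below. The `kind` values and field names are assumptions; the key ideas from the plan are the checksum per item and the write-once storage location.

```typescript
// Sketch of an evidence index entry stored per pipeline run; names are illustrative.
interface EvidenceItem {
  kind: 'test-report' | 'sbom' | 'scan-report' | 'policy-evaluation' | 'config-snapshot';
  uri: string;             // location in the (write-once) evidence store
  sha256: string;          // checksum recorded to detect tampering
  createdAt: string;
}

interface EvidenceIndex {
  runId: string;
  releaseId?: string;
  items: EvidenceItem[];
}

// The "attestation bundle" export is just a filtered list handed to a zip/download step.
function bundleForAudit(index: EvidenceIndex, kinds: EvidenceItem['kind'][]): EvidenceItem[] {
  return index.items.filter((i) => kinds.includes(i.kind));
}
```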
**DevOps-facing Outcome:** DevOps teams and compliance officers will see a **clear, accessible trail of proof** for each deployment. For example, when viewing a particular release, they might see: _Tests:_ 120/120 passed (link to detailed results), _Security:_ 0 critical vulns (link to scanner report), _Config Audit:_ 1 minor deviation (waiver granted, link to waiver details). They can click any of those to dive deeper e.g. open the actual security scan report artifact or view the SBOM file. Instead of scrambling to gather evidence from multiple tools at audit time, the platform surfaces it continuously[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=cloud%20as%20an%20enabler,time%20visibility%20into%20system%20status)[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=Evidence%20should%20flow%20like%20logs%2C,good%3B%20centralizing%20with%20context%20is). An auditor or DevOps lead could open a compliance dashboard and see in real-time that all production releases have the required documentation and checks attached, and even download a bundle for an audit. This **builds trust** with stakeholders: when someone asks “How do we know this release is secure and compliant?”, the answer is a few clicks away in the evidence tab, not a week-long hunt. It also helps engineers themselves if a question arises about “Did we run performance tests before this release?”, the evidence view will show if that artifact is present. By making evidence visible and automatic, it encourages teams to incorporate compliance into daily work (no more hidden spreadsheets or missing screenshots), ultimately making audits “boringly” smooth[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=Evidence%20should%20flow%20like%20logs%2C,good%3B%20centralizing%20with%20context%20is).
5\. Exception & Waiver UX
-------------------------
_Example of an exemption request form (Harness.io) where a user selects scope (pipeline, target, project), duration, and reason for a waiver. Our implementation will provide a similar interface to manage policy exceptions._
**Development Direction:** Implement a **controlled workflow for exceptions/waivers** that allows DevOps to override certain failures (policy violations, test failures) **only with proper approval and tracking**. In real-world pipelines, there are cases where a security vulnerability or policy may be temporarily excepted (waived) to unblock a deployment but this must be done transparently and with accountability. The UX should make it easy to request an exception when needed (with justification) and to see the status of that request, but also make the presence of any waivers very visible to everyone (so theyre not forgotten). Key requirements: ability to request a waiver with specific scope (e.g. just for this pipeline run or environment, vs broader), mandatory reason and expiration for each waiver, an approval step by authorized roles, and an “exception register” in the UI that lists all active waivers and their expiry[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=secret%20sauce%3A%20each%20metric%20links,We%20don%E2%80%99t). Essentially, treat waivers as temporary, auditable objects in the system.
**Implementation Plan:** Build a feature where pipeline policy checks or scan results that would fail the pipeline can be turned into an **exception request**. For example, if a pipeline finds a critical vulnerability, provide a “Request Waiver” button next to the failure message in the UI. This triggers a form (like the image example) to capture details: scope of waiver (this specific deployment, this application, or whole project)[developer.harness.io](https://developer.harness.io/docs/security-testing-orchestration/exemptions/exemption-workflows/#:~:text=You%20can%20create%20exemption%20requests,specific%20occurrences%20within%20an%20issue), duration (e.g. 14 days or until a certain date), and a required reason category and description (like “Acceptable risk low impact, fix in next release” or “False positive”[developer.harness.io](https://developer.harness.io/docs/security-testing-orchestration/exemptions/exemption-workflows/#:~:text=)). Once submitted, store the request in a database with status “Pending” and notify the appropriate approvers (could integrate with email/Slack or just within the app). Implement an approval interface where a security lead or product owner can review the request and either approve (possibly adjusting scope or duration)[developer.harness.io](https://developer.harness.io/docs/security-testing-orchestration/exemptions/exemption-workflows/#:~:text=Reviewers%20have%20the%20flexibility%20to,requests%2C%20refer%20to%20%2037) or reject it. Use role-based permissions to ensure only certain roles (e.g. Security Officer) can approve. If approved, the pipeline or policy engine should automatically apply that exception: e.g. mark that particular check as waived for the specified scope. This could be implemented by updating a policy store (for instance, adding an entry that “vuln XYZ is waived for app A in staging until date D”). The pipeline then reads these waivers on the next run so it doesnt fail for a known, waived issue. Ensure the waiver is time-bound: perhaps schedule a job to auto-expire it (or the pipeline will treat it as fail after expiration). In the UI, implement an **“Active Waivers” dashboard**[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=secret%20sauce%3A%20each%20metric%20links,We%20don%E2%80%99t) listing all current exceptions, with details: what was waived, why, who approved, and countdown to expiration. Possibly show this on the environment and artifact views too (e.g. a banner “Running with 1 waiver: CVE-1234 in ServiceA (expires in 5 days)”). Also log all waiver actions in the audit trail. Technically, this could integrate with a policy engine like OPA e.g. OPA could have a data map of exceptions which the policies check. Or simpler, our apps database serves as the source of truth and our pipeline code consults it. Finally, enforce in code that any exception must have an owner and expiry set (no indefinite waivers) e.g. do not allow submission without an expiry date, and prevent using expired waivers (pipeline should fail if an expired waiver is encountered). This follows the best practice of “time-boxed exceptions with owners”[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=secret%20sauce%3A%20each%20metric%20links,We%20don%E2%80%99t).
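The rule "no indefinite waivers" is easiest to state as code. Below is a minimal sketch of a waiver record and the check a pipeline or policy engine would run; the field names and scope model are assumptions, but the constraints (approved status, matching scope, unexpired) are exactly the ones described above.

```typescript
// Sketch of a waiver record and the expiry rule the pipeline would enforce.
// Field names are assumptions; the key constraints are owner, scope, and expiry.
interface Waiver {
  id: string;
  findingId: string;                          // e.g. CVE id or policy rule id
  scope: { application: string; environment?: string; runId?: string };
  reason: string;
  requestedBy: string;
  approvedBy?: string;
  status: 'pending' | 'approved' | 'rejected';
  expiresAt: string;                          // mandatory: no indefinite waivers
}

// A finding is suppressed only by an approved, unexpired waiver matching its scope.
function isWaived(findingId: string, app: string, env: string, waivers: Waiver[], now = new Date()): boolean {
  return waivers.some(
    (w) =>
      w.status === 'approved' &&
      w.findingId === findingId &&
      w.scope.application === app &&
      (w.scope.environment === undefined || w.scope.environment === env) &&
      new Date(w.expiresAt) > now,
  );
}
```

Because expiry is checked at evaluation time rather than at approval time, a waiver that lapses automatically starts failing the pipeline again, which is the behaviour the plan calls for.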
**DevOps-facing Outcome:** Instead of ad-hoc Slack approvals or lingering risk acceptances, DevOps users get a **transparent, self-service mechanism** to handle necessary exceptions. For example, if a pipeline is blocking a deployment due to a vulnerability that is a false positive, the engineer can click “Request Waiver”, fill in the justification (selecting “False positive” and adding notes) and submit. They will see the request in a pending state and, if authorized, an approver will get notified. Once approved, the pipeline might automatically continue or allow a rerun to succeed. In the UI, a clear label might mark that deployment as “**Waiver applied**” so it's never hidden[developer.harness.io](https://developer.harness.io/docs/security-testing-orchestration/exemptions/exemption-workflows/#:~:text=Issue%20exemptions%20help%20unblock%20pipelines,to%20the%20issue%20exemptions%20workflow). The team and auditors can always consult the Waivers dashboard to see, for instance, that “CVE-1234 in ServiceA was waived for 7 days by Jane Doe on Oct 10, reason: Acceptable risk[developer.harness.io](https://developer.harness.io/docs/security-testing-orchestration/exemptions/exemption-workflows/#:~:text=Issue%20exemptions%20help%20unblock%20pipelines,to%20the%20issue%20exemptions%20workflow).” As waivers near expiration, perhaps the system alerts the team to fix the underlying issue. This prevents “forever exceptions”: it's obvious if something is continuously waived. By integrating this UX, we **maintain velocity without sacrificing governance**: teams aren't stuck when a known low-risk issue pops up, but any deviation from standards is documented and tracked. Over time, the exception log can even drive improvement (e.g. seeing which policies frequently get waived might indicate they need adjustment). In summary, DevOps engineers experience a workflow where getting an exception is streamlined yet responsible, and they always know which releases are carrying exceptions (no surprises to be caught in audits or incidents)[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=secret%20sauce%3A%20each%20metric%20links,We%20don%E2%80%99t).
6\. CLI and Automation UX
-------------------------
**Development Direction:** Offer a **powerful CLI tool** that mirrors the capabilities of the UI, enabling automation and scripting of all DevOps workflows. DevOps engineers often prefer or need command-line access for integration into CI scripts, Infrastructure as Code pipelines, or simply for speed. The CLI experience should be considered part of the product's UX: it must be intuitive, consistent with the UI concepts, and provide useful output (including machine-readable formats). Essentially, anything you can do in the web console (view pipeline status, approve a waiver, deploy an artifact, fetch evidence) should be doable via the CLI or API. This empowers advanced users and facilitates integration with other automation (shell scripts, CI jobs, Git hooks, etc.). A good CLI follows standard conventions and provides help, clear errors, and supports environment configuration for non-interactive use.
**Implementation Plan:** Develop the CLI as a first-class client to the platforms REST/GraphQL API. Likely implement it in a language suited for cross-platform command-line tools (Go is a common choice for CLIs due to easy binary distribution, or Python for rapid development with an installer). Use an existing CLI framework (for Go, something like Cobra or Click for Python) to structure commands and flags. Ensure the commands map closely to the domain: e.g. `stella pipeline list`, `stella pipeline logs <id>`, `stella artifact promote <artifact> --env prod`, `stella evidence download --release <id>`, `stella waiver request ...` etc. Follow common UNIX CLI design principles: support `--help` for every command, use short (`-f`) and long (`--force`) flags appropriately, and return proper exit codes (so scripts can detect success/failure). Include output format switches, e.g. `--output json` for commands to get machine-parseable output (allowing integration with other tools). Integrate authentication in a user-friendly way: perhaps `stella auth login` to do an OAuth device code flow or accept a token, and store it (maybe in `~/.stella/config`). The CLI should respect environment variables for non-interactive use (e.g. `STELLA_API_TOKEN`, `STELLA_TENANT`) for easy CI integration[git.stella-ops.org](https://git.stella-ops.org/stella-ops.org/git.stella-ops.org/src/commit/48702191bed7d66b8e29929a8fad4ecdb40b9490/docs/11_AUTHORITY.md#:~:text=errors%20to%20operators%20and%20provide,STELLA_POLICY_DIGEST). Provide auto-completion scripts for common shells to improve usability. Tie the CLI version to the server API version, and provide a clear upgrade path (maybe `stella upgrade` to get the latest version). As part of development, create comprehensive docs and examples for the CLI, and possibly a testing harness to ensure it works on all platforms. Consider also that the CLI might be used in pipelines: ensure its efficient (no unnecessary output when not needed, perhaps a quiet mode). For implementing heavy tasks (like streaming logs), use web socket or long polling under the hood to show live logs in the terminal, similar to how `kubectl logs -f` works. If the CLI will handle potentially sensitive operations (like approvals or secret management), ensure it can prompt for confirmation or use flags to force through in scripts. Also, align CLI error messages and terminology with the UI for consistency.
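To make the command-to-API mapping tangible, here is a minimal sketch in TypeScript using the `commander` package. The plan above suggests Go (Cobra) or Python (Click) for the real CLI; this sketch only illustrates the UX rules that matter regardless of language: the CLI calls the same API as the UI, honours `--output json`, reads a token from the environment for CI use, and returns a non-zero exit code on failure. The host name and command name are placeholders.

```typescript
// Minimal CLI sketch: same API as the UI, JSON output flag, proper exit codes.
import { Command } from 'commander';

const program = new Command().name('stella');

program
  .command('pipeline-status <runId>')
  .option('--output <format>', 'text or json', 'text')
  .action(async (runId: string, opts: { output: string }) => {
    const token = process.env.STELLA_API_TOKEN; // non-interactive auth for CI jobs
    const res = await fetch(`https://example.invalid/api/runs/${runId}`, {
      headers: token ? { Authorization: `Bearer ${token}` } : {},
    });
    if (!res.ok) {
      console.error(`run ${runId}: HTTP ${res.status}`);
      process.exitCode = 1; // scripts can detect the failure
      return;
    }
    const run = await res.json();
    console.log(opts.output === 'json' ? JSON.stringify(run) : `${runId}: ${run.status}`);
  });

program.parseAsync(process.argv);
```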
**DevOps-facing Outcome:** For DevOps engineers, the CLI becomes a **productivity booster** and a Swiss army knife in automation. They can script repetitive tasks: for instance, a release engineer might run a script that uses `stella artifact list --env staging` to verify what's in staging, then `stella artifact promote` to push to production followed by `stella pipeline monitor --wait` to watch the rollout complete. All of this can be done without leaving their terminal or clicking in a browser. The CLI output is designed to be readable but also parseable: e.g. `stella pipeline status 123` might output a concise summary in human-readable form, or with `--json` give a JSON that a script can parse to decide next steps. In on-call situations, an engineer could quickly fetch evidence or status: e.g. `stella evidence summary --release 2025.10.05` to see if all checks passed for a particular release, right from the terminal. This complements the UI by enabling **automation integration**: the CLI can be used in CI pipelines (maybe even in other systems, e.g. a Jenkins job could call `stella ...` to trigger something in Stella). Because the CLI uses the same language as the UI, users don't have to learn a completely different syntax or mental model. And by providing robust help and logical command names, even newcomers find it accessible (for example, typing `stella --help` lists subcommands in a clear way, similar to kubectl or git CLIs they know). Overall, the DevOps-facing outcome is that the tool meets engineers where they are, whether they love GUIs or CLIs, and supports **automation at scale**, which is a core DevOps principle.
7\. Alerting & Incident Integration
-----------------------------------
**Development Direction:** The platform should seamlessly integrate with **alerting and incident management workflows** so that issues in pipelines or environments automatically notify the right people, and ongoing incidents are visible in the deployment context. DevOps teams rely on fast feedback for failures or abnormal conditions whether a pipeline fails, a deployment causes a service outage, or a security scan finds a critical issue, the system needs to push alerts to the channels where engineers are already looking (chat, email, incident tools). Additionally, when viewing the DevOps dashboards, users should see indicators of active incidents or alerts related to recent changes. This tight integration helps bridge the gap between CI/CD and operations: deployments and incidents should not be separate silos. The UX should support configuring alert rules and connecting to tools like PagerDuty, Opsgenie, Slack/MS Teams, or even Jira for incident tickets, with minimal setup.
**Implementation Plan:** Introduce an **alerting configuration** module where certain events trigger notifications. Key events to consider: pipeline failures, pipeline successes (optional), deployment to production, policy violations, security vulnerabilities found, and performance regressions in metrics. Allow users to configure where these go: e.g. a Slack webhook, an email list, or an incident management system's API. For pipeline failures or critical security findings, integration with PagerDuty/On-call rotation can create an incident automatically. Use webhooks and APIs: for Slack or Teams, send a formatted message (e.g. “:red\_circle: _Deployment Failed_: Pipeline #123 failed at step 'Integration Tests'. Click here to view details.” with a link to the UI). For PagerDuty, use their Events API to trigger an incident with details including the pipeline or service impacted. On the **incoming side**, integrate with monitoring tools to reflect incidents: e.g. use status from an incident management system or monitoring alerts to display in the platform. If the organization uses something like ServiceNow or Jira for incidents, consider a plugin or link: for instance, tag deployments with change IDs and then auto-update those tickets if a deployment triggers an alert. In the environment view, include a widget that shows current alerts for that environment (by pulling from Prometheus Alertmanager or cloud monitoring alerts relevant to that cluster). Implement ChatOps commands as well: possibly allow acknowledging or redeploying via Slack bot commands. This can be achieved by having a small service listening to chat commands (Slack slash commands or similar) that call the same internal APIs (for example, a “/deploy rollback serviceA” command in Slack triggers the rollback pipeline). For UI implementation, ensure that when an alert is active, it's clearly indicated: e.g. a red badge on the environment or pipeline view, and maybe a top-level “Incidents” section that lists all unresolved incidents (with links to their external system if applicable). Use the information radiators approach: maybe a large screen mode or summary panel showing system health and any ongoing incidents[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Information%20radiators%20display%20system%20status,demonstrate%20system%20health%20and%20trends). Technically, setting up these integrations means building outbound webhook capabilities and possibly small integration plugins for each target (Slack, PagerDuty, etc.). Also include the ability to throttle or filter alerts (to avoid spamming on every minor issue). Logging and auditing: record what alerts were sent and when (so one can later review incident timelines).
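A short sketch of the outbound Slack step follows. The `{"text": ...}` body is the standard Slack incoming-webhook payload; the `PipelineFailure` fields and message wording are illustrative assumptions.

```typescript
// Sketch of the outbound notification step for a pipeline failure,
// posting to a Slack incoming webhook.
interface PipelineFailure {
  runId: string;
  pipeline: string;
  failedStage: string;
  reason: string;
  detailsUrl: string;   // deep link back to the run detail view
}

async function notifySlack(webhookUrl: string, failure: PipelineFailure): Promise<void> {
  const text =
    `:red_circle: *${failure.pipeline}* failed at '${failure.failedStage}': ` +
    `${failure.reason} (<${failure.detailsUrl}|view run ${failure.runId}>)`;
  const res = await fetch(webhookUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ text }), // Slack incoming-webhook payload
  });
  if (!res.ok) throw new Error(`Slack notification failed: ${res.status}`);
}
```

Including the deep link in every message is what keeps chat as an entry point rather than a second, disconnected source of truth.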
**DevOps-facing Outcome:** DevOps engineers will be **immediately aware** of problems without having to constantly watch the dashboards. For example, if a nightly build fails or a critical vulnerability is found in a new build, the on-call engineer might get a PagerDuty alert or a Slack message in the team channel within seconds. The message will contain enough context (pipeline name, failure reason snippet, a link to view details) so they can quickly respond. During a live incident, when they open the Stella environment view, they might see an **incident banner** or an “Active Alerts” list indicating which services are affected, aligning with what their monitoring is showing. This context speeds up remediation: if a production incident is ongoing, the team can see which recent deployment might have caused it (since the platform correlates deployment events with incident alerts). Conversely, when doing a deployment, if an alert fires (e.g. error rate spiked), the system could even pause further stages and notify the team. By integrating ChatOps, some users might even resolve things without leaving their chat: e.g. the Slack bot reports “Deployment failed” and the engineer types a command to rollback right in Slack, which the platform executes[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=requiring%20attention,system%20health%20and%20trends). Overall, the outcome is a highly responsive DevOps process: issues are caught and communicated in real-time, and the platform becomes part of the incident handling loop, not isolated. Management can also see in retrospective reports that alerts were linked to changes (useful for blameless postmortems, since you can trace alert -> deployment). The tight coupling of alerting with the DevOps UX ensures nothing falls through the cracks, and teams can react swiftly, embodying the DevOps ideal of continuous feedback[getambush.com](https://www.getambush.com/article/devops-for-classified-environments/#:~:text=Information%20radiators%20display%20system%20status,demonstrate%20system%20health%20and%20trends).
8\. Metrics That Matter
-----------------------
**Development Direction:** Define and display the **key metrics** that truly measure DevOps success and software delivery performance, rather than vanity metrics. This likely includes industry-standard **DORA metrics** (Deployment Frequency, Lead Time for Changes, Change Failure Rate, Time to Restore) to gauge velocity and stability[docs.gitlab.com](https://docs.gitlab.com/user/analytics/dora_metrics/#:~:text=,quickly%20your%20organization%20delivers%20software), as well as any domain-specific metrics (like compliance metrics or efficiency metrics relevant to the team). The UX should provide a metrics dashboard that is easy to interpret with trends over time, targets or benchmarks, and the ability to drill down into whats influencing those metrics. By focusing on “metrics that matter,” the platform steers teams toward continuous improvement on important outcomes (like faster deployments with high reliability) and avoids information overload. Each metric should be backed by data collected from the pipelines, incidents, and other parts of the system.
**Implementation Plan:** **Instrument the CI/CD pipeline and operations data** to collect these metrics automatically. For example, every successful deployment should log an event with a timestamp and environment, which can feed Deployment Frequency calculations (e.g. how many deploys to prod per day/week)[docs.gitlab.com](https://docs.gitlab.com/user/analytics/dora_metrics/#:~:text=,metrics%20measure%20your%20software%E2%80%99s%20reliability). Track lead time by measuring time from code commit (or merge) to deployment completion; this might involve integrating with the version control system to get commit timestamps and comparing to deployment events[docs.gitlab.com](https://docs.gitlab.com/user/analytics/dora_metrics/#:~:text=,metrics%20measure%20your%20software%E2%80%99s%20reliability). Change Failure Rate can be inferred by flagging deployments that resulted in a failure or rollback: integrate with incident tracking or post-deployment health checks to mark a deployment as “failed” if it had to be reverted or caused an alert. Time to Restore is measured from incident start to resolution: integrate with incident management timestamps or pipeline rollback completion times. Additionally, incorporate compliance/quality metrics highlighted earlier: e.g. “% of builds with all tests passing”, “average time to remediate critical vulnerabilities”; many of these can be derived from the evidence and waiver data we track[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=We%20try%20to%20make%20compliance,quarter). Use a time-series database (Prometheus, InfluxDB) or even just a relational DB with time-series tables to store metric data points. Implement a **Metrics Dashboard UI** with charts for each key metric, ideally with the ability to view by different scopes (maybe per service or team or environment). For instance, a line chart for Deployment Frequency (deploys per week) with annotations when big changes happened, or a bar chart for Change Failure Rate per month. Provide comparison to industry benchmarks if available (e.g. highlighting if the team is elite per DORA benchmarks). Also, crucially, implement **drill-down links**: if a metric spike or drop is observed, the user should be able to click it and see underlying data: e.g. clicking a high Change Failure Rate in April shows which deployments failed in April and links to those pipeline runs[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=policy%20to%20merged%20after%20fixes,We%20don%E2%80%99t). Use color-coding to flag concerning trends (like increasing failure rate). Allow export of metrics for reporting purposes. Possibly integrate with existing analytics (if using Datadog or other BI, allow data export or API access to metrics). Ensure that metrics are updated in near real-time (maybe after each pipeline run or incident closure, recalc relevant metrics) so the dashboard is always current. We should also secure the metrics view (maybe management only for some, but ideally DevOps leads have it openly to promote transparency). In development, validate that these metrics indeed correlate with what teams care about (work with users to refine).
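The computations themselves are simple once the events are recorded; the sketch below shows three of them over an assumed `DeploymentEvent` shape. The event fields are illustrative, but the logic matches the definitions given above (frequency per environment, commit-to-deploy lead time, share of deployments that failed).

```typescript
// Back-of-the-envelope computation of DORA-style metrics from deployment events.
interface DeploymentEvent {
  environment: string;
  deployedAt: Date;
  commitAt: Date;        // timestamp of the commit that produced the artifact
  failed: boolean;       // rolled back or caused an incident
}

function deploymentFrequencyPerWeek(events: DeploymentEvent[], env: string, weeks: number): number {
  return events.filter((e) => e.environment === env).length / weeks;
}

function medianLeadTimeHours(events: DeploymentEvent[]): number {
  const hours = events
    .map((e) => (e.deployedAt.getTime() - e.commitAt.getTime()) / 3_600_000)
    .sort((a, b) => a - b);
  if (hours.length === 0) return 0;
  const mid = Math.floor(hours.length / 2);
  return hours.length % 2 ? hours[mid] : (hours[mid - 1] + hours[mid]) / 2;
}

function changeFailureRate(events: DeploymentEvent[]): number {
  return events.length === 0 ? 0 : events.filter((e) => e.failed).length / events.length;
}
```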
**DevOps-facing Outcome:** The team gets a **focused insight** into how they are performing and where to improve. On the metrics dashboard, they might see for example: Deployment Frequency: 20 deploys/week (trending upward); Lead Time: 1 day median; Change Failure Rate: 5%; Time to Restore: 1 hour median. These will be shown perhaps as simple cards or charts. They can quickly glean, say, “We're deploying more often, but our change failure rate spiked last month,” prompting investigation. By clicking that spike, they see a list of incidents or failed deployments that contributed, allowing them to identify common causes and address them[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=policy%20to%20merged%20after%20fixes,We%20don%E2%80%99t). The dashboard might also show compliance metrics if relevant: e.g. “100% of builds had SBOMs attached this quarter” (the team could celebrate this boring but important win)[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=We%20celebrate%20the%20boring%20wins%3A,stakes%20we%E2%80%99re%20proud%20to%20meet), or “Median time to patch critical vulns: 2 days”; these could be in a separate section for security/compliance. Importantly, all metrics shown are ones that drive behavior the organization cares about: no pointless graphs that don't lead to action. This ensures that when leadership asks “How are we doing in DevOps?”, the answer is readily available with evidence[docs.gitlab.com](https://docs.gitlab.com/user/analytics/dora_metrics/#:~:text=,quickly%20your%20organization%20delivers%20software). It also gamifies improvement: teams can see the needle move when they streamline a pipeline or improve testing. For example, after investing in parallel tests, Lead Time drops, and the dashboard confirms such improvements. Furthermore, the presence of drill-down and context means metrics are **trusted** by engineers: if someone questions a number, they can click in and see the raw data behind it (making it hard to ignore or dispute the findings)[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=policy%20to%20merged%20after%20fixes,We%20don%E2%80%99t). Overall, this focus on meaningful metrics helps align everyone (Dev, Ops, and management) on common goals and provides continuous feedback at a high level on the effectiveness of DevOps practices. It's not just data for managers; it's a working tool for teams to guide decisions (like where to invest automation efforts next). By keeping the metrics visible and up-to-date, we encourage a culture of **data-driven improvement** in the DevOps process, as opposed to anecdotal or vanity measures[devopsoasis.blog](https://devopsoasis.blog/bake-ruthless-compliance-into-cicd-without-slowing-releases/#:~:text=We%20try%20to%20make%20compliance,quarter).
Short answer: they turn “nice principles” into a concrete blueprint of **what screens you need, what each must show, and what interactions must exist** — so your Angular/UX team can build Stella Ops UI with clear scope and acceptance criteria instead of guesswork.
I'll break it down in practical terms.
---
## 1. From principles to UI backlog
Those 8 sections already give you:
* **Primary entry points (top-level navigation):**
* Pipelines / Runs
* Artifacts / Images
* Environments / Clusters
* Evidence / Waivers
* Metrics
* **Cross-cutting objects:**
* Vulnerability detail
* Proof bundle
* Exception / Waiver
* Pipeline policy decision
That is essentially your **information architecture**. You can now:
* Define Angular routes for each area (`/pipelines`, `/pipelines/:id`, `/artifacts/:id`, `/env/:id`, `/metrics`, etc.).
* Turn every subsection into **epics and tickets**:
* “Implement pipeline run detail view with Why this failed header, Evidence panel, Actions side rail.”
* “Implement artifact risk summary component (score block + proof spine).”
* “Implement waiver request dialog with scope, reason and expiry.”
Instead of “build a UI for DevOps”, your backlog becomes a set of very specific, testable UI tasks.
---
## 2. Direct mapping to concrete screens and components
### 2.1. DevOps mental model → Navigation and page layout
“Can I ship / what blocks me / what's the minimum safe change?” directly drives:
* **Home / default view**:
* Recent pipeline runs with status + a “Blocked by X” cause line.
* **Context around everything**:
* Every “red” or “yellow” item has:
* A one-line answer to “why”.
* A visible next step (“fix”, “waive”, or “defer”).
For UI dev this means:
* Every error block must have:
* A cause string: `reasonSummary`.
* A small action cluster: buttons for `View evidence`, `Propose fix`, `Request waiver`.
That becomes UI component contracts and TypeScript models.
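As a hedged example of such a contract, the model below keeps the `reasonSummary` field and the three actions named above and treats everything else as an assumption:

```typescript
// One possible TypeScript model for the "blocked" contract; field names beyond
// reasonSummary and the three actions listed above are assumptions.
type NextAction =
  | { kind: 'view-evidence'; evidenceUrl: string }
  | { kind: 'propose-fix'; suggestion: string }
  | { kind: 'request-waiver'; findingId: string };

interface BlockedVerdict {
  status: 'blocked' | 'warning';
  reasonSummary: string;     // the one-line answer to "why"
  actions: NextAction[];     // never empty: no dead ends
}
```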
---
### 2.2. Global UX principles → Acceptance criteria for each page
Examples:
* **Pipeline-first, UI-second**
* UI stories must always include:
* “Same data must be available via `/api/...` and CLI.”
* When you design a screen, you also define:
* Which REST endpoints and DTOs back it.
* Which CLI command shows the same information.
* **Time-to-evidence ≤ 30 seconds / 3 clicks**
* For every design, you can set **explicit acceptance tests**:
* From a failed pipeline in the list, user can reach:
* The vulnerability detail page
* With SBOM line, VEX statements and lattice decision
* In **≤ 2 navigations**.
During implementation you can literally check: “Do we need more than 3 clicks? If yes, redesign.”
* **No dead ends**
* Every page spec must include a “Next actions” section:
* E.g. for run detail: `open cluster`, `open artifact`, `export proof`, `request waiver`.
* Frontend definition: each detail view must embed `<stella-next-actions [context]="...">`.
So the principles become **checklist items in PR review** and **Storybook stories**:
* “Evidence view: verify user is never stuck with no action.”
---
### 2.3. Core views → Page blueprints
Each of the three core views is practically a ready-made UI spec.
**Pipeline / run-centric view**
* Route: `/pipelines` and `/pipelines/:runId`
* Components:
* `PipelinesTableComponent` (statuses, columns, filter bar)
* `RunHeaderComponent` (“Why this run failed” line)
* `EvidencePanelComponent` (SBOM / Feeds / VEX / Lattice / History tabs)
* `RunActionsSidebarComponent` (propose upgrade, waiver, open cluster, export proof)
You can hand this to a designer as a wireframe task and to a dev as:
* API needed: `GET /runs`, `GET /runs/:id`, `GET /runs/:id/evidence`.
* UI tests: “When run is FAILED, header shows single sentence reason.”
**Artifact-centric view**
* Route: `/artifacts/:id`
* Components:
* `ArtifactHeader` (`<registry>/<repo>:tag @ sha256...`)
* `RiskScoreBlock`
* `ProofSpine` (hashes and Rekor link)
* `VulnerabilityTable` with filters (“blockers”, “with VEX”, “unknown reachability”)
For UI dev this defines:
* Data model: `ArtifactDto` with `verdict`, `vulnSummary`, `proofSpine` (see the sketch after this list).
* Interactions: toggling filters updates the table (Angular reactive forms + data source).
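A possible shape for the `ArtifactDto` named above is sketched here; the three fields from the bullet are kept, and the nested types are illustrative assumptions.

```typescript
// Possible shape of the ArtifactDto the artifact view binds to.
interface VulnSummary {
  blockers: number;
  withVex: number;
  unknownReachability: number;
}

interface ProofSpine {
  artifactDigest: string;
  sbomDigest: string;
  rekorUrl?: string;       // transparency-log entry, if one was published
}

interface ArtifactDto {
  reference: string;       // <registry>/<repo>:tag@sha256:...
  verdict: 'pass' | 'warn' | 'block';
  vulnSummary: VulnSummary;
  proofSpine: ProofSpine;
}
```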
**Environment / cluster view**
* Route: `/environments/:envId`
* Components:
* `EnvironmentTree` (`env → service → artifact`)
* `StatusLegend` (green / yellow / red)
* Quick drill-down: click red service → artifact view → vuln view.
This is a straightforward “topology” page spec.
---
## 3. Evidence & waivers → Reusable UI patterns
The “Evidence tabs” and “Waiver flow” sections give you **reusable patterns**:
* **Evidence tabs**:
* One generic `EvidenceTabsComponent` with five tabs:
* SBOM, Feeds, VEX, Lattice decision, History.
* Used in:
* Pipeline run details
* Artifact view
* Single-vulnerability modal
* **Waiver UX**:
* A single `WaiverDialogComponent` with:
* Scope dropdown
* Reason dropdown
* Free-text justification
* Expiry picker
* Used from:
* Run detail
* Vulnerability detail
* Environment warnings
Implement once, reuse everywhere. That keeps the UI consistent and reduces dev effort. A skeleton of the shared waiver dialog is sketched below.
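The skeleton keeps the inputs listed above (scope, reason, justification, expiry) and enforces the mandatory expiry at the UI boundary; the Angular specifics and selector name are assumptions, not an existing component.

```typescript
// Skeleton of the shared waiver dialog; form markup is omitted in this sketch.
import { Component, EventEmitter, Input, Output } from '@angular/core';

export interface WaiverRequest {
  scope: 'run' | 'application' | 'project';
  reason: 'false-positive' | 'acceptable-risk' | 'fix-planned';
  justification: string;
  expiresAt: string; // required: the dialog refuses to submit without it
}

@Component({
  selector: 'stella-waiver-dialog',
  standalone: true,
  template: `<ng-content></ng-content>`, // host pages project their own form markup
})
export class WaiverDialogComponent {
  @Input() findingId = '';
  @Output() submitted = new EventEmitter<WaiverRequest>();

  submit(request: WaiverRequest): void {
    if (!request.expiresAt) return; // enforce time-boxed waivers in the UI too
    this.submitted.emit(request);
  }
}
```

Because run detail, vulnerability detail, and environment warnings all emit the same `WaiverRequest`, the backend only ever sees one waiver contract regardless of where the request started.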
---
## 4. CLI & alerts → UI/UX boundaries
The CLI and alerting guidelines tell you **what not to overstuff into the web UI**:
* Some flows **must** be comfortable in terminal:
* Monitoring runs in CI logs, non-interactive approvals, export JSON.
* Some interactions primarily happen in:
* Slack / email (alert text pattern).
For UI design this means:
* Your web UI concentrates on:
* Exploration, debugging, visualizing proof, managing waivers and policy.
* CLI/Chat handle:
* “Fast path” operations (quick approvals, re-runs).
That clear separation prevents you from bloating the UI with obscure admin features that only scripts should use.
---
## 5. Metrics → Management / lead views
The metrics section defines:
* A dedicated **“Metrics & Reports”** area of the UI.
* Concrete charts to build:
* DORA metrics
* Exception health
* Time-to-evidence, time-to-decision
For the UI team:
* You can design one `MetricsDashboard` route, fed by `/metrics/...` endpoints.
* The drill-down requirement (“click spike → list underlying runs”) becomes:
* Link from chart points to pre-filtered `/pipelines` or `/incidents`.
Again, this is a direct translation from text to wires, not a vague “we should have some stats”.
---
## 6. How to operationalize this for Stella Ops
If you want this to directly drive implementation, you can do:
1. **Create a UI map document**
* List all routes and screens inferred above.
* For each screen:
* Purpose (which question it answers)
* Main components
* Required backend endpoints.
2. **Derive a component inventory**
* Common reusables:
* EvidenceTabs
* NextActionsSidebar
* RiskScoreBlock
* WaiverDialog
* Build them first in isolation (Storybook) to lock UX.
3. **Turn principles into checklists**
* Add “3 clicks to evidence”, “No dead ends”, “Explain why for every verdict” as:
* UI review checklists
* Acceptance criteria on tickets.
4. **Prioritize DevOps-critical flows first**
* Sprint 12:
* Pipeline list + run detail with evidence and waiver.
* Sprint 34:
* Artifact view + environment view.
* Later:
* Metrics dashboards and richer filters.
So: these guidelines are not abstract; they **define your pages, your components, your routes, and your acceptance tests**. That is exactly what your Stella Ops UI team needs to move from idea → concrete Angular screens that DevOps can actually live in every day.

View File

@@ -0,0 +1,209 @@
# Deprecation metadata schema for OpenAPI extensions
# Used by API Governance tools for deprecation tracking and notification workflows.
# Per APIGOV-63-001.
schemas:
DeprecationMetadata:
type: object
description: |
Deprecation metadata for API endpoints. Applied as x-deprecation extension
on operation objects. Used by Spectral rules, changelog generation, and
notification templates.
required:
- deprecatedAt
- sunsetAt
- successorPath
- reason
properties:
deprecatedAt:
type: string
format: date-time
description: ISO 8601 timestamp when the endpoint was marked deprecated.
example: "2025-01-15T00:00:00Z"
sunsetAt:
type: string
format: date-time
description: ISO 8601 timestamp when the endpoint will be removed.
example: "2025-07-15T00:00:00Z"
successorPath:
type: string
description: Path to the replacement endpoint (if available).
example: "/v2/resources"
successorOperationId:
type: string
description: Operation ID of the replacement endpoint.
example: "getResourcesV2"
reason:
type: string
description: Human-readable explanation for the deprecation.
example: "Replaced by paginated v2 endpoint with cursor-based pagination."
migrationGuide:
type: string
format: uri
description: URL to migration documentation.
example: "https://docs.stella-ops.org/migration/resources-v2"
notificationChannels:
type: array
description: Notification channels for deprecation announcements.
items:
type: string
enum:
- slack
- teams
- email
- webhook
default:
- email
affectedConsumerHints:
type: array
description: Hints about affected consumers (e.g., SDK names, client IDs).
items:
type: string
breakingChanges:
type: array
description: List of breaking changes in the successor endpoint.
items:
$ref: '#/schemas/BreakingChange'
BreakingChange:
type: object
description: Description of a breaking change between deprecated and successor endpoints.
required:
- type
- description
properties:
type:
type: string
enum:
- parameter-removed
- parameter-renamed
- parameter-type-changed
- response-schema-changed
- header-removed
- header-renamed
- status-code-changed
- content-type-changed
- authentication-changed
description: Category of the breaking change.
path:
type: string
description: JSON path to the affected element.
example: "$.parameters[0].name"
description:
type: string
description: Human-readable description of the change.
example: "Parameter 'page' renamed to 'cursor'"
migrationAction:
type: string
description: Recommended action for consumers.
example: "Replace 'page' parameter with 'cursor' using the nextCursor value from previous response."
DeprecationNotificationEvent:
type: object
description: Event payload for deprecation notifications sent to Notify service.
required:
- eventId
- eventType
- timestamp
- tenantId
- deprecation
properties:
eventId:
type: string
format: uuid
description: Unique identifier for this notification event.
eventType:
type: string
const: "api.deprecation.announced"
description: Event type for routing in Notify service.
timestamp:
type: string
format: date-time
description: ISO 8601 timestamp when the event was generated.
tenantId:
type: string
description: Tenant scope for the notification.
deprecation:
$ref: '#/schemas/DeprecationSummary'
DeprecationSummary:
type: object
description: Summary of a deprecated endpoint for notification purposes.
required:
- service
- path
- method
- deprecatedAt
- sunsetAt
properties:
service:
type: string
description: Service name owning the deprecated endpoint.
example: "authority"
path:
type: string
description: API path of the deprecated endpoint.
example: "/v1/tokens"
method:
type: string
enum:
- GET
- POST
- PUT
- PATCH
- DELETE
- HEAD
- OPTIONS
description: HTTP method of the deprecated endpoint.
operationId:
type: string
description: OpenAPI operation ID.
example: "createToken"
deprecatedAt:
type: string
format: date-time
sunsetAt:
type: string
format: date-time
daysUntilSunset:
type: integer
description: Computed days remaining until sunset.
example: 180
successorPath:
type: string
description: Path to the replacement endpoint.
reason:
type: string
description: Deprecation reason.
migrationGuide:
type: string
format: uri
changelogUrl:
type: string
format: uri
description: URL to the API changelog entry for this deprecation.
DeprecationReport:
type: object
description: Aggregated report of all deprecations for changelog/SDK publishing.
required:
- generatedAt
- schemaVersion
- deprecations
properties:
generatedAt:
type: string
format: date-time
description: When this report was generated.
schemaVersion:
type: string
const: "api.deprecation.report@1"
totalCount:
type: integer
description: Total number of deprecated endpoints.
upcomingSunsets:
type: integer
description: Number of endpoints with sunset within 90 days.
deprecations:
type: array
items:
$ref: '#/schemas/DeprecationSummary'

View File

@@ -19,7 +19,6 @@
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets> <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets> <PrivateAssets>all</PrivateAssets>
</PackageReference> </PackageReference>
<PackageReference Include="EphemeralMongo" Version="3.0.0" />
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>

View File

@@ -9,8 +9,6 @@
</PropertyGroup> </PropertyGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="MongoDB.Driver" Version="3.5.0" />
<PackageReference Include="EphemeralMongo" Version="3.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" /> <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
</ItemGroup> </ItemGroup>
</Project> </Project>

View File

@@ -1,10 +1,8 @@
using MongoDB.Bson;
namespace StellaOps.Bench.LinkNotMerge.Vex; namespace StellaOps.Bench.LinkNotMerge.Vex;
internal sealed class VexLinksetAggregator internal sealed class VexLinksetAggregator
{ {
public VexAggregationResult Correlate(IEnumerable<BsonDocument> documents) public VexAggregationResult Correlate(IEnumerable<VexObservationDocument> documents)
{ {
ArgumentNullException.ThrowIfNull(documents); ArgumentNullException.ThrowIfNull(documents);
@@ -13,39 +11,21 @@ internal sealed class VexLinksetAggregator
foreach (var document in documents) foreach (var document in documents)
{ {
var tenant = document.GetValue("tenant", "unknown").AsString; var tenant = document.Tenant;
var linksetValue = document.GetValue("linkset", new BsonDocument()); var aliases = document.Aliases;
var linkset = linksetValue.IsBsonDocument ? linksetValue.AsBsonDocument : new BsonDocument(); var statements = document.Statements;
var aliases = linkset.GetValue("aliases", new BsonArray()).AsBsonArray;
var statementsValue = document.GetValue("statements", new BsonArray());
var statements = statementsValue.IsBsonArray ? statementsValue.AsBsonArray : new BsonArray();
foreach (var statementValue in statements) foreach (var statementValue in statements)
{ {
if (!statementValue.IsBsonDocument)
{
continue;
}
statementsSeen++; statementsSeen++;
var statement = statementValue.AsBsonDocument; var status = statementValue.Status;
var status = statement.GetValue("status", "unknown").AsString; var justification = statementValue.Justification;
var justification = statement.GetValue("justification", BsonNull.Value); var lastUpdated = statementValue.LastUpdated;
var lastUpdated = statement.GetValue("last_updated", BsonNull.Value); var productKey = statementValue.Product.Purl;
var productValue = statement.GetValue("product", new BsonDocument());
var product = productValue.IsBsonDocument ? productValue.AsBsonDocument : new BsonDocument();
var productKey = product.GetValue("purl", "unknown").AsString;
foreach (var aliasValue in aliases) foreach (var alias in aliases)
{ {
if (!aliasValue.IsString)
{
continue;
}
var alias = aliasValue.AsString;
var key = string.Create(alias.Length + tenant.Length + productKey.Length + 2, (tenant, alias, productKey), static (span, data) => var key = string.Create(alias.Length + tenant.Length + productKey.Length + 2, (tenant, alias, productKey), static (span, data) =>
{ {
var (tenantValue, aliasValue, productValue) = data; var (tenantValue, aliasValue, productValue) = data;
@@ -70,7 +50,7 @@ internal sealed class VexLinksetAggregator
} }
} }
var eventDocuments = new List<BsonDocument>(groups.Count); var eventDocuments = new List<VexEvent>(groups.Count);
foreach (var accumulator in groups.Values) foreach (var accumulator in groups.Values)
{ {
if (accumulator.ShouldEmitEvent) if (accumulator.ShouldEmitEvent)
@@ -93,7 +73,7 @@ internal sealed class VexLinksetAggregator
private readonly string _tenant; private readonly string _tenant;
private readonly string _alias; private readonly string _alias;
private readonly string _product; private readonly string _product;
private DateTime? _latest; private DateTimeOffset? _latest;
public VexAccumulator(string tenant, string alias, string product) public VexAccumulator(string tenant, string alias, string product)
{ {
@@ -102,22 +82,22 @@ internal sealed class VexLinksetAggregator
_product = product; _product = product;
} }
public void AddStatement(string status, BsonValue justification, BsonValue updatedAt) public void AddStatement(string status, string justification, DateTimeOffset updatedAt)
{ {
if (!_statusCounts.TryAdd(status, 1)) if (!_statusCounts.TryAdd(status, 1))
{ {
_statusCounts[status]++; _statusCounts[status]++;
} }
if (justification.IsString) if (!string.IsNullOrEmpty(justification))
{ {
_justifications.Add(justification.AsString); _justifications.Add(justification);
} }
if (updatedAt.IsValidDateTime) if (updatedAt != default)
{ {
var value = updatedAt.ToUniversalTime(); var value = updatedAt.ToUniversalTime();
if (!_latest.HasValue || value > _latest) if (!_latest.HasValue || value > _latest.Value)
{ {
_latest = value; _latest = value;
} }
@@ -142,19 +122,15 @@ internal sealed class VexLinksetAggregator
} }
} }
public BsonDocument ToEvent() public VexEvent ToEvent()
{ {
var payload = new BsonDocument return new VexEvent(
{ _tenant,
["tenant"] = _tenant, _alias,
["alias"] = _alias, _product,
["product"] = _product, new Dictionary<string, int>(_statusCounts, StringComparer.Ordinal),
["statuses"] = new BsonDocument(_statusCounts.Select(kvp => new BsonElement(kvp.Key, kvp.Value))), _justifications.ToArray(),
["justifications"] = new BsonArray(_justifications.Select(justification => justification)), _latest);
["last_updated"] = _latest.HasValue ? _latest.Value : (BsonValue)BsonNull.Value,
};
return payload;
} }
} }
} }
@@ -163,4 +139,12 @@ internal sealed record VexAggregationResult(
int LinksetCount, int LinksetCount,
int StatementCount, int StatementCount,
int EventCount, int EventCount,
IReadOnlyList<BsonDocument> EventDocuments); IReadOnlyList<VexEvent> EventDocuments);
internal sealed record VexEvent(
string Tenant,
string Alias,
string Product,
IReadOnlyDictionary<string, int> Statuses,
IReadOnlyCollection<string> Justifications,
DateTimeOffset? LastUpdated);

View File

@@ -1,6 +1,6 @@
using System.Collections.Immutable; using System.Collections.Immutable;
using System.Security.Cryptography; using System.Security.Cryptography;
using MongoDB.Bson; using System.Text;
namespace StellaOps.Bench.LinkNotMerge.Vex; namespace StellaOps.Bench.LinkNotMerge.Vex;
@@ -48,8 +48,7 @@ internal static class VexObservationGenerator
var products = CreateProducts(group, revision, productsPerObservation); var products = CreateProducts(group, revision, productsPerObservation);
var statements = CreateStatements(vulnerabilityAlias, products, statementsPerObservation, random, fetchedAt); var statements = CreateStatements(vulnerabilityAlias, products, statementsPerObservation, random, fetchedAt);
var rawPayload = CreateRawPayload(upstreamId, vulnerabilityAlias, statements); var contentHash = ComputeContentHash(upstreamId, vulnerabilityAlias, statements, tenant, group, revision);
var contentHash = ComputeContentHash(rawPayload, tenant, group, revision);
var aliases = ImmutableArray.Create(vulnerabilityAlias, $"GHSA-{group:D4}-{revision % 26 + 'a'}{revision % 26 + 'a'}"); var aliases = ImmutableArray.Create(vulnerabilityAlias, $"GHSA-{group:D4}-{revision % 26 + 'a'}{revision % 26 + 'a'}");
var references = ImmutableArray.Create( var references = ImmutableArray.Create(
@@ -74,8 +73,7 @@ internal static class VexObservationGenerator
Statements: statements, Statements: statements,
References: references, References: references,
ContentFormat: "CycloneDX-VEX", ContentFormat: "CycloneDX-VEX",
SpecVersion: "1.4", SpecVersion: "1.4");
RawPayload: rawPayload);
} }
return seeds; return seeds;
@@ -93,14 +91,14 @@ internal static class VexObservationGenerator
return builder.MoveToImmutable(); return builder.MoveToImmutable();
} }
private static ImmutableArray<BsonDocument> CreateStatements( private static ImmutableArray<VexStatement> CreateStatements(
string vulnerabilityAlias, string vulnerabilityAlias,
ImmutableArray<VexProduct> products, ImmutableArray<VexProduct> products,
int statementsPerObservation, int statementsPerObservation,
Random random, Random random,
DateTimeOffset baseTime) DateTimeOffset baseTime)
{ {
var builder = ImmutableArray.CreateBuilder<BsonDocument>(statementsPerObservation); var builder = ImmutableArray.CreateBuilder<VexStatement>(statementsPerObservation);
for (var index = 0; index < statementsPerObservation; index++) for (var index = 0; index < statementsPerObservation; index++)
{ {
var statusIndex = random.Next(StatusPool.Length); var statusIndex = random.Next(StatusPool.Length);
@@ -108,52 +106,43 @@ internal static class VexObservationGenerator
var justification = JustificationPool[random.Next(JustificationPool.Length)]; var justification = JustificationPool[random.Next(JustificationPool.Length)];
var product = products[index % products.Length]; var product = products[index % products.Length];
var statementId = $"stmt-{vulnerabilityAlias}-{index:D2}"; var statementId = $"stmt-{vulnerabilityAlias}-{index:D2}";
var lastUpdated = baseTime.AddMinutes(index).ToUniversalTime();
var document = new BsonDocument builder.Add(new VexStatement(
{ StatementId: statementId,
["statement_id"] = statementId, VulnerabilityAlias: vulnerabilityAlias,
["vulnerability_alias"] = vulnerabilityAlias, Product: product,
["product"] = new BsonDocument Status: status,
{ Justification: justification,
["purl"] = product.Purl, LastUpdated: lastUpdated));
["component"] = product.Component,
["namespace"] = product.Namespace,
},
["status"] = status,
["justification"] = justification,
["impact"] = status == "affected" ? "high" : "none",
["last_updated"] = baseTime.AddMinutes(index).UtcDateTime,
};
builder.Add(document);
} }
return builder.MoveToImmutable(); return builder.MoveToImmutable();
} }
private static BsonDocument CreateRawPayload(string upstreamId, string vulnerabilityAlias, ImmutableArray<BsonDocument> statements) private static string ComputeContentHash(
{ string upstreamId,
var doc = new BsonDocument string vulnerabilityAlias,
{ ImmutableArray<VexStatement> statements,
["documentId"] = upstreamId, string tenant,
["title"] = $"Simulated VEX report {upstreamId}", int group,
["summary"] = $"Synthetic VEX payload for {vulnerabilityAlias}.", int revision)
["statements"] = new BsonArray(statements),
};
return doc;
}
private static string ComputeContentHash(BsonDocument rawPayload, string tenant, int group, int revision)
{ {
using var sha256 = SHA256.Create(); using var sha256 = SHA256.Create();
var seed = $"{tenant}|{group}|{revision}"; var builder = new StringBuilder();
var rawBytes = rawPayload.ToBson(); builder.Append(tenant).Append('|').Append(group).Append('|').Append(revision).Append('|');
var seedBytes = System.Text.Encoding.UTF8.GetBytes(seed); builder.Append(upstreamId).Append('|').Append(vulnerabilityAlias).Append('|');
var combined = new byte[rawBytes.Length + seedBytes.Length]; foreach (var statement in statements)
Buffer.BlockCopy(rawBytes, 0, combined, 0, rawBytes.Length); {
Buffer.BlockCopy(seedBytes, 0, combined, rawBytes.Length, seedBytes.Length); builder.Append(statement.StatementId).Append('|')
var hash = sha256.ComputeHash(combined); .Append(statement.Status).Append('|')
.Append(statement.Product.Purl).Append('|')
.Append(statement.Justification).Append('|')
.Append(statement.LastUpdated.ToUniversalTime().ToString("O")).Append('|');
}
var data = Encoding.UTF8.GetBytes(builder.ToString());
var hash = sha256.ComputeHash(data);
return $"sha256:{Convert.ToHexString(hash)}"; return $"sha256:{Convert.ToHexString(hash)}";
} }
} }
@@ -173,80 +162,33 @@ internal sealed record VexObservationSeed(
string VulnerabilityAlias, string VulnerabilityAlias,
ImmutableArray<string> Aliases, ImmutableArray<string> Aliases,
ImmutableArray<VexProduct> Products, ImmutableArray<VexProduct> Products,
ImmutableArray<BsonDocument> Statements, ImmutableArray<VexStatement> Statements,
ImmutableArray<VexReference> References, ImmutableArray<VexReference> References,
string ContentFormat, string ContentFormat,
string SpecVersion, string SpecVersion)
BsonDocument RawPayload)
{ {
public BsonDocument ToBsonDocument() public VexObservationDocument ToDocument()
{ {
var aliases = new BsonArray(Aliases.Select(alias => alias)); return new VexObservationDocument(
var statements = new BsonArray(Statements); Tenant,
var productsArray = new BsonArray(Products.Select(product => new BsonDocument Aliases,
{ Statements);
["purl"] = product.Purl,
["component"] = product.Component,
["namespace"] = product.Namespace,
}));
var references = new BsonArray(References.Select(reference => new BsonDocument
{
["type"] = reference.Type,
["url"] = reference.Url,
}));
var document = new BsonDocument
{
["_id"] = ObservationId,
["tenant"] = Tenant,
["source"] = new BsonDocument
{
["vendor"] = Vendor,
["stream"] = Stream,
["api"] = Api,
["collector_version"] = CollectorVersion,
},
["upstream"] = new BsonDocument
{
["upstream_id"] = UpstreamId,
["document_version"] = DocumentVersion,
["fetched_at"] = FetchedAt.UtcDateTime,
["received_at"] = ReceivedAt.UtcDateTime,
["content_hash"] = ContentHash,
["signature"] = new BsonDocument
{
["present"] = false,
["format"] = BsonNull.Value,
["key_id"] = BsonNull.Value,
["signature"] = BsonNull.Value,
},
},
["content"] = new BsonDocument
{
["format"] = ContentFormat,
["spec_version"] = SpecVersion,
["raw"] = RawPayload,
},
["identifiers"] = new BsonDocument
{
["aliases"] = aliases,
["primary"] = VulnerabilityAlias,
},
["statements"] = statements,
["linkset"] = new BsonDocument
{
["aliases"] = aliases,
["products"] = productsArray,
["references"] = references,
["reconciled_from"] = new BsonArray { "/statements" },
},
["supersedes"] = BsonNull.Value,
};
return document;
} }
} }
internal sealed record VexObservationDocument(
string Tenant,
ImmutableArray<string> Aliases,
ImmutableArray<VexStatement> Statements);
internal sealed record VexStatement(
string StatementId,
string VulnerabilityAlias,
VexProduct Product,
string Status,
string Justification,
DateTimeOffset LastUpdated);
internal sealed record VexProduct(string Purl, string Component, string Namespace); internal sealed record VexProduct(string Purl, string Component, string Namespace);
internal sealed record VexReference(string Type, string Url); internal sealed record VexReference(string Type, string Url);

View File

@@ -1,7 +1,4 @@
using System.Diagnostics; using System.Diagnostics;
using EphemeralMongo;
using MongoDB.Bson;
using MongoDB.Driver;
namespace StellaOps.Bench.LinkNotMerge.Vex; namespace StellaOps.Bench.LinkNotMerge.Vex;
@@ -29,38 +26,19 @@ internal sealed class VexScenarioRunner
var allocated = new double[iterations]; var allocated = new double[iterations];
var observationThroughputs = new double[iterations]; var observationThroughputs = new double[iterations];
var eventThroughputs = new double[iterations]; var eventThroughputs = new double[iterations];
VexAggregationResult lastAggregation = new(0, 0, 0, Array.Empty<BsonDocument>()); VexAggregationResult lastAggregation = new(0, 0, 0, Array.Empty<VexEvent>());
for (var iteration = 0; iteration < iterations; iteration++) for (var iteration = 0; iteration < iterations; iteration++)
{ {
cancellationToken.ThrowIfCancellationRequested(); cancellationToken.ThrowIfCancellationRequested();
using var runner = MongoRunner.Run(new MongoRunnerOptions
{
UseSingleNodeReplicaSet = false,
});
var client = new MongoClient(runner.ConnectionString);
var database = client.GetDatabase("linknotmerge_vex_bench");
var collection = database.GetCollection<BsonDocument>("vex_observations");
CreateIndexes(collection, cancellationToken);
var beforeAllocated = GC.GetTotalAllocatedBytes(); var beforeAllocated = GC.GetTotalAllocatedBytes();
var insertStopwatch = Stopwatch.StartNew(); var insertStopwatch = Stopwatch.StartNew();
InsertObservations(collection, _seeds, _config.ResolveBatchSize(), cancellationToken); var documents = InsertObservations(_seeds, _config.ResolveBatchSize(), cancellationToken);
insertStopwatch.Stop(); insertStopwatch.Stop();
var correlationStopwatch = Stopwatch.StartNew(); var correlationStopwatch = Stopwatch.StartNew();
var documents = collection
.Find(FilterDefinition<BsonDocument>.Empty)
.Project(Builders<BsonDocument>.Projection
.Include("tenant")
.Include("statements")
.Include("linkset"))
.ToList(cancellationToken);
var aggregator = new VexLinksetAggregator(); var aggregator = new VexLinksetAggregator();
lastAggregation = aggregator.Correlate(documents); lastAggregation = aggregator.Correlate(documents);
correlationStopwatch.Stop(); correlationStopwatch.Stop();
@@ -95,44 +73,26 @@ internal sealed class VexScenarioRunner
AggregationResult: lastAggregation); AggregationResult: lastAggregation);
} }
private static void InsertObservations( private static IReadOnlyList<VexObservationDocument> InsertObservations(
IMongoCollection<BsonDocument> collection,
IReadOnlyList<VexObservationSeed> seeds, IReadOnlyList<VexObservationSeed> seeds,
int batchSize, int batchSize,
CancellationToken cancellationToken) CancellationToken cancellationToken)
{ {
var documents = new List<VexObservationDocument>(seeds.Count);
for (var offset = 0; offset < seeds.Count; offset += batchSize) for (var offset = 0; offset < seeds.Count; offset += batchSize)
{ {
cancellationToken.ThrowIfCancellationRequested(); cancellationToken.ThrowIfCancellationRequested();
var remaining = Math.Min(batchSize, seeds.Count - offset); var remaining = Math.Min(batchSize, seeds.Count - offset);
var batch = new List<BsonDocument>(remaining); var batch = new List<VexObservationDocument>(remaining);
for (var index = 0; index < remaining; index++) for (var index = 0; index < remaining; index++)
{ {
batch.Add(seeds[offset + index].ToBsonDocument()); batch.Add(seeds[offset + index].ToDocument());
} }
collection.InsertMany(batch, new InsertManyOptions documents.AddRange(batch);
{
IsOrdered = false,
BypassDocumentValidation = true,
}, cancellationToken);
} }
}
private static void CreateIndexes(IMongoCollection<BsonDocument> collection, CancellationToken cancellationToken) return documents;
{
var indexKeys = Builders<BsonDocument>.IndexKeys
.Ascending("tenant")
.Ascending("linkset.aliases");
try
{
collection.Indexes.CreateOne(new CreateIndexModel<BsonDocument>(indexKeys), cancellationToken: cancellationToken);
}
catch
{
// non-fatal
}
} }
} }

View File

@@ -19,7 +19,6 @@
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets> <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets> <PrivateAssets>all</PrivateAssets>
</PackageReference> </PackageReference>
<PackageReference Include="EphemeralMongo" Version="3.0.0" />
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>

View File

@@ -1,7 +1,4 @@
using System.Diagnostics; using System.Diagnostics;
using EphemeralMongo;
using MongoDB.Bson;
using MongoDB.Driver;
namespace StellaOps.Bench.LinkNotMerge; namespace StellaOps.Bench.LinkNotMerge;
@@ -35,30 +32,12 @@ internal sealed class LinkNotMergeScenarioRunner
{ {
cancellationToken.ThrowIfCancellationRequested(); cancellationToken.ThrowIfCancellationRequested();
using var runner = MongoRunner.Run(new MongoRunnerOptions
{
UseSingleNodeReplicaSet = false,
});
var client = new MongoClient(runner.ConnectionString);
var database = client.GetDatabase("linknotmerge_bench");
var collection = database.GetCollection<BsonDocument>("advisory_observations");
CreateIndexes(collection, cancellationToken);
var beforeAllocated = GC.GetTotalAllocatedBytes(); var beforeAllocated = GC.GetTotalAllocatedBytes();
var insertStopwatch = Stopwatch.StartNew(); var insertStopwatch = Stopwatch.StartNew();
InsertObservations(collection, _seeds, _config.ResolveBatchSize(), cancellationToken); var documents = InsertObservations(_seeds, _config.ResolveBatchSize(), cancellationToken);
insertStopwatch.Stop(); insertStopwatch.Stop();
var correlationStopwatch = Stopwatch.StartNew(); var correlationStopwatch = Stopwatch.StartNew();
var documents = collection
.Find(FilterDefinition<BsonDocument>.Empty)
.Project(Builders<BsonDocument>.Projection
.Include("tenant")
.Include("linkset"))
.ToList(cancellationToken);
var correlator = new LinksetAggregator(); var correlator = new LinksetAggregator();
lastAggregation = correlator.Correlate(documents); lastAggregation = correlator.Correlate(documents);
correlationStopwatch.Stop(); correlationStopwatch.Stop();
@@ -92,44 +71,26 @@ internal sealed class LinkNotMergeScenarioRunner
AggregationResult: lastAggregation); AggregationResult: lastAggregation);
} }
private static void InsertObservations( private static IReadOnlyList<ObservationDocument> InsertObservations(
IMongoCollection<BsonDocument> collection,
IReadOnlyList<ObservationSeed> seeds, IReadOnlyList<ObservationSeed> seeds,
int batchSize, int batchSize,
CancellationToken cancellationToken) CancellationToken cancellationToken)
{ {
var documents = new List<ObservationDocument>(seeds.Count);
for (var offset = 0; offset < seeds.Count; offset += batchSize) for (var offset = 0; offset < seeds.Count; offset += batchSize)
{ {
cancellationToken.ThrowIfCancellationRequested(); cancellationToken.ThrowIfCancellationRequested();
var remaining = Math.Min(batchSize, seeds.Count - offset); var remaining = Math.Min(batchSize, seeds.Count - offset);
var batch = new List<BsonDocument>(remaining); var batch = new List<ObservationDocument>(remaining);
for (var index = 0; index < remaining; index++) for (var index = 0; index < remaining; index++)
{ {
batch.Add(seeds[offset + index].ToBsonDocument()); batch.Add(seeds[offset + index].ToDocument());
} }
collection.InsertMany(batch, new InsertManyOptions documents.AddRange(batch);
{
IsOrdered = false,
BypassDocumentValidation = true,
}, cancellationToken);
} }
}
private static void CreateIndexes(IMongoCollection<BsonDocument> collection, CancellationToken cancellationToken) return documents;
{
var indexKeys = Builders<BsonDocument>.IndexKeys
.Ascending("tenant")
.Ascending("identifiers.aliases");
try
{
collection.Indexes.CreateOne(new CreateIndexModel<BsonDocument>(indexKeys), cancellationToken: cancellationToken);
}
catch
{
// Index creation failures should not abort the benchmark; they may occur when running multiple iterations concurrently.
}
} }
} }

View File

@@ -1,10 +1,8 @@
using MongoDB.Bson;
namespace StellaOps.Bench.LinkNotMerge; namespace StellaOps.Bench.LinkNotMerge;
internal sealed class LinksetAggregator internal sealed class LinksetAggregator
{ {
public LinksetAggregationResult Correlate(IEnumerable<BsonDocument> documents) public LinksetAggregationResult Correlate(IEnumerable<ObservationDocument> documents)
{ {
ArgumentNullException.ThrowIfNull(documents); ArgumentNullException.ThrowIfNull(documents);
@@ -15,21 +13,16 @@ internal sealed class LinksetAggregator
{ {
totalObservations++; totalObservations++;
var tenant = document.GetValue("tenant", "unknown").AsString; var tenant = document.Tenant;
var linkset = document.GetValue("linkset", new BsonDocument()).AsBsonDocument; var linkset = document.Linkset;
var aliases = linkset.GetValue("aliases", new BsonArray()).AsBsonArray; var aliases = linkset.Aliases;
var purls = linkset.GetValue("purls", new BsonArray()).AsBsonArray; var purls = linkset.Purls;
var cpes = linkset.GetValue("cpes", new BsonArray()).AsBsonArray; var cpes = linkset.Cpes;
var references = linkset.GetValue("references", new BsonArray()).AsBsonArray; var references = linkset.References;
foreach (var aliasValue in aliases) foreach (var aliasValue in aliases)
{ {
if (!aliasValue.IsString) var alias = aliasValue;
{
continue;
}
var alias = aliasValue.AsString;
var key = string.Create(alias.Length + tenant.Length + 1, (tenant, alias), static (span, data) => var key = string.Create(alias.Length + tenant.Length + 1, (tenant, alias), static (span, data) =>
{ {
var (tenantValue, aliasValue) = data; var (tenantValue, aliasValue) = data;
@@ -91,42 +84,30 @@ internal sealed class LinksetAggregator
public int ReferenceCount => _references.Count; public int ReferenceCount => _references.Count;
public void AddPurls(BsonArray array) public void AddPurls(IEnumerable<string> array)
{ {
foreach (var item in array) foreach (var item in array)
{ {
if (item.IsString) if (!string.IsNullOrEmpty(item))
{ _purls.Add(item);
_purls.Add(item.AsString);
}
} }
} }
public void AddCpes(BsonArray array) public void AddCpes(IEnumerable<string> array)
{ {
foreach (var item in array) foreach (var item in array)
{ {
if (item.IsString) if (!string.IsNullOrEmpty(item))
{ _cpes.Add(item);
_cpes.Add(item.AsString);
}
} }
} }
public void AddReferences(BsonArray array) public void AddReferences(IEnumerable<ObservationReference> array)
{ {
foreach (var item in array) foreach (var item in array)
{ {
if (!item.IsBsonDocument) if (!string.IsNullOrEmpty(item.Url))
{ _references.Add(item.Url);
continue;
}
var document = item.AsBsonDocument;
if (document.TryGetValue("url", out var urlValue) && urlValue.IsString)
{
_references.Add(urlValue.AsString);
}
} }
} }
} }

View File

@@ -1,6 +1,6 @@
using System.Collections.Immutable; using System.Collections.Immutable;
using System.Security.Cryptography; using System.Security.Cryptography;
using MongoDB.Bson; using System.Text;
namespace StellaOps.Bench.LinkNotMerge; namespace StellaOps.Bench.LinkNotMerge;
@@ -43,8 +43,7 @@ internal static class ObservationGenerator
var cpes = CreateCpes(group, revision, cpesPerObservation); var cpes = CreateCpes(group, revision, cpesPerObservation);
var references = CreateReferences(primaryAlias, referencesPerObservation); var references = CreateReferences(primaryAlias, referencesPerObservation);
var rawPayload = CreateRawPayload(primaryAlias, vendorAlias, purls, cpes, references); var contentHash = ComputeContentHash(primaryAlias, vendorAlias, purls, cpes, references, tenant, group, revision);
var contentHash = ComputeContentHash(rawPayload, tenant, group, revision);
seeds[index] = new ObservationSeed( seeds[index] = new ObservationSeed(
ObservationId: observationId, ObservationId: observationId,
@@ -63,8 +62,7 @@ internal static class ObservationGenerator
Cpes: cpes, Cpes: cpes,
References: references, References: references,
ContentFormat: "CSAF", ContentFormat: "CSAF",
SpecVersion: "2.0", SpecVersion: "2.0");
RawPayload: rawPayload);
} }
return seeds; return seeds;
@@ -123,62 +121,37 @@ internal static class ObservationGenerator
return builder.MoveToImmutable(); return builder.MoveToImmutable();
} }
private static BsonDocument CreateRawPayload( private static string ComputeContentHash(
string primaryAlias, string primaryAlias,
string vendorAlias, string vendorAlias,
IReadOnlyCollection<string> purls, IReadOnlyCollection<string> purls,
IReadOnlyCollection<string> cpes, IReadOnlyCollection<string> cpes,
IReadOnlyCollection<ObservationReference> references) IReadOnlyCollection<ObservationReference> references,
{ string tenant,
var document = new BsonDocument int group,
{ int revision)
["id"] = primaryAlias,
["vendorId"] = vendorAlias,
["title"] = $"Simulated advisory {primaryAlias}",
["summary"] = "Synthetic payload produced by Link-Not-Merge benchmark.",
["metrics"] = new BsonArray
{
new BsonDocument
{
["kind"] = "cvss:v3.1",
["score"] = 7.5,
["vector"] = "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N",
},
},
};
if (purls.Count > 0)
{
document["purls"] = new BsonArray(purls);
}
if (cpes.Count > 0)
{
document["cpes"] = new BsonArray(cpes);
}
if (references.Count > 0)
{
document["references"] = new BsonArray(references.Select(reference => new BsonDocument
{
["type"] = reference.Type,
["url"] = reference.Url,
}));
}
return document;
}
private static string ComputeContentHash(BsonDocument rawPayload, string tenant, int group, int revision)
{ {
using var sha256 = SHA256.Create(); using var sha256 = SHA256.Create();
var seed = $"{tenant}|{group}|{revision}"; var builder = new StringBuilder();
var rawBytes = rawPayload.ToBson(); builder.Append(tenant).Append('|').Append(group).Append('|').Append(revision).Append('|');
var seedBytes = System.Text.Encoding.UTF8.GetBytes(seed); builder.Append(primaryAlias).Append('|').Append(vendorAlias).Append('|');
var combined = new byte[rawBytes.Length + seedBytes.Length]; foreach (var purl in purls)
Buffer.BlockCopy(rawBytes, 0, combined, 0, rawBytes.Length); {
Buffer.BlockCopy(seedBytes, 0, combined, rawBytes.Length, seedBytes.Length); builder.Append(purl).Append('|');
var hash = sha256.ComputeHash(combined); }
foreach (var cpe in cpes)
{
builder.Append(cpe).Append('|');
}
foreach (var reference in references)
{
builder.Append(reference.Type).Append(':').Append(reference.Url).Append('|');
}
var data = Encoding.UTF8.GetBytes(builder.ToString());
var hash = sha256.ComputeHash(data);
return $"sha256:{Convert.ToHexString(hash)}"; return $"sha256:{Convert.ToHexString(hash)}";
} }
} }
@@ -200,71 +173,26 @@ internal sealed record ObservationSeed(
ImmutableArray<string> Cpes, ImmutableArray<string> Cpes,
ImmutableArray<ObservationReference> References, ImmutableArray<ObservationReference> References,
string ContentFormat, string ContentFormat,
string SpecVersion, string SpecVersion)
BsonDocument RawPayload)
{ {
public BsonDocument ToBsonDocument() public ObservationDocument ToDocument()
{ {
var aliases = new BsonArray(Aliases.Select(alias => alias)); return new ObservationDocument(
var purls = new BsonArray(Purls.Select(purl => purl)); Tenant,
var cpes = new BsonArray(Cpes.Select(cpe => cpe)); new LinksetDocument(
var references = new BsonArray(References.Select(reference => new BsonDocument Aliases,
{ Purls,
["type"] = reference.Type, Cpes,
["url"] = reference.Url, References));
}));
var document = new BsonDocument
{
["_id"] = ObservationId,
["tenant"] = Tenant,
["source"] = new BsonDocument
{
["vendor"] = Vendor,
["stream"] = Stream,
["api"] = Api,
["collector_version"] = CollectorVersion,
},
["upstream"] = new BsonDocument
{
["upstream_id"] = UpstreamId,
["document_version"] = DocumentVersion,
["fetched_at"] = FetchedAt.UtcDateTime,
["received_at"] = ReceivedAt.UtcDateTime,
["content_hash"] = ContentHash,
["signature"] = new BsonDocument
{
["present"] = false,
["format"] = BsonNull.Value,
["key_id"] = BsonNull.Value,
["signature"] = BsonNull.Value,
},
},
["content"] = new BsonDocument
{
["format"] = ContentFormat,
["spec_version"] = SpecVersion,
["raw"] = RawPayload,
},
["identifiers"] = new BsonDocument
{
["aliases"] = aliases,
["primary"] = UpstreamId,
["cve"] = Aliases.FirstOrDefault(alias => alias.StartsWith("CVE-", StringComparison.Ordinal)) ?? UpstreamId,
},
["linkset"] = new BsonDocument
{
["aliases"] = aliases,
["purls"] = purls,
["cpes"] = cpes,
["references"] = references,
["reconciled_from"] = new BsonArray { "/content/product_tree" },
},
["supersedes"] = BsonNull.Value,
};
return document;
} }
} }
internal sealed record ObservationDocument(string Tenant, LinksetDocument Linkset);
internal sealed record LinksetDocument(
ImmutableArray<string> Aliases,
ImmutableArray<string> Purls,
ImmutableArray<string> Cpes,
ImmutableArray<ObservationReference> References);
internal sealed record ObservationReference(string Type, string Url); internal sealed record ObservationReference(string Type, string Url);

View File

@@ -9,8 +9,6 @@
</PropertyGroup> </PropertyGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="MongoDB.Driver" Version="3.5.0" />
<PackageReference Include="EphemeralMongo" Version="3.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" /> <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
</ItemGroup> </ItemGroup>
</Project> </Project>

View File

@@ -1,35 +0,0 @@
using StellaOps.IssuerDirectory.Core.Abstractions;
using StellaOps.IssuerDirectory.Core.Domain;
using StellaOps.IssuerDirectory.Infrastructure.Documents;
using StellaOps.IssuerDirectory.Infrastructure.Internal;
namespace StellaOps.IssuerDirectory.Infrastructure.Audit;
public sealed class MongoIssuerAuditSink : IIssuerAuditSink
{
private readonly IssuerDirectoryMongoContext _context;
public MongoIssuerAuditSink(IssuerDirectoryMongoContext context)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
}
public async Task WriteAsync(IssuerAuditEntry entry, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(entry);
var document = new IssuerAuditDocument
{
Id = Guid.NewGuid().ToString("N"),
TenantId = entry.TenantId,
IssuerId = entry.IssuerId,
Action = entry.Action,
TimestampUtc = entry.TimestampUtc,
Actor = entry.Actor,
Reason = entry.Reason,
Metadata = new Dictionary<string, string>(entry.Metadata, StringComparer.OrdinalIgnoreCase)
};
await _context.Audits.InsertOneAsync(document, cancellationToken: cancellationToken).ConfigureAwait(false);
}
}

View File

@@ -1,31 +0,0 @@
using MongoDB.Bson.Serialization.Attributes;
namespace StellaOps.IssuerDirectory.Infrastructure.Documents;
[BsonIgnoreExtraElements]
public sealed class IssuerAuditDocument
{
[BsonId]
public string Id { get; set; } = Guid.NewGuid().ToString("N");
[BsonElement("tenant_id")]
public string TenantId { get; set; } = string.Empty;
[BsonElement("issuer_id")]
public string IssuerId { get; set; } = string.Empty;
[BsonElement("action")]
public string Action { get; set; } = string.Empty;
[BsonElement("timestamp")]
public DateTimeOffset TimestampUtc { get; set; }
[BsonElement("actor")]
public string Actor { get; set; } = string.Empty;
[BsonElement("reason")]
public string? Reason { get; set; }
[BsonElement("metadata")]
public Dictionary<string, string> Metadata { get; set; } = new(StringComparer.OrdinalIgnoreCase);
}

View File

@@ -1,103 +0,0 @@
using MongoDB.Bson.Serialization.Attributes;
namespace StellaOps.IssuerDirectory.Infrastructure.Documents;
[BsonIgnoreExtraElements]
public sealed class IssuerDocument
{
[BsonId]
public string Id { get; set; } = string.Empty;
[BsonElement("tenant_id")]
public string TenantId { get; set; } = string.Empty;
[BsonElement("display_name")]
public string DisplayName { get; set; } = string.Empty;
[BsonElement("slug")]
public string Slug { get; set; } = string.Empty;
[BsonElement("description")]
public string? Description { get; set; }
[BsonElement("contact")]
public IssuerContactDocument Contact { get; set; } = new();
[BsonElement("metadata")]
public IssuerMetadataDocument Metadata { get; set; } = new();
[BsonElement("endpoints")]
public List<IssuerEndpointDocument> Endpoints { get; set; } = new();
[BsonElement("tags")]
public List<string> Tags { get; set; } = new();
[BsonElement("created_at")]
public DateTimeOffset CreatedAtUtc { get; set; }
[BsonElement("created_by")]
public string CreatedBy { get; set; } = string.Empty;
[BsonElement("updated_at")]
public DateTimeOffset UpdatedAtUtc { get; set; }
[BsonElement("updated_by")]
public string UpdatedBy { get; set; } = string.Empty;
[BsonElement("is_seed")]
public bool IsSystemSeed { get; set; }
}
[BsonIgnoreExtraElements]
public sealed class IssuerContactDocument
{
[BsonElement("email")]
public string? Email { get; set; }
[BsonElement("phone")]
public string? Phone { get; set; }
[BsonElement("website")]
public string? Website { get; set; }
[BsonElement("timezone")]
public string? Timezone { get; set; }
}
[BsonIgnoreExtraElements]
public sealed class IssuerMetadataDocument
{
[BsonElement("cve_org_id")]
public string? CveOrgId { get; set; }
[BsonElement("csaf_publisher_id")]
public string? CsafPublisherId { get; set; }
[BsonElement("security_advisories_url")]
public string? SecurityAdvisoriesUrl { get; set; }
[BsonElement("catalog_url")]
public string? CatalogUrl { get; set; }
[BsonElement("languages")]
public List<string> Languages { get; set; } = new();
[BsonElement("attributes")]
public Dictionary<string, string> Attributes { get; set; } = new(StringComparer.OrdinalIgnoreCase);
}
[BsonIgnoreExtraElements]
public sealed class IssuerEndpointDocument
{
[BsonElement("kind")]
public string Kind { get; set; } = string.Empty;
[BsonElement("url")]
public string Url { get; set; } = string.Empty;
[BsonElement("format")]
public string? Format { get; set; }
[BsonElement("requires_auth")]
public bool RequiresAuthentication { get; set; }
}

View File

@@ -1,55 +0,0 @@
using MongoDB.Bson.Serialization.Attributes;
namespace StellaOps.IssuerDirectory.Infrastructure.Documents;
[BsonIgnoreExtraElements]
public sealed class IssuerKeyDocument
{
[BsonId]
public string Id { get; set; } = string.Empty;
[BsonElement("issuer_id")]
public string IssuerId { get; set; } = string.Empty;
[BsonElement("tenant_id")]
public string TenantId { get; set; } = string.Empty;
[BsonElement("type")]
public string Type { get; set; } = string.Empty;
[BsonElement("status")]
public string Status { get; set; } = string.Empty;
[BsonElement("material_format")]
public string MaterialFormat { get; set; } = string.Empty;
[BsonElement("material_value")]
public string MaterialValue { get; set; } = string.Empty;
[BsonElement("fingerprint")]
public string Fingerprint { get; set; } = string.Empty;
[BsonElement("created_at")]
public DateTimeOffset CreatedAtUtc { get; set; }
[BsonElement("created_by")]
public string CreatedBy { get; set; } = string.Empty;
[BsonElement("updated_at")]
public DateTimeOffset UpdatedAtUtc { get; set; }
[BsonElement("updated_by")]
public string UpdatedBy { get; set; } = string.Empty;
[BsonElement("expires_at")]
public DateTimeOffset? ExpiresAtUtc { get; set; }
[BsonElement("retired_at")]
public DateTimeOffset? RetiredAtUtc { get; set; }
[BsonElement("revoked_at")]
public DateTimeOffset? RevokedAtUtc { get; set; }
[BsonElement("replaces_key_id")]
public string? ReplacesKeyId { get; set; }
}

View File

@@ -1,34 +0,0 @@
using MongoDB.Bson.Serialization.Attributes;
namespace StellaOps.IssuerDirectory.Infrastructure.Documents;
[BsonIgnoreExtraElements]
public sealed class IssuerTrustDocument
{
[BsonId]
public string Id { get; set; } = string.Empty;
[BsonElement("issuer_id")]
public string IssuerId { get; set; } = string.Empty;
[BsonElement("tenant_id")]
public string TenantId { get; set; } = string.Empty;
[BsonElement("weight")]
public decimal Weight { get; set; }
[BsonElement("reason")]
public string? Reason { get; set; }
[BsonElement("created_at")]
public DateTimeOffset CreatedAtUtc { get; set; }
[BsonElement("created_by")]
public string CreatedBy { get; set; } = string.Empty;
[BsonElement("updated_at")]
public DateTimeOffset UpdatedAtUtc { get; set; }
[BsonElement("updated_by")]
public string UpdatedBy { get; set; } = string.Empty;
}

View File

@@ -0,0 +1,27 @@
using System.Collections.Concurrent;
using StellaOps.IssuerDirectory.Core.Abstractions;
using StellaOps.IssuerDirectory.Core.Domain;
namespace StellaOps.IssuerDirectory.Infrastructure.InMemory;
/// <summary>
/// In-memory audit sink; retains last N entries for inspection/testing.
/// </summary>
internal sealed class InMemoryIssuerAuditSink : IIssuerAuditSink
{
private readonly ConcurrentQueue<IssuerAuditEntry> _entries = new();
private const int MaxEntries = 1024;
public Task WriteAsync(IssuerAuditEntry entry, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(entry);
_entries.Enqueue(entry);
while (_entries.Count > MaxEntries && _entries.TryDequeue(out _))
{
// drop oldest to bound memory
}
return Task.CompletedTask;
}
}

View File

@@ -0,0 +1,88 @@
using System.Collections.Concurrent;
using StellaOps.IssuerDirectory.Core.Abstractions;
using StellaOps.IssuerDirectory.Core.Domain;
namespace StellaOps.IssuerDirectory.Infrastructure.InMemory;
/// <summary>
/// Deterministic in-memory issuer key store used as a Mongo replacement.
/// </summary>
internal sealed class InMemoryIssuerKeyRepository : IIssuerKeyRepository
{
private readonly ConcurrentDictionary<string, ConcurrentDictionary<string, IssuerKeyRecord>> _keys = new(StringComparer.Ordinal);
public Task<IssuerKeyRecord?> GetAsync(string tenantId, string issuerId, string keyId, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
ArgumentException.ThrowIfNullOrWhiteSpace(issuerId);
ArgumentException.ThrowIfNullOrWhiteSpace(keyId);
var bucketKey = GetBucketKey(tenantId, issuerId);
if (_keys.TryGetValue(bucketKey, out var map) && map.TryGetValue(keyId, out var record))
{
return Task.FromResult<IssuerKeyRecord?>(record);
}
return Task.FromResult<IssuerKeyRecord?>(null);
}
public Task<IssuerKeyRecord?> GetByFingerprintAsync(string tenantId, string issuerId, string fingerprint, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
ArgumentException.ThrowIfNullOrWhiteSpace(issuerId);
ArgumentException.ThrowIfNullOrWhiteSpace(fingerprint);
var bucketKey = GetBucketKey(tenantId, issuerId);
if (_keys.TryGetValue(bucketKey, out var map))
{
var match = map.Values.FirstOrDefault(key => string.Equals(key.Fingerprint, fingerprint, StringComparison.Ordinal));
return Task.FromResult<IssuerKeyRecord?>(match);
}
return Task.FromResult<IssuerKeyRecord?>(null);
}
public Task<IReadOnlyCollection<IssuerKeyRecord>> ListAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
ArgumentException.ThrowIfNullOrWhiteSpace(issuerId);
var bucketKey = GetBucketKey(tenantId, issuerId);
if (_keys.TryGetValue(bucketKey, out var map))
{
var ordered = map.Values.OrderBy(k => k.Id, StringComparer.Ordinal).ToArray();
return Task.FromResult<IReadOnlyCollection<IssuerKeyRecord>>(ordered);
}
return Task.FromResult<IReadOnlyCollection<IssuerKeyRecord>>(Array.Empty<IssuerKeyRecord>());
}
public Task<IReadOnlyCollection<IssuerKeyRecord>> ListGlobalAsync(string issuerId, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(issuerId);
var all = _keys.Values
.SelectMany(dict => dict.Values)
.Where(k => string.Equals(k.IssuerId, issuerId, StringComparison.Ordinal))
.OrderBy(k => k.TenantId, StringComparer.Ordinal)
.ThenBy(k => k.Id, StringComparer.Ordinal)
.ToArray();
return Task.FromResult<IReadOnlyCollection<IssuerKeyRecord>>(all);
}
public Task UpsertAsync(IssuerKeyRecord record, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(record);
var bucketKey = GetBucketKey(record.TenantId, record.IssuerId);
var map = _keys.GetOrAdd(bucketKey, _ => new ConcurrentDictionary<string, IssuerKeyRecord>(StringComparer.Ordinal));
map.AddOrUpdate(record.Id, record, (_, _) => record);
return Task.CompletedTask;
}
private static string GetBucketKey(string tenantId, string issuerId)
{
return $"{tenantId}|{issuerId}";
}
}

View File

@@ -0,0 +1,72 @@
using System.Collections.Concurrent;
using StellaOps.IssuerDirectory.Core.Abstractions;
using StellaOps.IssuerDirectory.Core.Domain;
namespace StellaOps.IssuerDirectory.Infrastructure.InMemory;
/// <summary>
/// Deterministic in-memory issuer store used as a Mongo replacement.
/// </summary>
internal sealed class InMemoryIssuerRepository : IIssuerRepository
{
private readonly ConcurrentDictionary<string, ConcurrentDictionary<string, IssuerRecord>> _issuers = new(StringComparer.Ordinal);
public Task<IssuerRecord?> GetAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
ArgumentException.ThrowIfNullOrWhiteSpace(issuerId);
if (_issuers.TryGetValue(tenantId, out var map) && map.TryGetValue(issuerId, out var record))
{
return Task.FromResult<IssuerRecord?>(record);
}
return Task.FromResult<IssuerRecord?>(null);
}
public Task<IReadOnlyCollection<IssuerRecord>> ListAsync(string tenantId, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
if (_issuers.TryGetValue(tenantId, out var map))
{
var ordered = map.Values.OrderBy(r => r.Id, StringComparer.Ordinal).ToArray();
return Task.FromResult<IReadOnlyCollection<IssuerRecord>>(ordered);
}
return Task.FromResult<IReadOnlyCollection<IssuerRecord>>(Array.Empty<IssuerRecord>());
}
public Task<IReadOnlyCollection<IssuerRecord>> ListGlobalAsync(CancellationToken cancellationToken)
{
var ordered = _issuers.Values
.SelectMany(dict => dict.Values)
.OrderBy(r => r.TenantId, StringComparer.Ordinal)
.ThenBy(r => r.Id, StringComparer.Ordinal)
.ToArray();
return Task.FromResult<IReadOnlyCollection<IssuerRecord>>(ordered);
}
public Task UpsertAsync(IssuerRecord record, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(record);
var tenantMap = _issuers.GetOrAdd(record.TenantId, _ => new ConcurrentDictionary<string, IssuerRecord>(StringComparer.Ordinal));
tenantMap.AddOrUpdate(record.Id, record, (_, _) => record);
return Task.CompletedTask;
}
public Task DeleteAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
ArgumentException.ThrowIfNullOrWhiteSpace(issuerId);
if (_issuers.TryGetValue(tenantId, out var map))
{
map.TryRemove(issuerId, out _);
}
return Task.CompletedTask;
}
}

View File

@@ -0,0 +1,42 @@
using System.Collections.Concurrent;
using StellaOps.IssuerDirectory.Core.Abstractions;
using StellaOps.IssuerDirectory.Core.Domain;
namespace StellaOps.IssuerDirectory.Infrastructure.InMemory;
/// <summary>
/// Deterministic in-memory trust override store used as a Mongo replacement.
/// </summary>
internal sealed class InMemoryIssuerTrustRepository : IIssuerTrustRepository
{
private readonly ConcurrentDictionary<string, IssuerTrustOverrideRecord> _trust = new(StringComparer.Ordinal);
public Task<IssuerTrustOverrideRecord?> GetAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
ArgumentException.ThrowIfNullOrWhiteSpace(issuerId);
var key = GetKey(tenantId, issuerId);
return Task.FromResult(_trust.TryGetValue(key, out var record) ? record : null);
}
public Task UpsertAsync(IssuerTrustOverrideRecord record, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(record);
var key = GetKey(record.TenantId, record.IssuerId);
_trust[key] = record;
return Task.CompletedTask;
}
public Task DeleteAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
ArgumentException.ThrowIfNullOrWhiteSpace(issuerId);
var key = GetKey(tenantId, issuerId);
_trust.TryRemove(key, out _);
return Task.CompletedTask;
}
private static string GetKey(string tenantId, string issuerId) => $"{tenantId}|{issuerId}";
}

View File

@@ -1,103 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using MongoDB.Driver;
using StellaOps.IssuerDirectory.Infrastructure.Documents;
using StellaOps.IssuerDirectory.Infrastructure.Options;
namespace StellaOps.IssuerDirectory.Infrastructure.Internal;
/// <summary>
/// MongoDB context for Issuer Directory persistence.
/// </summary>
public sealed class IssuerDirectoryMongoContext
{
public IssuerDirectoryMongoContext(
IOptions<IssuerDirectoryMongoOptions> options,
ILogger<IssuerDirectoryMongoContext> logger)
{
ArgumentNullException.ThrowIfNull(options);
ArgumentNullException.ThrowIfNull(logger);
var value = options.Value ?? throw new InvalidOperationException("Mongo options must be provided.");
value.Validate();
var mongoUrl = new MongoUrl(value.ConnectionString);
var settings = MongoClientSettings.FromUrl(mongoUrl);
if (mongoUrl.UseTls is true && settings.SslSettings is not null)
{
settings.SslSettings.CheckCertificateRevocation = true;
}
var client = new MongoClient(settings);
var database = client.GetDatabase(value.Database);
logger.LogDebug("IssuerDirectory Mongo connected to {Database}", value.Database);
Issuers = database.GetCollection<IssuerDocument>(value.IssuersCollection);
IssuerKeys = database.GetCollection<IssuerKeyDocument>(value.IssuerKeysCollection);
IssuerTrustOverrides = database.GetCollection<IssuerTrustDocument>(value.IssuerTrustCollection);
Audits = database.GetCollection<IssuerAuditDocument>(value.AuditCollection);
EnsureIndexes().GetAwaiter().GetResult();
}
public IMongoCollection<IssuerDocument> Issuers { get; }
public IMongoCollection<IssuerKeyDocument> IssuerKeys { get; }
public IMongoCollection<IssuerTrustDocument> IssuerTrustOverrides { get; }
public IMongoCollection<IssuerAuditDocument> Audits { get; }
private async Task EnsureIndexes()
{
var tenantSlugIndex = new CreateIndexModel<IssuerDocument>(
Builders<IssuerDocument>.IndexKeys
.Ascending(document => document.TenantId)
.Ascending(document => document.Slug),
new CreateIndexOptions<IssuerDocument>
{
Name = "tenant_slug_unique",
Unique = true
});
await Issuers.Indexes.CreateOneAsync(tenantSlugIndex).ConfigureAwait(false);
var keyIndex = new CreateIndexModel<IssuerKeyDocument>(
Builders<IssuerKeyDocument>.IndexKeys
.Ascending(document => document.TenantId)
.Ascending(document => document.IssuerId)
.Ascending(document => document.Id),
new CreateIndexOptions<IssuerKeyDocument>
{
Name = "issuer_keys_unique",
Unique = true
});
var fingerprintIndex = new CreateIndexModel<IssuerKeyDocument>(
Builders<IssuerKeyDocument>.IndexKeys
.Ascending(document => document.TenantId)
.Ascending(document => document.IssuerId)
.Ascending(document => document.Fingerprint),
new CreateIndexOptions<IssuerKeyDocument>
{
Name = "issuer_keys_fingerprint",
Unique = true
});
await IssuerKeys.Indexes.CreateOneAsync(keyIndex).ConfigureAwait(false);
await IssuerKeys.Indexes.CreateOneAsync(fingerprintIndex).ConfigureAwait(false);
var trustIndex = new CreateIndexModel<IssuerTrustDocument>(
Builders<IssuerTrustDocument>.IndexKeys
.Ascending(document => document.TenantId)
.Ascending(document => document.IssuerId),
new CreateIndexOptions<IssuerTrustDocument>
{
Name = "issuer_trust_unique",
Unique = true
});
await IssuerTrustOverrides.Indexes.CreateOneAsync(trustIndex).ConfigureAwait(false);
}
}

View File

@@ -1,54 +0,0 @@
namespace StellaOps.IssuerDirectory.Infrastructure.Options;
/// <summary>
/// Mongo persistence configuration for the Issuer Directory service.
/// </summary>
public sealed class IssuerDirectoryMongoOptions
{
public const string SectionName = "IssuerDirectory:Mongo";
public string ConnectionString { get; set; } = "mongodb://localhost:27017";
public string Database { get; set; } = "issuer-directory";
public string IssuersCollection { get; set; } = "issuers";
public string IssuerKeysCollection { get; set; } = "issuer_keys";
public string IssuerTrustCollection { get; set; } = "issuer_trust_overrides";
public string AuditCollection { get; set; } = "issuer_audit";
public void Validate()
{
if (string.IsNullOrWhiteSpace(ConnectionString))
{
throw new InvalidOperationException("IssuerDirectory Mongo connection string must be configured.");
}
if (string.IsNullOrWhiteSpace(Database))
{
throw new InvalidOperationException("IssuerDirectory Mongo database must be configured.");
}
if (string.IsNullOrWhiteSpace(IssuersCollection))
{
throw new InvalidOperationException("IssuerDirectory Mongo issuers collection must be configured.");
}
if (string.IsNullOrWhiteSpace(IssuerKeysCollection))
{
throw new InvalidOperationException("IssuerDirectory Mongo issuer keys collection must be configured.");
}
if (string.IsNullOrWhiteSpace(IssuerTrustCollection))
{
throw new InvalidOperationException("IssuerDirectory Mongo issuer trust collection must be configured.");
}
if (string.IsNullOrWhiteSpace(AuditCollection))
{
throw new InvalidOperationException("IssuerDirectory Mongo audit collection must be configured.");
}
}
}

View File

@@ -1,131 +0,0 @@
using MongoDB.Driver;
using StellaOps.IssuerDirectory.Core.Abstractions;
using StellaOps.IssuerDirectory.Core.Domain;
using StellaOps.IssuerDirectory.Infrastructure.Documents;
using StellaOps.IssuerDirectory.Infrastructure.Internal;
namespace StellaOps.IssuerDirectory.Infrastructure.Repositories;
public sealed class MongoIssuerKeyRepository : IIssuerKeyRepository
{
private readonly IssuerDirectoryMongoContext _context;
public MongoIssuerKeyRepository(IssuerDirectoryMongoContext context)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
}
public async Task<IssuerKeyRecord?> GetAsync(string tenantId, string issuerId, string keyId, CancellationToken cancellationToken)
{
var filter = Builders<IssuerKeyDocument>.Filter.And(
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.TenantId, tenantId),
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.IssuerId, issuerId),
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.Id, keyId));
var document = await _context.IssuerKeys.Find(filter).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
return document is null ? null : MapToDomain(document);
}
public async Task<IssuerKeyRecord?> GetByFingerprintAsync(string tenantId, string issuerId, string fingerprint, CancellationToken cancellationToken)
{
var filter = Builders<IssuerKeyDocument>.Filter.And(
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.TenantId, tenantId),
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.IssuerId, issuerId),
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.Fingerprint, fingerprint));
var document = await _context.IssuerKeys.Find(filter).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
return document is null ? null : MapToDomain(document);
}
public async Task<IReadOnlyCollection<IssuerKeyRecord>> ListAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
var filter = Builders<IssuerKeyDocument>.Filter.And(
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.TenantId, tenantId),
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.IssuerId, issuerId));
var documents = await _context.IssuerKeys
.Find(filter)
.SortBy(document => document.CreatedAtUtc)
.ToListAsync(cancellationToken)
.ConfigureAwait(false);
return documents.Select(MapToDomain).ToArray();
}
public async Task<IReadOnlyCollection<IssuerKeyRecord>> ListGlobalAsync(string issuerId, CancellationToken cancellationToken)
{
var filter = Builders<IssuerKeyDocument>.Filter.And(
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.TenantId, IssuerTenants.Global),
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.IssuerId, issuerId));
var documents = await _context.IssuerKeys
.Find(filter)
.SortBy(document => document.CreatedAtUtc)
.ToListAsync(cancellationToken)
.ConfigureAwait(false);
return documents.Select(MapToDomain).ToArray();
}
public async Task UpsertAsync(IssuerKeyRecord record, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(record);
var document = MapToDocument(record);
var filter = Builders<IssuerKeyDocument>.Filter.And(
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.TenantId, record.TenantId),
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.IssuerId, record.IssuerId),
Builders<IssuerKeyDocument>.Filter.Eq(doc => doc.Id, record.Id));
await _context.IssuerKeys.ReplaceOneAsync(
filter,
document,
new ReplaceOptions { IsUpsert = true },
cancellationToken).ConfigureAwait(false);
}
private static IssuerKeyRecord MapToDomain(IssuerKeyDocument document)
{
return new IssuerKeyRecord
{
Id = document.Id,
IssuerId = document.IssuerId,
TenantId = document.TenantId,
Type = Enum.Parse<IssuerKeyType>(document.Type, ignoreCase: true),
Status = Enum.Parse<IssuerKeyStatus>(document.Status, ignoreCase: true),
Material = new IssuerKeyMaterial(document.MaterialFormat, document.MaterialValue),
Fingerprint = document.Fingerprint,
CreatedAtUtc = document.CreatedAtUtc,
CreatedBy = document.CreatedBy,
UpdatedAtUtc = document.UpdatedAtUtc,
UpdatedBy = document.UpdatedBy,
ExpiresAtUtc = document.ExpiresAtUtc,
RetiredAtUtc = document.RetiredAtUtc,
RevokedAtUtc = document.RevokedAtUtc,
ReplacesKeyId = document.ReplacesKeyId
};
}
private static IssuerKeyDocument MapToDocument(IssuerKeyRecord record)
{
return new IssuerKeyDocument
{
Id = record.Id,
IssuerId = record.IssuerId,
TenantId = record.TenantId,
Type = record.Type.ToString(),
Status = record.Status.ToString(),
MaterialFormat = record.Material.Format,
MaterialValue = record.Material.Value,
Fingerprint = record.Fingerprint,
CreatedAtUtc = record.CreatedAtUtc,
CreatedBy = record.CreatedBy,
UpdatedAtUtc = record.UpdatedAtUtc,
UpdatedBy = record.UpdatedBy,
ExpiresAtUtc = record.ExpiresAtUtc,
RetiredAtUtc = record.RetiredAtUtc,
RevokedAtUtc = record.RevokedAtUtc,
ReplacesKeyId = record.ReplacesKeyId
};
}
}


@@ -1,177 +0,0 @@
using MongoDB.Driver;
using StellaOps.IssuerDirectory.Core.Abstractions;
using StellaOps.IssuerDirectory.Core.Domain;
using StellaOps.IssuerDirectory.Infrastructure.Documents;
using StellaOps.IssuerDirectory.Infrastructure.Internal;
namespace StellaOps.IssuerDirectory.Infrastructure.Repositories;
public sealed class MongoIssuerRepository : IIssuerRepository
{
private readonly IssuerDirectoryMongoContext _context;
public MongoIssuerRepository(IssuerDirectoryMongoContext context)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
}
public async Task<IssuerRecord?> GetAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
var filter = Builders<IssuerDocument>.Filter.And(
Builders<IssuerDocument>.Filter.Eq(doc => doc.TenantId, tenantId),
Builders<IssuerDocument>.Filter.Eq(doc => doc.Id, issuerId));
var cursor = await _context.Issuers
.Find(filter)
.Limit(1)
.FirstOrDefaultAsync(cancellationToken)
.ConfigureAwait(false);
return cursor is null ? null : MapToDomain(cursor);
}
public async Task<IReadOnlyCollection<IssuerRecord>> ListAsync(string tenantId, CancellationToken cancellationToken)
{
var filter = Builders<IssuerDocument>.Filter.Eq(doc => doc.TenantId, tenantId);
var documents = await _context.Issuers.Find(filter)
.SortBy(doc => doc.Slug)
.ToListAsync(cancellationToken)
.ConfigureAwait(false);
return documents.Select(MapToDomain).ToArray();
}
public async Task<IReadOnlyCollection<IssuerRecord>> ListGlobalAsync(CancellationToken cancellationToken)
{
var documents = await _context.Issuers
.Find(doc => doc.TenantId == IssuerTenants.Global)
.SortBy(doc => doc.Slug)
.ToListAsync(cancellationToken)
.ConfigureAwait(false);
return documents.Select(MapToDomain).ToArray();
}
public async Task UpsertAsync(IssuerRecord record, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(record);
var document = MapToDocument(record);
var filter = Builders<IssuerDocument>.Filter.And(
Builders<IssuerDocument>.Filter.Eq(doc => doc.TenantId, record.TenantId),
Builders<IssuerDocument>.Filter.Eq(doc => doc.Id, record.Id));
await _context.Issuers
.ReplaceOneAsync(
filter,
document,
new ReplaceOptions { IsUpsert = true },
cancellationToken)
.ConfigureAwait(false);
}
public async Task DeleteAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
var filter = Builders<IssuerDocument>.Filter.And(
Builders<IssuerDocument>.Filter.Eq(doc => doc.TenantId, tenantId),
Builders<IssuerDocument>.Filter.Eq(doc => doc.Id, issuerId));
await _context.Issuers.DeleteOneAsync(filter, cancellationToken).ConfigureAwait(false);
}
private static IssuerRecord MapToDomain(IssuerDocument document)
{
var contact = new IssuerContact(
document.Contact.Email,
document.Contact.Phone,
string.IsNullOrWhiteSpace(document.Contact.Website) ? null : new Uri(document.Contact.Website),
document.Contact.Timezone);
var metadata = new IssuerMetadata(
document.Metadata.CveOrgId,
document.Metadata.CsafPublisherId,
string.IsNullOrWhiteSpace(document.Metadata.SecurityAdvisoriesUrl)
? null
: new Uri(document.Metadata.SecurityAdvisoriesUrl),
string.IsNullOrWhiteSpace(document.Metadata.CatalogUrl)
? null
: new Uri(document.Metadata.CatalogUrl),
document.Metadata.Languages,
document.Metadata.Attributes);
var endpoints = document.Endpoints
.Select(endpoint => new IssuerEndpoint(
endpoint.Kind,
new Uri(endpoint.Url),
endpoint.Format,
endpoint.RequiresAuthentication))
.ToArray();
return new IssuerRecord
{
Id = document.Id,
TenantId = document.TenantId,
DisplayName = document.DisplayName,
Slug = document.Slug,
Description = document.Description,
Contact = contact,
Metadata = metadata,
Endpoints = endpoints,
Tags = document.Tags,
CreatedAtUtc = document.CreatedAtUtc,
CreatedBy = document.CreatedBy,
UpdatedAtUtc = document.UpdatedAtUtc,
UpdatedBy = document.UpdatedBy,
IsSystemSeed = document.IsSystemSeed
};
}
private static IssuerDocument MapToDocument(IssuerRecord record)
{
var contact = new IssuerContactDocument
{
Email = record.Contact.Email,
Phone = record.Contact.Phone,
Website = record.Contact.Website?.ToString(),
Timezone = record.Contact.Timezone
};
var metadataDocument = new IssuerMetadataDocument
{
CveOrgId = record.Metadata.CveOrgId,
CsafPublisherId = record.Metadata.CsafPublisherId,
SecurityAdvisoriesUrl = record.Metadata.SecurityAdvisoriesUrl?.ToString(),
CatalogUrl = record.Metadata.CatalogUrl?.ToString(),
Languages = record.Metadata.SupportedLanguages.ToList(),
Attributes = new Dictionary<string, string>(record.Metadata.Attributes, StringComparer.OrdinalIgnoreCase)
};
var endpoints = record.Endpoints
.Select(endpoint => new IssuerEndpointDocument
{
Kind = endpoint.Kind,
Url = endpoint.Url.ToString(),
Format = endpoint.Format,
RequiresAuthentication = endpoint.RequiresAuthentication
})
.ToList();
return new IssuerDocument
{
Id = record.Id,
TenantId = record.TenantId,
DisplayName = record.DisplayName,
Slug = record.Slug,
Description = record.Description,
Contact = contact,
Metadata = metadataDocument,
Endpoints = endpoints,
Tags = record.Tags.ToList(),
CreatedAtUtc = record.CreatedAtUtc,
CreatedBy = record.CreatedBy,
UpdatedAtUtc = record.UpdatedAtUtc,
UpdatedBy = record.UpdatedBy,
IsSystemSeed = record.IsSystemSeed
};
}
}


@@ -1,88 +0,0 @@
using System.Globalization;
using MongoDB.Driver;
using StellaOps.IssuerDirectory.Core.Abstractions;
using StellaOps.IssuerDirectory.Core.Domain;
using StellaOps.IssuerDirectory.Infrastructure.Documents;
using StellaOps.IssuerDirectory.Infrastructure.Internal;
namespace StellaOps.IssuerDirectory.Infrastructure.Repositories;
public sealed class MongoIssuerTrustRepository : IIssuerTrustRepository
{
private readonly IssuerDirectoryMongoContext _context;
public MongoIssuerTrustRepository(IssuerDirectoryMongoContext context)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
}
public async Task<IssuerTrustOverrideRecord?> GetAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
var filter = Builders<IssuerTrustDocument>.Filter.And(
Builders<IssuerTrustDocument>.Filter.Eq(doc => doc.TenantId, tenantId),
Builders<IssuerTrustDocument>.Filter.Eq(doc => doc.IssuerId, issuerId));
var document = await _context.IssuerTrustOverrides
.Find(filter)
.FirstOrDefaultAsync(cancellationToken)
.ConfigureAwait(false);
return document is null ? null : MapToDomain(document);
}
public async Task UpsertAsync(IssuerTrustOverrideRecord record, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(record);
var document = MapToDocument(record);
var filter = Builders<IssuerTrustDocument>.Filter.And(
Builders<IssuerTrustDocument>.Filter.Eq(doc => doc.TenantId, record.TenantId),
Builders<IssuerTrustDocument>.Filter.Eq(doc => doc.IssuerId, record.IssuerId));
await _context.IssuerTrustOverrides.ReplaceOneAsync(
filter,
document,
new ReplaceOptions { IsUpsert = true },
cancellationToken).ConfigureAwait(false);
}
public async Task DeleteAsync(string tenantId, string issuerId, CancellationToken cancellationToken)
{
var filter = Builders<IssuerTrustDocument>.Filter.And(
Builders<IssuerTrustDocument>.Filter.Eq(doc => doc.TenantId, tenantId),
Builders<IssuerTrustDocument>.Filter.Eq(doc => doc.IssuerId, issuerId));
await _context.IssuerTrustOverrides.DeleteOneAsync(filter, cancellationToken).ConfigureAwait(false);
}
private static IssuerTrustOverrideRecord MapToDomain(IssuerTrustDocument document)
{
return new IssuerTrustOverrideRecord
{
IssuerId = document.IssuerId,
TenantId = document.TenantId,
Weight = document.Weight,
Reason = document.Reason,
CreatedAtUtc = document.CreatedAtUtc,
CreatedBy = document.CreatedBy,
UpdatedAtUtc = document.UpdatedAtUtc,
UpdatedBy = document.UpdatedBy
};
}
private static IssuerTrustDocument MapToDocument(IssuerTrustOverrideRecord record)
{
return new IssuerTrustDocument
{
Id = string.Create(CultureInfo.InvariantCulture, $"{record.TenantId}:{record.IssuerId}"),
IssuerId = record.IssuerId,
TenantId = record.TenantId,
Weight = record.Weight,
Reason = record.Reason,
CreatedAtUtc = record.CreatedAtUtc,
CreatedBy = record.CreatedBy,
UpdatedAtUtc = record.UpdatedAtUtc,
UpdatedBy = record.UpdatedBy
};
}
}


@@ -1,10 +1,7 @@
 using Microsoft.Extensions.Configuration;
 using Microsoft.Extensions.DependencyInjection;
 using StellaOps.IssuerDirectory.Core.Abstractions;
-using StellaOps.IssuerDirectory.Infrastructure.Audit;
-using StellaOps.IssuerDirectory.Infrastructure.Internal;
-using StellaOps.IssuerDirectory.Infrastructure.Options;
-using StellaOps.IssuerDirectory.Infrastructure.Repositories;
+using StellaOps.IssuerDirectory.Infrastructure.InMemory;

 namespace StellaOps.IssuerDirectory.Infrastructure;
@@ -17,19 +14,10 @@ public static class ServiceCollectionExtensions
         ArgumentNullException.ThrowIfNull(services);
         ArgumentNullException.ThrowIfNull(configuration);

-        services.AddOptions<IssuerDirectoryMongoOptions>()
-            .Bind(configuration.GetSection(IssuerDirectoryMongoOptions.SectionName))
-            .Validate(options =>
-            {
-                options.Validate();
-                return true;
-            });
-        services.AddSingleton<IssuerDirectoryMongoContext>();
-        services.AddSingleton<IIssuerRepository, MongoIssuerRepository>();
-        services.AddSingleton<IIssuerKeyRepository, MongoIssuerKeyRepository>();
-        services.AddSingleton<IIssuerTrustRepository, MongoIssuerTrustRepository>();
-        services.AddSingleton<IIssuerAuditSink, MongoIssuerAuditSink>();
+        services.AddSingleton<IIssuerRepository, InMemoryIssuerRepository>();
+        services.AddSingleton<IIssuerKeyRepository, InMemoryIssuerKeyRepository>();
+        services.AddSingleton<IIssuerTrustRepository, InMemoryIssuerTrustRepository>();
+        services.AddSingleton<IIssuerAuditSink, InMemoryIssuerAuditSink>();

         return services;
     }
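For orientation, a minimal sketch (not part of this commit) of how a test host could consume these registrations; the empty configuration root is only there to satisfy the extension's signature:

// Sketch only: resolving the in-memory implementations registered above.
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.IssuerDirectory.Core.Abstractions;
using StellaOps.IssuerDirectory.Infrastructure;

var services = new ServiceCollection();
services.AddIssuerDirectoryInfrastructure(new ConfigurationBuilder().Build());

using var provider = services.BuildServiceProvider();
var issuerRepository = provider.GetRequiredService<IIssuerRepository>(); // InMemoryIssuerRepository
var auditSink = provider.GetRequiredService<IIssuerAuditSink>();         // InMemoryIssuerAuditSink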


@@ -11,8 +11,6 @@
     <PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="10.0.0" />
     <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
     <PackageReference Include="Microsoft.Extensions.Options.ConfigurationExtensions" Version="10.0.0" />
-    <PackageReference Include="MongoDB.Bson" Version="3.5.0" />
-    <PackageReference Include="MongoDB.Driver" Version="3.5.0" />
   </ItemGroup>
   <ItemGroup>
     <ProjectReference Include="..\StellaOps.IssuerDirectory.Core\StellaOps.IssuerDirectory.Core.csproj" />


@@ -121,7 +121,7 @@ static void ConfigurePersistence(
         WebApplicationBuilder builder,
         IssuerDirectoryWebServiceOptions options)
     {
-        var provider = options.Persistence.Provider?.Trim().ToLowerInvariant() ?? "mongo";
+        var provider = options.Persistence.Provider?.Trim().ToLowerInvariant() ?? "postgres";

         if (provider == "postgres")
         {
@@ -134,7 +134,7 @@ static void ConfigurePersistence(
         }
         else
         {
-            Log.Information("Using MongoDB persistence for IssuerDirectory.");
+            Log.Information("Using in-memory persistence for IssuerDirectory (non-production).");
             builder.Services.AddIssuerDirectoryInfrastructure(builder.Configuration);
         }
     }
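A hedged sketch of driving the provider toggle above from configuration. The "IssuerDirectory:Persistence:Provider" key path is inferred from options.Persistence.Provider and is an assumption, not something this diff confirms; note the default flips from "mongo" to "postgres", so configs that previously omitted the key now take the Postgres branch.

// Assumed key path; shown only to illustrate how the branch above is selected.
using System.Collections.Generic;
using Microsoft.Extensions.Configuration;

var configuration = new ConfigurationBuilder()
    .AddInMemoryCollection(new Dictionary<string, string?>
    {
        // Omit this entry and the new default "postgres" applies; any other value
        // falls through to the in-memory branch.
        ["IssuerDirectory:Persistence:Provider"] = "inmemory"
    })
    .Build();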


@@ -114,7 +114,7 @@ internal static class NativeReachabilityGraphBuilder
             .ToImmutableArray();

         var distinctEdges = edges
-            .GroupBy(e => (e.From, e.To, e.Reason), ValueTuple.Create)
+            .GroupBy(e => (e.From, e.To, e.Reason))
             .Select(g => g.First())
             .OrderBy(e => e.From, StringComparer.Ordinal)
             .ThenBy(e => e.To, StringComparer.Ordinal)


@@ -44,7 +44,9 @@ public sealed class ReachabilityGraphBuilder
         string? display = null,
         string? sourceFile = null,
         int? sourceLine = null,
-        IReadOnlyDictionary<string, string>? attributes = null)
+        IReadOnlyDictionary<string, string>? attributes = null,
+        string? purl = null,
+        string? symbolDigest = null)
     {
         if (string.IsNullOrWhiteSpace(symbolId))
         {
@@ -59,7 +61,9 @@ public sealed class ReachabilityGraphBuilder
             display?.Trim(),
             sourceFile?.Trim(),
             sourceLine,
-            attributes?.ToImmutableSortedDictionary(StringComparer.Ordinal) ?? ImmutableSortedDictionary<string, string>.Empty);
+            attributes?.ToImmutableSortedDictionary(StringComparer.Ordinal) ?? ImmutableSortedDictionary<string, string>.Empty,
+            purl?.Trim(),
+            symbolDigest?.Trim());

         _richNodes[id] = node;
         nodes.Add(id);
@@ -93,6 +97,9 @@ public sealed class ReachabilityGraphBuilder
     /// <param name="origin">Origin: static or runtime.</param>
     /// <param name="provenance">Provenance hint: jvm-bytecode, il, ts-ast, ssa, ebpf, etw, jfr, hook.</param>
     /// <param name="evidence">Evidence locator (e.g., "file:path:line").</param>
+    /// <param name="purl">PURL of the component that defines the callee.</param>
+    /// <param name="symbolDigest">Stable hash of the normalized callee signature.</param>
+    /// <param name="candidates">Ranked candidate purls when resolution is ambiguous.</param>
     public ReachabilityGraphBuilder AddEdge(
         string from,
         string to,
@@ -100,7 +107,10 @@ public sealed class ReachabilityGraphBuilder
         EdgeConfidence confidence,
         string origin = "static",
         string? provenance = null,
-        string? evidence = null)
+        string? evidence = null,
+        string? purl = null,
+        string? symbolDigest = null,
+        IReadOnlyList<(string Purl, string? SymbolDigest, double? Score)>? candidates = null)
     {
         if (string.IsNullOrWhiteSpace(from) || string.IsNullOrWhiteSpace(to))
         {
@@ -118,7 +128,10 @@ public sealed class ReachabilityGraphBuilder
             confidence,
             origin?.Trim() ?? "static",
             provenance?.Trim(),
-            evidence?.Trim());
+            evidence?.Trim(),
+            purl?.Trim(),
+            symbolDigest?.Trim(),
+            candidates);

         _richEdges.Add(richEdge);
         nodes.Add(fromId);
@@ -172,7 +185,9 @@ public sealed class ReachabilityGraphBuilder
                     rich.Kind,
                     rich.Display,
                     source,
-                    rich.Attributes.Count > 0 ? rich.Attributes : null));
+                    rich.Attributes.Count > 0 ? rich.Attributes : null,
+                    rich.Purl,
+                    rich.SymbolDigest));
             }
             else
             {
@@ -199,12 +214,17 @@ public sealed class ReachabilityGraphBuilder
                     rich.Provenance,
                     rich.Evidence);

+                var candidates = rich.Candidates?.Select(c => new ReachabilityEdgeCandidate(c.Purl, c.SymbolDigest, c.Score)).ToList();
+
                 edgeList.Add(new ReachabilityUnionEdge(
                     rich.From,
                     rich.To,
                     rich.EdgeType,
                     ConfidenceToString(rich.Confidence),
-                    source));
+                    source,
+                    rich.Purl,
+                    rich.SymbolDigest,
+                    candidates));
             }

             // Add any legacy edges not already covered
@@ -315,7 +335,9 @@ public sealed class ReachabilityGraphBuilder
         string? Display,
         string? SourceFile,
         int? SourceLine,
-        ImmutableSortedDictionary<string, string> Attributes);
+        ImmutableSortedDictionary<string, string> Attributes,
+        string? Purl = null,
+        string? SymbolDigest = null);

     private sealed record RichEdge(
         string From,
@@ -324,7 +346,10 @@ public sealed class ReachabilityGraphBuilder
         EdgeConfidence Confidence,
         string Origin,
         string? Provenance,
-        string? Evidence);
+        string? Evidence,
+        string? Purl = null,
+        string? SymbolDigest = null,
+        IReadOnlyList<(string Purl, string? SymbolDigest, double? Score)>? Candidates = null);
 }

 /// <summary>
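The candidates parameter added to AddEdge is a list of named tuples. As a small, self-contained sketch (values invented, not taken from this commit) of building such a list, which would then be passed as the candidates: argument:

// Shape of the new candidates argument, per the AddEdge signature above (values illustrative).
using System.Collections.Generic;

IReadOnlyList<(string Purl, string? SymbolDigest, double? Score)> candidates = new[]
{
    ("pkg:deb/ubuntu/openssl@3.0.2", (string?)"sha256:abc", (double?)0.8),
    ("pkg:deb/debian/openssl@3.0.2", (string?)"sha256:def", (double?)0.6),
};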


@@ -15,6 +15,7 @@ public interface IRichGraphPublisher
 /// <summary>
 /// Packages richgraph-v1 JSON + meta into a deterministic zip and stores it in CAS.
+/// CAS paths follow the richgraph-v1 contract: cas://reachability/graphs/{blake3}
 /// </summary>
 public sealed class ReachabilityRichGraphPublisher : IRichGraphPublisher
 {
@@ -45,11 +46,20 @@ public sealed class ReachabilityRichGraphPublisher : IRichGraphPublisher
         var zipPath = Path.Combine(folder, "richgraph.zip");
         CreateDeterministicZip(folder, zipPath);

+        // Use BLAKE3 graph_hash as the CAS key per CONTRACT-RICHGRAPH-V1-015
+        var casKey = ExtractHashDigest(writeResult.GraphHash);
+
         await using var stream = File.OpenRead(zipPath);
-        var sha = ComputeSha256(zipPath);
-        var casEntry = await cas.PutAsync(new FileCasPutRequest(sha, stream, leaveOpen: false), cancellationToken).ConfigureAwait(false);
+        var casEntry = await cas.PutAsync(new FileCasPutRequest(casKey, stream, leaveOpen: false), cancellationToken).ConfigureAwait(false);

-        return new RichGraphPublishResult(writeResult.GraphHash, casEntry.RelativePath, writeResult.NodeCount, writeResult.EdgeCount);
+        // Build CAS URI per contract: cas://reachability/graphs/{blake3}
+        var casUri = $"cas://reachability/graphs/{casKey}";
+        return new RichGraphPublishResult(
+            writeResult.GraphHash,
+            casEntry.RelativePath,
+            casUri,
+            writeResult.NodeCount,
+            writeResult.EdgeCount);
     }

     private static void CreateDeterministicZip(string sourceDir, string destinationZip)
@@ -71,16 +81,19 @@ public sealed class ReachabilityRichGraphPublisher : IRichGraphPublisher
         }
     }

-    private static string ComputeSha256(string path)
+    /// <summary>
+    /// Extracts the hex digest from a prefixed hash (e.g., "blake3:abc123" → "abc123").
+    /// </summary>
+    private static string ExtractHashDigest(string prefixedHash)
     {
-        using var sha = System.Security.Cryptography.SHA256.Create();
-        using var stream = File.OpenRead(path);
-        return Convert.ToHexString(sha.ComputeHash(stream)).ToLowerInvariant();
+        var colonIndex = prefixedHash.IndexOf(':');
+        return colonIndex >= 0 ? prefixedHash[(colonIndex + 1)..] : prefixedHash;
     }
 }

 public sealed record RichGraphPublishResult(
     string GraphHash,
     string RelativePath,
+    string CasUri,
     int NodeCount,
     int EdgeCount);
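A tiny worked example (hash value invented) of what the ExtractHashDigest helper and the URI format above produce:

// "blake3:<digest>" → CAS key "<digest>" → cas://reachability/graphs/<digest>
var graphHash = "blake3:3f2acd90e1";
var casKey = graphHash[(graphHash.IndexOf(':') + 1)..];   // "3f2acd90e1"
var casUri = $"cas://reachability/graphs/{casKey}";       // "cas://reachability/graphs/3f2acd90e1"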


@@ -75,7 +75,9 @@ public sealed class ReachabilityUnionWriter
                 Source = n.Source?.Trimmed(),
                 Attributes = (n.Attributes ?? ImmutableDictionary<string, string>.Empty)
                     .Where(kv => !string.IsNullOrWhiteSpace(kv.Key) && kv.Value is not null)
-                    .ToImmutableSortedDictionary(kv => kv.Key.Trim(), kv => kv.Value!.Trim())
+                    .ToImmutableSortedDictionary(kv => kv.Key.Trim(), kv => kv.Value!.Trim()),
+                Purl = Trim(n.Purl),
+                SymbolDigest = Trim(n.SymbolDigest)
             })
             .OrderBy(n => n.SymbolId, StringComparer.Ordinal)
             .ThenBy(n => n.Kind, StringComparer.Ordinal)
@@ -89,7 +91,10 @@ public sealed class ReachabilityUnionWriter
                 To = Trim(e.To)!,
                 EdgeType = Trim(e.EdgeType) ?? "call",
                 Confidence = Trim(e.Confidence) ?? "certain",
-                Source = e.Source?.Trimmed()
+                Source = e.Source?.Trimmed(),
+                Purl = Trim(e.Purl),
+                SymbolDigest = Trim(e.SymbolDigest),
+                Candidates = NormalizeCandidates(e.Candidates)
             })
             .OrderBy(e => e.From, StringComparer.Ordinal)
             .ThenBy(e => e.To, StringComparer.Ordinal)
@@ -110,6 +115,24 @@ public sealed class ReachabilityUnionWriter
         return new NormalizedGraph(nodes, edges, facts);
     }

+    private static IReadOnlyList<ReachabilityEdgeCandidate>? NormalizeCandidates(IReadOnlyList<ReachabilityEdgeCandidate>? candidates)
+    {
+        if (candidates is null || candidates.Count == 0)
+        {
+            return null;
+        }
+
+        return candidates
+            .Where(c => !string.IsNullOrWhiteSpace(c.Purl))
+            .Select(c => new ReachabilityEdgeCandidate(
+                c.Purl.Trim(),
+                Trim(c.SymbolDigest),
+                c.Score))
+            .OrderByDescending(c => c.Score ?? 0)
+            .ThenBy(c => c.Purl, StringComparer.Ordinal)
+            .ToList();
+    }
+
     private static async Task<FileHashInfo> WriteNdjsonAsync<T>(
         string path,
         IReadOnlyCollection<T> items,
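As a quick illustration of the normalization above (not part of the commit, and assuming the records from this file are in scope): candidates are ordered by score descending, then by purl ordinally, so serialization order is deterministic.

// Mirrors NormalizeCandidates' ordering; with these inputs the ubuntu purl (0.8) sorts first.
using System;
using System.Collections.Generic;
using System.Linq;

var raw = new List<ReachabilityEdgeCandidate>
{
    new("pkg:deb/debian/openssl@3.0.2", "sha256:def", 0.6),
    new("pkg:deb/ubuntu/openssl@3.0.2", "sha256:abc", 0.8),
};

var ordered = raw
    .OrderByDescending(c => c.Score ?? 0)
    .ThenBy(c => c.Purl, StringComparer.Ordinal)
    .ToList();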
@@ -145,6 +168,16 @@ public sealed class ReachabilityUnionWriter
         jw.WriteString("display", node.Display);
     }

+    if (!string.IsNullOrWhiteSpace(node.Purl))
+    {
+        jw.WriteString("purl", node.Purl);
+    }
+
+    if (!string.IsNullOrWhiteSpace(node.SymbolDigest))
+    {
+        jw.WriteString("symbol_digest", node.SymbolDigest);
+    }
+
     if (node.Source is not null)
     {
         jw.WritePropertyName("source");
@@ -180,6 +213,37 @@ public sealed class ReachabilityUnionWriter
     jw.WriteString("edge_type", edge.EdgeType);
     jw.WriteString("confidence", edge.Confidence);

+    if (!string.IsNullOrWhiteSpace(edge.Purl))
+    {
+        jw.WriteString("purl", edge.Purl);
+    }
+
+    if (!string.IsNullOrWhiteSpace(edge.SymbolDigest))
+    {
+        jw.WriteString("symbol_digest", edge.SymbolDigest);
+    }
+
+    if (edge.Candidates is { Count: > 0 })
+    {
+        jw.WritePropertyName("candidates");
+        jw.WriteStartArray();
+        foreach (var candidate in edge.Candidates)
+        {
+            jw.WriteStartObject();
+            jw.WriteString("purl", candidate.Purl);
+            if (!string.IsNullOrWhiteSpace(candidate.SymbolDigest))
+            {
+                jw.WriteString("symbol_digest", candidate.SymbolDigest);
+            }
+            if (candidate.Score.HasValue)
+            {
+                jw.WriteNumber("score", candidate.Score.Value);
+            }
+            jw.WriteEndObject();
+        }
+        jw.WriteEndArray();
+    }
+
     if (edge.Source is not null)
     {
         jw.WritePropertyName("source");
@@ -327,14 +391,27 @@ public sealed record ReachabilityUnionNode(
     string Kind,
     string? Display = null,
     ReachabilitySource? Source = null,
-    IReadOnlyDictionary<string, string>? Attributes = null);
+    IReadOnlyDictionary<string, string>? Attributes = null,
+    string? Purl = null,
+    string? SymbolDigest = null);

 public sealed record ReachabilityUnionEdge(
     string From,
     string To,
     string EdgeType,
     string? Confidence = "certain",
-    ReachabilitySource? Source = null);
+    ReachabilitySource? Source = null,
+    string? Purl = null,
+    string? SymbolDigest = null,
+    IReadOnlyList<ReachabilityEdgeCandidate>? Candidates = null);

+/// <summary>
+/// Represents a candidate purl+digest when callee resolution is ambiguous.
+/// </summary>
+public sealed record ReachabilityEdgeCandidate(
+    string Purl,
+    string? SymbolDigest = null,
+    double? Score = null);
+
 public sealed record ReachabilityRuntimeFact(
     string SymbolId,


@@ -38,4 +38,132 @@ public class ReachabilityUnionWriterTests
         var nodeLines = await File.ReadAllLinesAsync(Path.Combine(temp.Path, "reachability_graphs/analysis-x/nodes.ndjson"));
         Assert.Contains(nodeLines, l => l.Contains("sym:dotnet:A"));
     }
[Fact]
public async Task WritesNodePurlAndSymbolDigest()
{
var writer = new ReachabilityUnionWriter();
using var temp = new TempDir();
var graph = new ReachabilityUnionGraph(
Nodes: new[]
{
new ReachabilityUnionNode(
"sym:dotnet:A",
"dotnet",
"method",
"TestMethod",
null,
null,
Purl: "pkg:nuget/TestPackage@1.0.0",
SymbolDigest: "sha256:abc123")
},
Edges: Array.Empty<ReachabilityUnionEdge>());
var result = await writer.WriteAsync(graph, temp.Path, "analysis-purl");
var nodeLines = await File.ReadAllLinesAsync(result.Nodes.Path);
Assert.Single(nodeLines);
Assert.Contains("\"purl\":\"pkg:nuget/TestPackage@1.0.0\"", nodeLines[0]);
Assert.Contains("\"symbol_digest\":\"sha256:abc123\"", nodeLines[0]);
}
[Fact]
public async Task WritesEdgePurlAndSymbolDigest()
{
var writer = new ReachabilityUnionWriter();
using var temp = new TempDir();
var graph = new ReachabilityUnionGraph(
Nodes: new[]
{
new ReachabilityUnionNode("sym:dotnet:A", "dotnet", "method"),
new ReachabilityUnionNode("sym:dotnet:B", "dotnet", "method")
},
Edges: new[]
{
new ReachabilityUnionEdge(
"sym:dotnet:A",
"sym:dotnet:B",
"call",
"high",
null,
Purl: "pkg:nuget/TargetPackage@2.0.0",
SymbolDigest: "sha256:def456")
});
var result = await writer.WriteAsync(graph, temp.Path, "analysis-edge-purl");
var edgeLines = await File.ReadAllLinesAsync(result.Edges.Path);
Assert.Single(edgeLines);
Assert.Contains("\"purl\":\"pkg:nuget/TargetPackage@2.0.0\"", edgeLines[0]);
Assert.Contains("\"symbol_digest\":\"sha256:def456\"", edgeLines[0]);
}
[Fact]
public async Task WritesEdgeCandidates()
{
var writer = new ReachabilityUnionWriter();
using var temp = new TempDir();
var graph = new ReachabilityUnionGraph(
Nodes: new[]
{
new ReachabilityUnionNode("sym:binary:main", "binary", "function"),
new ReachabilityUnionNode("sym:binary:openssl_connect", "binary", "function")
},
Edges: new[]
{
new ReachabilityUnionEdge(
"sym:binary:main",
"sym:binary:openssl_connect",
"call",
"medium",
null,
Purl: null,
SymbolDigest: null,
Candidates: new List<ReachabilityEdgeCandidate>
{
new("pkg:deb/ubuntu/openssl@3.0.2", "sha256:abc", 0.8),
new("pkg:deb/debian/openssl@3.0.2", "sha256:def", 0.6)
})
});
var result = await writer.WriteAsync(graph, temp.Path, "analysis-candidates");
var edgeLines = await File.ReadAllLinesAsync(result.Edges.Path);
Assert.Single(edgeLines);
Assert.Contains("\"candidates\":", edgeLines[0]);
Assert.Contains("pkg:deb/ubuntu/openssl@3.0.2", edgeLines[0]);
Assert.Contains("pkg:deb/debian/openssl@3.0.2", edgeLines[0]);
Assert.Contains("\"score\":0.8", edgeLines[0]);
}
[Fact]
public async Task OmitsPurlAndSymbolDigestWhenNull()
{
var writer = new ReachabilityUnionWriter();
using var temp = new TempDir();
var graph = new ReachabilityUnionGraph(
Nodes: new[]
{
new ReachabilityUnionNode("sym:dotnet:A", "dotnet", "method")
},
Edges: new[]
{
new ReachabilityUnionEdge("sym:dotnet:A", "sym:dotnet:A", "call")
});
var result = await writer.WriteAsync(graph, temp.Path, "analysis-null-purl");
var nodeLines = await File.ReadAllLinesAsync(result.Nodes.Path);
Assert.DoesNotContain("purl", nodeLines[0]);
Assert.DoesNotContain("symbol_digest", nodeLines[0]);
var edgeLines = await File.ReadAllLinesAsync(result.Edges.Path);
Assert.DoesNotContain("purl", edgeLines[0]);
Assert.DoesNotContain("symbol_digest", edgeLines[0]);
Assert.DoesNotContain("candidates", edgeLines[0]);
}
 }


@@ -1,4 +1,5 @@
 using System.Threading.Tasks;
+using StellaOps.Cryptography;
 using StellaOps.Scanner.Reachability;
 using Xunit;
@@ -9,7 +10,7 @@ public class RichGraphPublisherTests
     [Fact]
     public async Task PublishesGraphToCas()
     {
-        var writer = new RichGraphWriter();
+        var writer = new RichGraphWriter(CryptoHashFactory.CreateDefault());
         var publisher = new ReachabilityRichGraphPublisher(writer);
         var cas = new FakeFileContentAddressableStore();
@@ -21,7 +22,8 @@ public class RichGraphPublisherTests
         var rich = RichGraphBuilder.FromUnion(union, "test", "1.0.0");
         var result = await publisher.PublishAsync(rich, "scan-1", cas, temp.Path);

-        Assert.StartsWith("sha256:", result.GraphHash);
+        Assert.Contains(":", result.GraphHash); // hash format: algorithm:digest
+        Assert.StartsWith("cas://reachability/graphs/", result.CasUri);
         Assert.Equal(1, result.NodeCount);
     }
} }


@@ -1,5 +1,6 @@
 using System.IO;
 using System.Threading.Tasks;
+using StellaOps.Cryptography;
 using StellaOps.Scanner.Reachability;
 using Xunit;
@@ -10,7 +11,7 @@ public class RichGraphWriterTests
     [Fact]
     public async Task WritesCanonicalGraphAndMeta()
     {
-        var writer = new RichGraphWriter();
+        var writer = new RichGraphWriter(CryptoHashFactory.CreateDefault());
         using var temp = new TempDir();
         var union = new ReachabilityUnionGraph(
@@ -31,7 +32,7 @@ public class RichGraphWriterTests
         Assert.True(File.Exists(result.MetaPath));
         var json = await File.ReadAllTextAsync(result.GraphPath);
         Assert.Contains("richgraph-v1", json);
-        Assert.StartsWith("sha256:", result.GraphHash);
+        Assert.Contains(":", result.GraphHash); // hash format: algorithm:digest
         Assert.Equal(2, result.NodeCount);
         Assert.Equal(1, result.EdgeCount);
     }