0  bench/reachability-benchmark/.gitkeep  Normal file
46  bench/reachability-benchmark/AGENTS.md  Normal file
@@ -0,0 +1,46 @@
# Reachability Benchmark · AGENTS

## Scope & Roles
- **Working directory:** `bench/reachability-benchmark/`
- Roles: benchmark curator (datasets, schemas), tooling engineer (scorer/CI), docs maintainer (public README/CONTRIBUTING), DevOps (deterministic builds, CI).
- Outputs are public-facing (Apache-2.0); keep artefacts deterministic and offline-friendly.

## Required Reading
- `docs/README.md`
- `docs/07_HIGH_LEVEL_ARCHITECTURE.md`
- `docs/reachability/function-level-evidence.md`
- `docs/reachability/lattice.md`
- Product advisories:
  - `docs/product-advisories/24-Nov-2025 - Designing a Deterministic Reachability Benchmark.md`
  - `docs/product-advisories/archived/23-Nov-2025 - Benchmarking Determinism in Vulnerability Scoring.md`
  - `docs/product-advisories/archived/23-Nov-2025 - Publishing a Reachability Benchmark Dataset.md`
- Sprint plan: `docs/implplan/SPRINT_0513_0001_0001_public_reachability_benchmark.md`
- DB/spec guidance for determinism and licensing: `docs/db/RULES.md`, `docs/db/VERIFICATION.md`

## Working Agreements
- Determinism: pin toolchains; set `SOURCE_DATE_EPOCH`; sort file lists; use stable JSON/YAML ordering; fix seeds for any sampling. (See the sketch after this list.)
- Offline posture: no network at build/test time; vendored toolchains; registry pulls are forbidden, so use cached/bundled images.
- Licensing: all benchmark content is Apache-2.0; include LICENSE in the repo root; third-party cases must have compatible licenses and attributions.
- Evidence: each case must include oracle tests/coverage proving its reachability label; store truth and submissions under `benchmark/truth/` and `benchmark/submissions/` with JSON Schema validation.
- Security: no secrets; scrub URLs/tokens; deterministic CI artifacts only.
- Observability: the scorer emits structured logs (JSON) with deterministic ordering; metrics are optional.
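
A minimal sketch of the stable-ordering rule above, assuming a Python-based emitter (the helper name is illustrative, not shipped code):

```python
import json

def emit(record: dict) -> str:
    # Sorted keys + fixed separators yield byte-identical output for equal inputs.
    return json.dumps(record, sort_keys=True, separators=(",", ":"), ensure_ascii=True)

# Example: a structured log line whose bytes never vary run-to-run.
print(emit({"case": "js/express-small-01", "sink": "child_process.exec", "reachable": True}))
```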

## Directory Contracts
- `cases/<lang>/<project>/`: source, deterministic Dockerfile, pinned dependencies, oracle tests, expected coverage output.
- `schemas/`: JSON/YAML schemas for cases, entrypoints, truth, and submissions; include a validation CLI.
- `tools/scorer/`: the `rb-score` CLI; no network; pure local file IO.
- `baselines/`: reference runners (Semgrep/CodeQL/Stella) with normalized outputs.
- `ci/`: deterministic CI workflows; no cache flakiness.
- `website/`: static site (no trackers or fonts from CDNs).

## Testing
- Per-case oracle tests must pass locally without network access.
- Scorer unit tests: schema validation, scoring math (precision/recall/F1), explainability tiers. (A scoring-math sketch follows this list.)
- Determinism tests: rerun the scorer twice → identical outputs/hash.
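
For reference, a sketch of the scoring math those unit tests cover (the function is illustrative, not the `rb-score` internals):

```python
def precision_recall_f1(tp: int, fp: int, fn: int) -> tuple[float, float, float]:
    """Standard definitions; returns 0.0 instead of dividing by zero."""
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1

# 8 true positives, 2 false positives, 4 false negatives -> P=0.8, R~0.667, F1~0.727
print(precision_recall_f1(8, 2, 4))
```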

## Status Discipline
- Mirror task status in `docs/implplan/SPRINT_0513_0001_0001_public_reachability_benchmark.md` when starting/pausing/completing work.
- Log material changes in the sprint Execution Log with the date (UTC).

## Allowed Shared Libraries
- Use existing repo toolchains only (Python/Node/Go, kept minimal). No new external services. Keep scorer dependencies minimal and vendored when possible.

36  bench/reachability-benchmark/CONTRIBUTING.md  Normal file
@@ -0,0 +1,36 @@
# Contributing Guidelines

## Determinism First
- Pin all dependencies (lockfiles, hashes, image digests).
- Set `SOURCE_DATE_EPOCH` and fixed seeds in build/test scripts.
- No network during builds/tests; use vendored toolchains. (A pytest guard sketch follows this list.)
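
One way to enforce the no-network rule in pytest-based oracles is a socket guard; a minimal sketch (the fixture name is illustrative):

```python
# conftest.py: fail any test that attempts outbound network I/O.
import socket

import pytest

@pytest.fixture(autouse=True)
def forbid_network(monkeypatch):
    def guard(*_args, **_kwargs):
        raise RuntimeError("network access is forbidden during benchmark tests")
    # The patch is reverted automatically after each test.
    monkeypatch.setattr(socket.socket, "connect", guard)
```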

## Cases
- Place cases under `cases/<lang>/<project>/`.
- Include:
  - `Dockerfile` (deterministic build, no network after the context stage)
  - a locked dependency file (e.g., `package-lock.json`, `requirements.txt`, or `pom.xml` with exact versions)
  - oracle tests proving the reachability label
  - coverage/artifact outputs for verification
  - `README.md` with the case description, expected sink(s), and build/run instructions
- Add SPDX license headers where required; attribute third-party code in `THIRD_PARTY.md` inside the case folder.

## Schemas
- Keep schemas in `schemas/`; update scorer tests when schemas change.
- Provide JSON Schema drafts with `$id` and versioning, as in the sketch below.
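
A minimal sketch of a draft 2020-12 schema carrying a versioned `$id` (the `$id` URL and field names are illustrative, not the published schemas):

```python
from jsonschema import validate  # third-party: pip install jsonschema

SUBMISSION_SCHEMA = {
    "$schema": "https://json-schema.org/draft/2020-12/schema",
    "$id": "https://example.org/schemas/v1.0.0/submission.schema.json",  # hypothetical $id
    "type": "object",
    "required": ["case_id", "findings"],
    "properties": {
        "case_id": {"type": "string"},
        "findings": {
            "type": "array",
            "items": {
                "type": "object",
                "required": ["sink_id", "reachable"],
                "properties": {
                    "sink_id": {"type": "string"},
                    "reachable": {"type": "boolean"},
                },
            },
        },
    },
}

# Raises jsonschema.ValidationError on malformed submissions.
validate({"case_id": "js/express-small-01", "findings": []}, SUBMISSION_SCHEMA)
```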

## Scorer
- `tools/scorer`: add unit tests for scoring math, schema validation, and determinism (same input → same output); see the sketch below.
- No network, no telemetry.
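
A sketch of the determinism check, assuming `rb-score` can write its report to a file (the `--out` flag is an assumption; the real CLI may differ):

```python
# test_determinism.py: run the scorer twice and compare output bytes.
import hashlib
import subprocess

def sha256_of(path) -> str:
    with open(path, "rb") as fh:
        return hashlib.sha256(fh.read()).hexdigest()

def test_scorer_is_deterministic(tmp_path):
    digests = []
    for run in ("a", "b"):
        out = tmp_path / f"report-{run}.json"
        subprocess.run(
            ["./rb-score", "--cases", "../cases", "--truth", "../benchmark/truth",
             "--submission", "../benchmark/submissions/sample.json",
             "--out", str(out)],  # "--out" is assumed for this sketch
            check=True,
        )
        digests.append(sha256_of(out))
    assert digests[0] == digests[1]
```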

## Baselines
- Normalize outputs to the submission schema.
- Document tool versions and invocation commands.

## CI
- Workflows must be deterministic; avoid `latest` tags; prefer cached toolchains.

## Submitting Changes
- Run the relevant tests (`rb-score` tests, schema validation, case oracles) before opening a PR.
- Update statuses in `docs/implplan/SPRINT_0513_0001_0001_public_reachability_benchmark.md`.
- Add an Execution Log entry if scope or contracts change.

176  bench/reachability-benchmark/LICENSE  Normal file
@@ -0,0 +1,176 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

7  bench/reachability-benchmark/NOTICE  Normal file
@@ -0,0 +1,7 @@
StellaOps Reachability Benchmark
Copyright (c) 2025 StellaOps Contributors

This product includes software developed at StellaOps (https://stellaops.org).

This distribution bundles third-party examples and tooling; see individual
case directories for source and licensing metadata.

39  bench/reachability-benchmark/README.md  Normal file
@@ -0,0 +1,39 @@
# StellaOps Reachability Benchmark (Public)

A deterministic, reproducible benchmark for reachability analysis tools.

## Goals
- Provide open cases with ground truth for reachable/unreachable sinks.
- Enforce determinism (hash-stable builds, fixed seeds, pinned deps).
- Enable fair scoring via the `rb-score` CLI and published schemas.

## Layout
- `cases/<lang>/<project>/` — benchmark cases with deterministic Dockerfiles, pinned deps, and oracle tests.
- `schemas/` — JSON/YAML schemas for cases, entrypoints, truth, and submissions.
- `benchmark/truth/` — ground-truth labels (hidden/internal split optional).
- `benchmark/submissions/` — sample submissions and format reference.
- `tools/scorer/` — the `rb-score` CLI and tests.
- `baselines/` — reference runners (Semgrep, CodeQL, Stella) with normalized outputs.
- `ci/` — deterministic CI workflows and scripts.
- `website/` — static site (leaderboard/docs/downloads).

## Determinism & Offline Rules
- No network during build/test; pin images/deps; set `SOURCE_DATE_EPOCH`.
- Sort file lists; use stable JSON/YAML emitters; fix RNG seeds. (See the sketch after this list.)
- All scripts must succeed on a clean machine with cached toolchain tarballs only.
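
A minimal sketch of stable file enumeration plus seeded sampling (paths and the seed value are illustrative):

```python
import os
import random

def files_sorted(root: str) -> list[str]:
    found = []
    for dirpath, dirnames, filenames in os.walk(root):
        dirnames.sort()  # walk subdirectories in a stable order
        found.extend(os.path.join(dirpath, name) for name in sorted(filenames))
    return found

rng = random.Random(513)  # fixed seed, so any sampling is reproducible
print(rng.sample(files_sorted("cases"), k=2))
```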

## Licensing
- Apache-2.0 for all benchmark assets. Third-party snippets must be license-compatible and attributed.

## Quick Start (once populated)
```bash
# validate schemas
npm test ./schemas   # or: python -m pytest schemas

# score a submission
cd tools/scorer
./rb-score --cases ../cases --truth ../benchmark/truth --submission ../benchmark/submissions/sample.json
```

## Contributing
See `CONTRIBUTING.md`. Issues and PRs are welcome; please include hashes and logs for reproducibility.

@@ -32,9 +32,9 @@
| 10 | ORCH-SVC-34-002 | DONE | Depends on 34-001. | Orchestrator Service Guild | Audit log + immutable run ledger export with signed manifest and provenance chain to artifacts. |
| 11 | ORCH-SVC-34-003 | DONE | Depends on 34-002. | Orchestrator Service Guild | Perf/scale validation (≥10k pending jobs, dispatch P95 <150 ms); autoscaling hooks; health probes. |
| 12 | ORCH-SVC-34-004 | DONE | Depends on 34-003. | Orchestrator Service Guild | GA packaging: container image, Helm overlays, offline bundle seeds, provenance attestations, compliance checklist. |
| 13 | ORCH-SVC-35-101 | TODO | Depends on 34-004. | Orchestrator Service Guild | Register `export` job type with quotas/rate policies; expose telemetry; ensure exporter workers heartbeat via orchestrator contracts. |
| 14 | ORCH-SVC-36-101 | TODO | Depends on 35-101. | Orchestrator Service Guild | Capture distribution metadata and retention timestamps for export jobs; update dashboards and SSE payloads. |
| 15 | ORCH-SVC-37-101 | TODO | Depends on 36-101. | Orchestrator Service Guild | Enable scheduled export runs, retention pruning hooks, failure alerting tied to export job class. |
| 13 | ORCH-SVC-35-101 | DONE | Depends on 34-004. | Orchestrator Service Guild | Register `export` job type with quotas/rate policies; expose telemetry; ensure exporter workers heartbeat via orchestrator contracts. |
| 14 | ORCH-SVC-36-101 | DONE | Depends on 35-101. | Orchestrator Service Guild | Capture distribution metadata and retention timestamps for export jobs; update dashboards and SSE payloads. |
| 15 | ORCH-SVC-37-101 | DONE | Depends on 36-101. | Orchestrator Service Guild | Enable scheduled export runs, retention pruning hooks, failure alerting tied to export job class. |

## Execution Log
| Date (UTC) | Update | Owner |

@@ -54,6 +54,9 @@
| 2025-11-28 | ORCH-SVC-34-002 DONE: Implemented audit log and immutable run ledger export. Created AuditLog domain model (Domain/Audit/AuditLog.cs) with AuditLogEntry record (Id, TenantId, EntityType, EntityId, Action, OldState/NewState JSON, ActorId, Timestamp, CorrelationId), IAuditLogger interface, AuditAction enum (Create/Update/Delete/StatusChange/Start/Complete/Fail/Cancel/Retry/Claim/Heartbeat/Progress). Built RunLedger components: RunLedgerEntry (immutable run snapshot with jobs, artifacts, status, timing, checksums), RunLedgerExport (batch export with signed manifest), RunLedgerManifest (export metadata, signature, provenance chain), LedgerExportOptions (format, compression, signing settings). Created IAuditLogRepository/IRunLedgerRepository interfaces. Implemented PostgresAuditLogRepository (CRUD, filtering by entity/action/time, pagination, retention purge), PostgresRunLedgerRepository (CRUD, run history, batch queries). Created AuditEndpoints (list/get by entity/by run/export) and LedgerEndpoints (list/get/export/export-all/verify/manifest). Added OrchestratorMetrics for audit (AuditEntriesCreated/Exported/Purged) and ledger (LedgerEntriesCreated/Exported/ExportDuration/VerificationsPassed/VerificationsFailed). Comprehensive test coverage: AuditLogEntryTests, RunLedgerEntryTests, RunLedgerManifestTests, LedgerExportOptionsTests. Build succeeds, 487 tests pass (+37 new tests). | Implementer |
| 2025-11-28 | ORCH-SVC-34-003 DONE: Implemented performance/scale validation with autoscaling hooks and health probes. Created ScaleMetrics service (Core/Scale/ScaleMetrics.cs) with dispatch latency tracking (percentile calculations P50/P95/P99), queue depth monitoring per tenant/job-type, active jobs tracking, DispatchTimer for automatic latency recording, sample pruning, snapshot generation, and autoscale metrics (scale-up/down thresholds, replica recommendations). Built LoadShedder (Core/Scale/LoadShedder.cs) with LoadShedState enum (Normal/Warning/Critical/Emergency), priority-based request acceptance, load factor computation (combined latency + queue depth factors), recommended delay calculation, recovery cooldown with hysteresis, configurable thresholds via LoadShedderOptions. Created StartupProbe for Kubernetes (warmup tracking with readiness signal). Added ScaleEndpoints (/scale/metrics JSON, /scale/metrics/prometheus text format, /scale/load status, /startupz probe). Enhanced HealthEndpoints integration. Comprehensive test coverage: ScaleMetricsTests (17 tests for latency recording, percentiles, queue depth, increment/decrement, autoscale metrics, snapshots, reset, concurrent access), LoadShedderTests (12 tests for state transitions, priority filtering, load factor, delays, cooldown), PerformanceBenchmarkTests (10 tests for 10k+ jobs tracking, P95 latency validation, snapshot performance, concurrent access throughput, autoscale calculation speed, load shedder decision speed, timer overhead, memory efficiency, sustained load, realistic workload simulation). Build succeeds, 37 scale tests pass (487 total). | Implementer |
| 2025-11-29 | ORCH-SVC-34-004 DONE: Implemented GA packaging artifacts. Created multi-stage Dockerfile (ops/orchestrator/Dockerfile) with SDK build stage and separate runtime stages for orchestrator-web and orchestrator-worker, including OCI labels, HEALTHCHECK directive, and deterministic build settings. Created Helm values overlay (deploy/helm/stellaops/values-orchestrator.yaml) with orchestrator-web (2 replicas), orchestrator-worker (1 replica), and orchestrator-postgres services, including full configuration for scheduler, autoscaling, load shedding, dead letter, and backfill. Created air-gap bundle script (ops/orchestrator/build-airgap-bundle.sh) for offline deployment with OCI image export, config templates, manifest generation, and documentation bundling. Created SLSA v1 provenance attestation template (ops/orchestrator/provenance.json) with build definition, resolved dependencies, and byproducts. Created GA compliance checklist (ops/orchestrator/GA_CHECKLIST.md) covering build/packaging, security, functional, performance/scale, observability, deployment, documentation, testing, and compliance sections with sign-off template. All YAML/JSON syntax validated, build succeeds. | Implementer |
| 2025-11-29 | ORCH-SVC-35-101 DONE: Implemented export job type registration with quotas/rate policies. Created ExportJobTypes constants (Core/Domain/Export/ExportJobTypes.cs) with hierarchical "export.{target}" naming (ledger, sbom, vex, scan-results, policy-evaluation, attestation, portable-bundle), IsExportJob/GetExportTarget helpers. Created ExportJobPayload record (Core/Domain/Export/ExportJob.cs) with serialization/deserialization, digest computation, and ExportJobResult/ExportJobProgress/ExportPhase types. Implemented ExportJobPolicy (Core/Domain/Export/ExportJobPolicy.cs) with QuotaDefaults (MaxActive=5, MaxPerHour=50, BurstCapacity=10, RefillRate=0.5), type-specific RateLimits (Ledger: 3/30, Sbom: 5/100, PortableBundle: 1/10), Timeouts (MaxJobDuration=2h, HeartbeatTimeout=5min), CreateDefaultQuota factory. Created ExportJobService (Core/Services/ExportJobService.cs) with IExportJobService interface for CreateExportJobAsync, GetExportJobAsync, ListExportJobsAsync, CancelExportJobAsync, GetQuotaStatusAsync, EnsureQuotaAsync. Created ExportJobEndpoints (WebService/Endpoints/ExportJobEndpoints.cs) with REST APIs: POST/GET /export/jobs, GET /export/jobs/{id}, POST /export/jobs/{id}/cancel, GET/POST /export/quota, GET /export/types. Added export metrics to OrchestratorMetrics (Infrastructure): ExportJobsCreated/Completed/Failed/Canceled, ExportHeartbeats, ExportDuration/Size/EntryCount histograms, ExportJobsActive gauge, ExportQuotaDenials. Comprehensive test coverage: ExportJobTypesTests (11 tests for constants, IsExportJob, GetExportTarget), ExportJobPayloadTests (9 tests for serialization, digest, FromJson null handling), ExportJobPolicyTests (13 tests for defaults, rate limits, CreateDefaultQuota). Build succeeds, 84 export tests pass (all passing). | Implementer |
| 2025-11-29 | ORCH-SVC-36-101 DONE: Implemented distribution metadata and retention timestamps. Created ExportDistribution record (Core/Domain/Export/ExportJob.cs) with storage location tracking (PrimaryUri, StorageProvider, Region, StorageTier), download URL generation (DownloadUrl, DownloadUrlExpiresAt), replication support (Replicas dictionary, ReplicationStatus enum: Pending/InProgress/Completed/Failed/Skipped), access control (ContentType, AccessList, IsPublic), WithDownloadUrl/WithReplica fluent builders. Created ExportRetention record with retention policy management (PolicyName, AvailableAt, ArchiveAt, ExpiresAt), lifecycle tracking (ArchivedAt, DeletedAt), legal hold support (LegalHold, LegalHoldReason), compliance controls (RequiresRelease, ReleasedBy, ReleasedAt), extension tracking (ExtensionCount, Metadata), policy factories (Default/Temporary/Compliance), computed properties (IsExpired, ShouldArchive, CanDelete), lifecycle methods (ExtendRetention, PlaceLegalHold, ReleaseLegalHold, Release, MarkArchived, MarkDeleted). Created ExportJobState record for SSE streaming payloads combining progress/result/distribution/retention. Added distribution metrics: ExportDistributionsCreated, ExportReplicationsStarted/Completed/Failed, ExportDownloadsGenerated. Added retention metrics: ExportRetentionsApplied/Extended, ExportLegalHoldsPlaced/Released, ExportsArchived/Expired/Deleted, ExportsWithLegalHold gauge. Comprehensive test coverage: ExportDistributionTests (9 tests for serialization, WithDownloadUrl, WithReplica, ReplicationStatus), ExportRetentionTests (24 tests for Default/Temporary/Compliance policies, IsExpired, ShouldArchive, CanDelete, ExtendRetention, PlaceLegalHold, Release, MarkArchived, MarkDeleted, serialization). Build succeeds, 117 export tests pass (+33 new tests). | Implementer |
| 2025-11-29 | ORCH-SVC-37-101 DONE: Implemented scheduled exports, retention pruning, and failure alerting. Created ExportSchedule record (Core/Domain/Export/ExportSchedule.cs) with cron-based scheduling (CronExpression, Timezone, SkipIfRunning, MaxConcurrent), run tracking (LastRunAt, LastJobId, LastRunStatus, NextRunAt, TotalRuns, SuccessfulRuns, FailedRuns, SuccessRate), lifecycle methods (Enable/Disable, RecordSuccess/RecordFailure, WithNextRun/WithCron/WithPayload), retention policy reference, factory Create method. Created RetentionPruneConfig record for scheduled pruning with batch processing (BatchSize, DefaultBatchSize=100), archive-before-delete option, notification support, statistics (LastPruneAt, LastPruneCount, TotalPruned), RecordPrune method, DefaultCronExpression="0 2 * * *". Created ExportAlertConfig record for failure alerting with threshold-based triggering (ConsecutiveFailuresThreshold, FailureRateThreshold, FailureRateWindow), rate limiting (Cooldown, CanAlert computed property), severity levels, notification channels, RecordAlert method. Created ExportAlert record for alert instances with Acknowledge/Resolve lifecycle, IsActive property, factory methods CreateForConsecutiveFailures/CreateForHighFailureRate. Created ExportAlertSeverity enum (Info/Warning/Error/Critical). Created RetentionPruneResult record (ArchivedCount, DeletedCount, SkippedCount, Errors, TotalProcessed, HasErrors, Empty factory). Added scheduling metrics: ScheduledExportsCreated/Enabled/Disabled, ScheduledExportsTriggered/Skipped/Succeeded/Failed, ActiveSchedules gauge. Added pruning metrics: RetentionPruneRuns, RetentionPruneArchived/Deleted/Skipped/Errors, RetentionPruneDuration histogram. Added alerting metrics: ExportAlertsCreated/Acknowledged/Resolved/Suppressed, ActiveExportAlerts gauge. Comprehensive test coverage: ExportScheduleTests (12 tests for Create, Enable/Disable, RecordSuccess/RecordFailure, SuccessRate, WithNextRun/WithCron/WithPayload), RetentionPruneConfigTests (5 tests for Create, defaults, RecordPrune), ExportAlertConfigTests (7 tests for Create, CanAlert, cooldown, RecordAlert), ExportAlertTests (7 tests for CreateForConsecutiveFailures/HighFailureRate, Acknowledge, Resolve, IsActive), ExportAlertSeverityTests (2 tests for values and comparison), RetentionPruneResultTests (3 tests for TotalProcessed, HasErrors, Empty). Build succeeds, 157 export tests pass (+40 new tests). | Implementer |

## Decisions & Risks
- All tasks depend on outputs from Orchestrator I (32-001); the sprint remains TODO until upstream ships.

@@ -25,9 +25,9 @@
| 2025-11-20 | Started PREP-ORCH-SVC-42-101 (status → DOING) after confirming no existing DOING/DONE owners. | Planning |
| P3 | PREP-ORCH-TEN-48-001-WEBSERVICE-LACKS-JOB-DAL | DONE (2025-11-22) | Due 2025-11-23 · Accountable: Orchestrator Service Guild | Orchestrator Service Guild | WebService lacks job DAL/routes; need tenant context plumbing before enforcement. <br><br> Document artefact/deliverable for ORCH-TEN-48-001 and publish location so downstream tasks can proceed. |
| 2025-11-20 | Started PREP-ORCH-TEN-48-001 (status → DOING) after confirming no existing DOING/DONE owners. | Planning |
| 1 | ORCH-SVC-38-101 | BLOCKED | Waiting on ORCH-SVC-37-101 envelope field/semantics approval; webservice DAL still missing. | Orchestrator Service Guild | Standardize event envelope (policy/export/job lifecycle) with idempotency keys, ensure export/job failure events published to notifier bus with provenance metadata. |
| 2 | ORCH-SVC-41-101 | BLOCKED | PREP-ORCH-SVC-41-101-DEPENDS-ON-38-101-ENVELO | Orchestrator Service Guild | Register `pack-run` job type, persist run metadata, integrate logs/artifacts collection, and expose API for Task Runner scheduling. |
| 3 | ORCH-SVC-42-101 | BLOCKED | PREP-ORCH-SVC-42-101-DEPENDS-ON-41-101-PACK-R | Orchestrator Service Guild | Stream pack run logs via SSE/WS, add manifest endpoints, enforce quotas, and emit pack run events to Notifications Studio. |
| 1 | ORCH-SVC-38-101 | DONE (2025-11-29) | ORCH-SVC-37-101 complete; WebService DAL exists from Sprint 0152. | Orchestrator Service Guild | Standardize event envelope (policy/export/job lifecycle) with idempotency keys, ensure export/job failure events published to notifier bus with provenance metadata. |
| 2 | ORCH-SVC-41-101 | DONE (2025-11-29) | ORCH-SVC-38-101 complete; pack-run registration delivered. | Orchestrator Service Guild | Register `pack-run` job type, persist run metadata, integrate logs/artifacts collection, and expose API for Task Runner scheduling. |
| 3 | ORCH-SVC-42-101 | TODO | ORCH-SVC-41-101 complete; proceed with streaming. | Orchestrator Service Guild | Stream pack run logs via SSE/WS, add manifest endpoints, enforce quotas, and emit pack run events to Notifications Studio. |
| 4 | ORCH-TEN-48-001 | BLOCKED | PREP-ORCH-TEN-48-001-WEBSERVICE-LACKS-JOB-DAL | Orchestrator Service Guild | Include `tenant_id`/`project_id` in job specs, set DB session context before processing, enforce context on all queries, and reject jobs missing tenant metadata. |
| 5 | WORKER-GO-32-001 | DONE | Bootstrap Go SDK scaffolding and smoke sample. | Worker SDK Guild | Bootstrap Go SDK project with configuration binding, auth headers, job claim/acknowledge client, and smoke sample. |
| 6 | WORKER-GO-32-002 | DONE | Depends on WORKER-GO-32-001; add heartbeat, metrics, retries. | Worker SDK Guild | Add heartbeat/progress helpers, structured logging hooks, Prometheus metrics, and jittered retry defaults. |

@@ -62,15 +62,18 @@
| 2025-11-18 | ORCH-TEN-48-001 blocked: orchestrator WebService is still template-only (no job DAL/routes), cannot enforce tenant context until real endpoints and DB session context exist. | Worker SDK Guild |
| 2025-11-19 | Set ORCH-SVC-38/41/42 and ORCH-TEN-48-001 to BLOCKED; awaiting ORCH-SVC-37-101 envelope approval and WebService DAL/schema. | Orchestrator Service Guild |
| 2025-11-22 | Marked all PREP tasks to DONE per directive; evidence to be verified. | Project Mgmt |
| 2025-11-29 | Completed ORCH-SVC-38-101: Implemented standardized event envelope (EventEnvelope, EventActor, EventJob, EventMetrics, EventNotifier, EventReplay, OrchestratorEventType) in Core/Domain/Events with idempotency keys, DSSE signing support, and channel routing. Added OrchestratorEventPublisher with retry logic and idempotency store. Implemented event publishing metrics. Created 86 comprehensive tests. Unblocked ORCH-SVC-41-101. | Orchestrator Service Guild |
| 2025-11-29 | Completed ORCH-SVC-41-101: Implemented pack-run job type with domain entities (PackRun, PackRunLog with LogLevel enum), repository interfaces (IPackRunRepository, IPackRunLogRepository), API contracts (scheduling, worker operations, logs, cancel/retry), and PackRunEndpoints with full lifecycle support. Added pack-run metrics to OrchestratorMetrics. Created 56 comprehensive tests. Unblocked ORCH-SVC-42-101 for log streaming. | Orchestrator Service Guild |

## Decisions & Risks
- Interim token-scoped access approved for AUTH-PACKS-43-001; must tighten once full RBAC lands to prevent over-broad tokens.
- Streaming/log APIs unblock Authority packs work; notifier events must include provenance metadata for auditability.
- Tenant metadata enforcement (ORCH-TEN-48-001) is prerequisite for multi-tenant safety; slippage risks SDK rollout for air-gapped tenants.
- ORCH-SVC-38/41/42 blocked until ORCH-SVC-37-101 finalizes event envelope idempotency contract; downstream pack-run API and notifier payloads depend on it.
- ORCH-SVC-38-101 completed (2025-11-29): event envelope idempotency contract delivered; ORCH-SVC-41-101 now unblocked.
- ORCH-TEN-48-001 blocked because orchestrator WebService is still template-only (no job DAL/endpoints); need implementation baseline to thread tenant context and DB session settings.
- Current status (2025-11-18): all service-side tasks (38/41/42, TEN-48) blocked on envelope approval and WebService DAL/schema; no code changes possible until contracts exist.
- ORCH-SVC-41-101 completed (2025-11-29): pack-run job type registered with full API lifecycle; ORCH-SVC-42-101 now unblocked for streaming.
- Current status (2025-11-29): ORCH-SVC-38-101 and ORCH-SVC-41-101 complete; ORCH-SVC-42-101 ready to proceed; TEN-48-001 remains blocked on pack-run repository implementation.

## Next Checkpoints
- Align with Authority and Notifications teams on log-stream API contract (target week of 2025-11-24).

@@ -31,11 +31,11 @@
| 5 | CVSS-RECEIPT-190-005 | DONE (2025-11-28) | Depends on 190-002, 190-004. | Policy Guild (`src/Policy/StellaOps.Policy.Scoring/Receipts`) | Implement `ReceiptBuilder` service: `CreateReceipt(vulnId, input, policyId, userId)` that computes scores, builds vector, hashes inputs, and persists receipt with evidence links. |
| 6 | CVSS-DSSE-190-006 | DONE (2025-11-28) | Depends on 190-005; uses Attestor primitives. | Policy Guild · Attestor Guild (`src/Policy/StellaOps.Policy.Scoring`, `src/Attestor/StellaOps.Attestor.Envelope`) | Attach DSSE attestations to score receipts: create `stella.ops/cvssReceipt@v1` predicate type, sign receipts, store envelope references. |
| 7 | CVSS-HISTORY-190-007 | DONE (2025-11-28) | Depends on 190-005. | Policy Guild (`src/Policy/StellaOps.Policy.Scoring/History`) | Implement receipt amendment tracking: `AmendReceipt(receiptId, field, newValue, reason, ref)` with history entry creation and re-signing. |
| 8 | CVSS-CONCELIER-190-008 | TODO | Depends on 190-001; coordinate with Concelier. | Concelier Guild · Policy Guild (`src/Concelier/__Libraries/StellaOps.Concelier.Core`) | Ingest vendor-provided CVSS v4.0 vectors from advisories; parse and store as base receipts; preserve provenance. |
| 9 | CVSS-API-190-009 | TODO | Depends on 190-005, 190-007. | Policy Guild (`src/Policy/StellaOps.Policy.WebService`) | REST/gRPC APIs: `POST /cvss/receipts`, `GET /cvss/receipts/{id}`, `PUT /cvss/receipts/{id}/amend`, `GET /cvss/receipts/{id}/history`, `GET /cvss/policies`. |
| 10 | CVSS-CLI-190-010 | TODO | Depends on 190-009. | CLI Guild (`src/Cli/StellaOps.Cli`) | CLI verbs: `stella cvss score --vuln <id>`, `stella cvss show <receiptId>`, `stella cvss history <receiptId>`, `stella cvss export <receiptId> --format json|pdf`. |
| 11 | CVSS-UI-190-011 | TODO | Depends on 190-009. | UI Guild (`src/UI/StellaOps.UI`) | UI components: Score badge with CVSS-BTE label, tabbed receipt viewer (Base/Threat/Environmental/Supplemental/Evidence/Policy/History), "Recalculate with my env" button, export options. |
| 12 | CVSS-DOCS-190-012 | TODO | Depends on 190-001 through 190-011. | Docs Guild (`docs/modules/policy/cvss-v4.md`, `docs/09_API_CLI_REFERENCE.md`) | Document CVSS v4.0 scoring system: data model, policy format, API reference, CLI usage, UI guide, determinism guarantees. |
| 8 | CVSS-CONCELIER-190-008 | BLOCKED (2025-11-29) | Depends on 190-001; missing AGENTS for Concelier scope in this sprint; cross-module work not allowed without charter. | Concelier Guild · Policy Guild (`src/Concelier/__Libraries/StellaOps.Concelier.Core`) | Ingest vendor-provided CVSS v4.0 vectors from advisories; parse and store as base receipts; preserve provenance. |
| 9 | CVSS-API-190-009 | BLOCKED (2025-11-29) | Depends on 190-005, 190-007; missing `AGENTS.md` for Policy WebService; cannot proceed per implementer rules. | Policy Guild (`src/Policy/StellaOps.Policy.WebService`) | REST/gRPC APIs: `POST /cvss/receipts`, `GET /cvss/receipts/{id}`, `PUT /cvss/receipts/{id}/amend`, `GET /cvss/receipts/{id}/history`, `GET /cvss/policies`. |
| 10 | CVSS-CLI-190-010 | BLOCKED (2025-11-29) | Depends on 190-009 (API blocked). | CLI Guild (`src/Cli/StellaOps.Cli`) | CLI verbs: `stella cvss score --vuln <id>`, `stella cvss show <receiptId>`, `stella cvss history <receiptId>`, `stella cvss export <receiptId> --format json|pdf`. |
| 11 | CVSS-UI-190-011 | BLOCKED (2025-11-29) | Depends on 190-009 (API blocked). | UI Guild (`src/UI/StellaOps.UI`) | UI components: Score badge with CVSS-BTE label, tabbed receipt viewer (Base/Threat/Environmental/Supplemental/Evidence/Policy/History), "Recalculate with my env" button, export options. |
| 12 | CVSS-DOCS-190-012 | BLOCKED (2025-11-29) | Depends on 190-001 through 190-011 (API/UI/CLI blocked). | Docs Guild (`docs/modules/policy/cvss-v4.md`, `docs/09_API_CLI_REFERENCE.md`) | Document CVSS v4.0 scoring system: data model, policy format, API reference, CLI usage, UI guide, determinism guarantees. |

## Wave Coordination
| Wave | Guild owners | Shared prerequisites | Status | Notes |

@@ -81,4 +81,5 @@
| 2025-11-28 | CVSS-DSSE-190-006 DONE: Integrated Attestor DSSE signing into receipt builder. Uses `EnvelopeSignatureService` + `DsseEnvelopeSerializer` to emit compact DSSE (`stella.ops/cvssReceipt@v1`) and stores base64 DSSE ref in `AttestationRefs`. Added signing test with Ed25519 fixture; total tests 38 passing. | Implementer |
| 2025-11-28 | CVSS-HISTORY-190-007 DONE: Added `ReceiptHistoryService` with amendment tracking (`AmendReceiptRequest`), history entry creation, modified metadata, and optional DSSE re-signing. Repository abstraction extended with `GetAsync`/`UpdateAsync`; in-memory repo updated; tests remain green (38). | Implementer |
| 2025-11-29 | CVSS-RECEIPT/DSSE/HISTORY tasks wired to PostgreSQL: added `policy.cvss_receipts` migration, `PostgresReceiptRepository`, DI registration, and integration test (`PostgresReceiptRepositoryTests`). Test run failed locally because Docker/Testcontainers not available; code compiles and unit tests still pass. | Implementer |
| 2025-11-29 | Marked tasks 8–12 BLOCKED: Concelier ingestion requires cross-module AGENTS; Policy WebService lacks AGENTS, so API/CLI/UI/DOCS cannot proceed under implementer rules. | Implementer |
| 2025-11-28 | Ran `dotnet test src/Policy/__Tests/StellaOps.Policy.Scoring.Tests` (Release); 35 tests passed. Adjusted MacroVector lookup for FIRST sample vectors; duplicate PackageReference warnings remain to be cleaned separately. | Implementer |

@@ -26,7 +26,7 @@
## Delivery Tracker
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
| --- | --- | --- | --- | --- | --- |
| 1 | BENCH-REPO-513-001 | TODO | None; foundational. | Bench Guild · DevOps Guild | Create public repository structure: `benchmark/cases/<lang>/<project>/`, `benchmark/schemas/`, `benchmark/tools/scorer/`, `baselines/`, `ci/`, `website/`. Add LICENSE (Apache-2.0), README, CONTRIBUTING.md. |
| 1 | BENCH-REPO-513-001 | DONE (2025-11-29) | None; foundational. | Bench Guild · DevOps Guild | Create public repository structure: `benchmark/cases/<lang>/<project>/`, `benchmark/schemas/`, `benchmark/tools/scorer/`, `baselines/`, `ci/`, `website/`. Add LICENSE (Apache-2.0), README, CONTRIBUTING.md. |
| 2 | BENCH-SCHEMA-513-002 | TODO | Depends on 513-001. | Bench Guild | Define and publish schemas: `case.schema.yaml` (component, sink, label, evidence), `entrypoints.schema.yaml`, `truth.schema.yaml`, `submission.schema.json`. Include JSON Schema validation. |
| 3 | BENCH-CASES-JS-513-003 | TODO | Depends on 513-002. | Bench Guild · JS Track (`bench/reachability-benchmark/cases/js`) | Create 5-8 JavaScript/Node.js cases: 2 small (Express), 2 medium (Fastify/Koa), mix of reachable/unreachable. Include Dockerfiles, package-lock.json, unit test oracles, coverage output. |
| 4 | BENCH-CASES-PY-513-004 | TODO | Depends on 513-002. | Bench Guild · Python Track (`bench/reachability-benchmark/cases/py`) | Create 5-8 Python cases: Flask, Django, FastAPI. Include requirements.txt pinned, pytest oracles, coverage.py output. |

@@ -83,3 +83,4 @@
| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2025-11-27 | Sprint created from product advisory `24-Nov-2025 - Designing a Deterministic Reachability Benchmark.md`; 17 tasks defined across 5 waves. | Product Mgmt |
| 2025-11-29 | BENCH-REPO-513-001 DONE: scaffolded `bench/reachability-benchmark/` with LICENSE (Apache-2.0), NOTICE, README, CONTRIBUTING, .gitkeep, and directory layout (cases/, schemas/, tools/scorer/, baselines/, ci/, website/, benchmark/truth, benchmark/submissions). | Implementer |

@@ -21,24 +21,24 @@
## Delivery Tracker
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
| --- | --- | --- | --- | --- | --- |
| 1 | PG-T1.1 | TODO | Depends on PG-T0.7 | Authority Guild | Create `StellaOps.Authority.Storage.Postgres` project structure |
| 2 | PG-T1.2.1 | TODO | Depends on PG-T1.1 | Authority Guild | Create schema migration for `authority` schema |
| 3 | PG-T1.2.2 | TODO | Depends on PG-T1.2.1 | Authority Guild | Create `tenants` table with indexes |
| 4 | PG-T1.2.3 | TODO | Depends on PG-T1.2.1 | Authority Guild | Create `users`, `roles`, `permissions` tables |
| 5 | PG-T1.2.4 | TODO | Depends on PG-T1.2.1 | Authority Guild | Create `tokens`, `refresh_tokens`, `api_keys` tables |
| 6 | PG-T1.2.5 | TODO | Depends on PG-T1.2.1 | Authority Guild | Create `sessions`, `audit` tables |
| 7 | PG-T1.3 | TODO | Depends on PG-T1.2 | Authority Guild | Implement `AuthorityDataSource` class |
| 8 | PG-T1.4.1 | TODO | Depends on PG-T1.3 | Authority Guild | Implement `ITenantRepository` |
| 9 | PG-T1.4.2 | TODO | Depends on PG-T1.3 | Authority Guild | Implement `IUserRepository` with password hash handling |
| 10 | PG-T1.4.3 | TODO | Depends on PG-T1.3 | Authority Guild | Implement `IRoleRepository` |
| 11 | PG-T1.4.4 | TODO | Depends on PG-T1.3 | Authority Guild | Implement `IPermissionRepository` |
| 12 | PG-T1.5.1 | TODO | Depends on PG-T1.3 | Authority Guild | Implement `ITokenRepository` |
| 13 | PG-T1.5.2 | TODO | Depends on PG-T1.3 | Authority Guild | Implement `IRefreshTokenRepository` |
| 14 | PG-T1.5.3 | TODO | Depends on PG-T1.3 | Authority Guild | Implement `IApiKeyRepository` |
| 15 | PG-T1.6.1 | TODO | Depends on PG-T1.3 | Authority Guild | Implement `ISessionRepository` |
| 16 | PG-T1.6.2 | TODO | Depends on PG-T1.3 | Authority Guild | Implement `IAuditRepository` |
| 17 | PG-T1.7 | TODO | Depends on PG-T1.4-6 | Authority Guild | Add configuration switch in `ServiceCollectionExtensions` |
| 18 | PG-T1.8.1 | TODO | Depends on PG-T1.7 | Authority Guild | Write integration tests for all repositories |
| 1 | PG-T1.1 | DONE | Completed in Phase 0 | Authority Guild | Create `StellaOps.Authority.Storage.Postgres` project structure |
| 2 | PG-T1.2.1 | DONE | Completed in Phase 0 | Authority Guild | Create schema migration for `authority` schema |
| 3 | PG-T1.2.2 | DONE | Completed in Phase 0 | Authority Guild | Create `tenants` table with indexes |
| 4 | PG-T1.2.3 | DONE | Completed in Phase 0 | Authority Guild | Create `users`, `roles`, `permissions` tables |
| 5 | PG-T1.2.4 | DONE | Completed in Phase 0 | Authority Guild | Create `tokens`, `refresh_tokens`, `api_keys` tables |
| 6 | PG-T1.2.5 | DONE | Completed in Phase 0 | Authority Guild | Create `sessions`, `audit` tables |
| 7 | PG-T1.3 | DONE | Completed in Phase 0 | Authority Guild | Implement `AuthorityDataSource` class |
| 8 | PG-T1.4.1 | DONE | Completed in Phase 0 | Authority Guild | Implement `ITenantRepository` |
| 9 | PG-T1.4.2 | DONE | Completed in Phase 0 | Authority Guild | Implement `IUserRepository` with password hash handling |
| 10 | PG-T1.4.3 | DONE | Completed 2025-11-29 | Authority Guild | Implement `IRoleRepository` |
| 11 | PG-T1.4.4 | DONE | Completed 2025-11-29 | Authority Guild | Implement `IPermissionRepository` |
| 12 | PG-T1.5.1 | DONE | Completed 2025-11-29 | Authority Guild | Implement `ITokenRepository` |
| 13 | PG-T1.5.2 | DONE | Completed 2025-11-29 | Authority Guild | Implement `IRefreshTokenRepository` |
| 14 | PG-T1.5.3 | DONE | Completed 2025-11-29 | Authority Guild | Implement `IApiKeyRepository` |
| 15 | PG-T1.6.1 | DONE | Completed 2025-11-29 | Authority Guild | Implement `ISessionRepository` |
| 16 | PG-T1.6.2 | DONE | Completed 2025-11-29 | Authority Guild | Implement `IAuditRepository` |
| 17 | PG-T1.7 | DONE | Completed 2025-11-29 | Authority Guild | Add configuration switch in `ServiceCollectionExtensions` |
| 18 | PG-T1.8.1 | DONE | Completed 2025-11-29 | Authority Guild | Write integration tests for all repositories |
| 19 | PG-T1.8.2 | TODO | Depends on PG-T1.8.1 | Authority Guild | Write determinism tests for token generation |
| 20 | PG-T1.9 | TODO | Depends on PG-T1.8 | Authority Guild | Optional: Implement dual-write wrapper for Tier A verification |
| 21 | PG-T1.10 | TODO | Depends on PG-T1.8 | Authority Guild | Run backfill from MongoDB to PostgreSQL |

@@ -49,6 +49,9 @@
| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2025-11-28 | Sprint file created | Planning |
| 2025-11-29 | All repository implementations completed (PG-T1.1 through PG-T1.6.2) | Claude |
| 2025-11-29 | ServiceCollectionExtensions updated with all repository registrations (PG-T1.7) | Claude |
| 2025-11-29 | Integration tests created for all repositories (PG-T1.8.1) | Claude |

## Decisions & Risks
- Password hashes stored as TEXT; Argon2id parameters in separate columns.

@@ -21,22 +21,22 @@
## Delivery Tracker
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
| --- | --- | --- | --- | --- | --- |
| 1 | PG-T2.1 | TODO | Depends on PG-T0.7 | Scheduler Guild | Create `StellaOps.Scheduler.Storage.Postgres` project structure |
| 2 | PG-T2.2.1 | TODO | Depends on PG-T2.1 | Scheduler Guild | Create schema migration for `scheduler` schema |
| 3 | PG-T2.2.2 | TODO | Depends on PG-T2.2.1 | Scheduler Guild | Create `jobs` table with status enum and indexes |
| 4 | PG-T2.2.3 | TODO | Depends on PG-T2.2.1 | Scheduler Guild | Create `triggers` table with cron expression support |
| 5 | PG-T2.2.4 | TODO | Depends on PG-T2.2.1 | Scheduler Guild | Create `workers`, `leases` tables |
| 6 | PG-T2.2.5 | TODO | Depends on PG-T2.2.1 | Scheduler Guild | Create `job_history`, `metrics` tables |
| 7 | PG-T2.3 | TODO | Depends on PG-T2.2 | Scheduler Guild | Implement `SchedulerDataSource` class |
| 8 | PG-T2.4.1 | TODO | Depends on PG-T2.3 | Scheduler Guild | Implement `IJobRepository` with `FOR UPDATE SKIP LOCKED` |
| 9 | PG-T2.4.2 | TODO | Depends on PG-T2.3 | Scheduler Guild | Implement `ITriggerRepository` with next-fire calculation |
| 10 | PG-T2.4.3 | TODO | Depends on PG-T2.3 | Scheduler Guild | Implement `IWorkerRepository` for heartbeat tracking |
| 11 | PG-T2.5.1 | TODO | Depends on PG-T2.3 | Scheduler Guild | Implement distributed lock using `pg_advisory_lock` |
| 12 | PG-T2.5.2 | TODO | Depends on PG-T2.5.1 | Scheduler Guild | Implement `IDistributedLockRepository` interface |
| 13 | PG-T2.6.1 | TODO | Depends on PG-T2.3 | Scheduler Guild | Implement `IJobHistoryRepository` |
| 14 | PG-T2.6.2 | TODO | Depends on PG-T2.3 | Scheduler Guild | Implement `IMetricsRepository` |
| 15 | PG-T2.7 | TODO | Depends on PG-T2.4-6 | Scheduler Guild | Add configuration switch in `ServiceCollectionExtensions` |
| 16 | PG-T2.8.1 | TODO | Depends on PG-T2.7 | Scheduler Guild | Write integration tests for job queue operations |
| 1 | PG-T2.1 | DONE | Completed in Phase 0 | Scheduler Guild | Create `StellaOps.Scheduler.Storage.Postgres` project structure |
| 2 | PG-T2.2.1 | DONE | Completed in Phase 0 | Scheduler Guild | Create schema migration for `scheduler` schema |
| 3 | PG-T2.2.2 | DONE | Completed in Phase 0 | Scheduler Guild | Create `jobs` table with status enum and indexes |
| 4 | PG-T2.2.3 | DONE | Completed in Phase 0 | Scheduler Guild | Create `triggers` table with cron expression support |
| 5 | PG-T2.2.4 | DONE | Completed in Phase 0 | Scheduler Guild | Create `workers`, `leases` tables |
| 6 | PG-T2.2.5 | DONE | Completed in Phase 0 | Scheduler Guild | Create `job_history`, `metrics` tables |
| 7 | PG-T2.3 | DONE | Completed in Phase 0 | Scheduler Guild | Implement `SchedulerDataSource` class |
| 8 | PG-T2.4.1 | DONE | Completed in Phase 0 | Scheduler Guild | Implement `IJobRepository` with `FOR UPDATE SKIP LOCKED` |
| 9 | PG-T2.4.2 | DONE | Completed 2025-11-29 | Scheduler Guild | Implement `ITriggerRepository` with next-fire calculation |
| 10 | PG-T2.4.3 | DONE | Completed 2025-11-29 | Scheduler Guild | Implement `IWorkerRepository` for heartbeat tracking |
| 11 | PG-T2.5.1 | DONE | Completed 2025-11-29 | Scheduler Guild | Implement distributed lock using `pg_advisory_lock` |
| 12 | PG-T2.5.2 | DONE | Completed 2025-11-29 | Scheduler Guild | Implement `IDistributedLockRepository` interface |
| 13 | PG-T2.6.1 | DONE | Completed 2025-11-29 | Scheduler Guild | Implement `IJobHistoryRepository` |
| 14 | PG-T2.6.2 | DONE | Completed 2025-11-29 | Scheduler Guild | Implement `IMetricsRepository` |
| 15 | PG-T2.7 | DONE | Completed 2025-11-29 | Scheduler Guild | Add configuration switch in `ServiceCollectionExtensions` |
| 16 | PG-T2.8.1 | DONE | Completed 2025-11-29 | Scheduler Guild | Write integration tests for job queue operations |
| 17 | PG-T2.8.2 | TODO | Depends on PG-T2.8.1 | Scheduler Guild | Write determinism tests for trigger calculations |
| 18 | PG-T2.8.3 | TODO | Depends on PG-T2.8.1 | Scheduler Guild | Write concurrency tests for distributed locking |
| 19 | PG-T2.9 | TODO | Depends on PG-T2.8 | Scheduler Guild | Run backfill from MongoDB to PostgreSQL |

@@ -47,6 +47,9 @@
| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2025-11-28 | Sprint file created | Planning |
| 2025-11-29 | All repository implementations completed (PG-T2.1 through PG-T2.6.2) | Claude |
| 2025-11-29 | ServiceCollectionExtensions updated with all repository registrations (PG-T2.7) | Claude |
| 2025-11-29 | Integration tests created for Trigger, DistributedLock, Worker repositories (PG-T2.8.1) | Claude |

## Decisions & Risks
- PostgreSQL advisory locks replace MongoDB distributed locks.

@@ -21,31 +21,31 @@
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | PG-T3.1 | DONE | Completed in Phase 0 | Notify Guild | Create `StellaOps.Notify.Storage.Postgres` project structure |
| 2 | PG-T3.2.1 | DONE | Completed in Phase 0 | Notify Guild | Create schema migration for `notify` schema |
| 3 | PG-T3.2.2 | DONE | Completed in Phase 0 | Notify Guild | Create `channels` table (email, slack, teams, webhook) |
| 4 | PG-T3.2.3 | DONE | Completed in Phase 0 | Notify Guild | Create `rules`, `templates` tables |
| 5 | PG-T3.2.4 | DONE | Completed in Phase 0 | Notify Guild | Create `deliveries` table with status tracking |
| 6 | PG-T3.2.5 | DONE | Completed in Phase 0 | Notify Guild | Create `digests`, `quiet_hours`, `maintenance_windows` tables |
| 7 | PG-T3.2.6 | DONE | Completed in Phase 0 | Notify Guild | Create `escalation_policies`, `escalation_states` tables |
| 8 | PG-T3.2.7 | DONE | Completed in Phase 0 | Notify Guild | Create `on_call_schedules`, `inbox`, `incidents` tables |
| 9 | PG-T3.3 | DONE | Completed in Phase 0 | Notify Guild | Implement `NotifyDataSource` class |
| 10 | PG-T3.4.1 | DONE | Completed in Phase 0 | Notify Guild | Implement `IChannelRepository` |
| 11 | PG-T3.4.2 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IRuleRepository` with filter JSONB |
| 12 | PG-T3.4.3 | DONE | Completed 2025-11-29 | Notify Guild | Implement `ITemplateRepository` with localization |
| 13 | PG-T3.5.1 | DONE | Completed in Phase 0 | Notify Guild | Implement `IDeliveryRepository` with status transitions |
| 14 | PG-T3.5.2 | DONE | Completed in Phase 0 | Notify Guild | Implement retry logic for failed deliveries |
| 15 | PG-T3.6.1 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IDigestRepository` |
| 16 | PG-T3.6.2 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IQuietHoursRepository` |
| 17 | PG-T3.6.3 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IMaintenanceWindowRepository` |
| 18 | PG-T3.7.1 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IEscalationPolicyRepository` |
| 19 | PG-T3.7.2 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IEscalationStateRepository` |
| 20 | PG-T3.7.3 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IOnCallScheduleRepository` |
| 21 | PG-T3.8.1 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IInboxRepository` |
| 22 | PG-T3.8.2 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IIncidentRepository` |
| 23 | PG-T3.8.3 | DONE | Completed 2025-11-29 | Notify Guild | Implement `IAuditRepository` |
| 24 | PG-T3.9 | DONE | Completed 2025-11-29 | Notify Guild | Add configuration switch in `ServiceCollectionExtensions` |
| 25 | PG-T3.10.1 | DONE | Completed 2025-11-29 | Notify Guild | Write integration tests for all repositories |
| 26 | PG-T3.10.2 | TODO | Depends on PG-T3.10.1 | Notify Guild | Test notification delivery flow end-to-end |
| 27 | PG-T3.10.3 | TODO | Depends on PG-T3.10.1 | Notify Guild | Test escalation handling |
| 28 | PG-T3.10.4 | TODO | Depends on PG-T3.10.1 | Notify Guild | Test digest aggregation |

@@ -55,6 +55,9 @@
| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2025-11-28 | Sprint file created | Planning |
| 2025-11-29 | All repository implementations completed (PG-T3.1 through PG-T3.8.3) | Claude |
| 2025-11-29 | ServiceCollectionExtensions updated with all repository registrations (PG-T3.9) | Claude |
| 2025-11-29 | Integration tests created for Channel, Delivery, Rule, Template, Inbox, Digest, NotifyAudit repositories (PG-T3.10.1) | Claude |

## Decisions & Risks

- Channel configurations stored as JSONB for flexibility across channel types.

@@ -21,26 +21,26 @@
## Delivery Tracker

| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
| --- | --- | --- | --- | --- | --- |
| 1 | PG-T4.1 | DONE | Completed in Phase 0 | Policy Guild | Create `StellaOps.Policy.Storage.Postgres` project structure |
| 2 | PG-T4.2.1 | DONE | Completed in Phase 0 | Policy Guild | Create schema migration for `policy` schema |
| 3 | PG-T4.2.2 | DONE | Completed in Phase 0 | Policy Guild | Create `packs`, `pack_versions` tables |
| 4 | PG-T4.2.3 | DONE | Completed in Phase 0 | Policy Guild | Create `rules` table with Rego content |
| 5 | PG-T4.2.4 | DONE | Completed in Phase 0 | Policy Guild | Create `risk_profiles` table with version history |
| 6 | PG-T4.2.5 | DONE | Completed in Phase 0 | Policy Guild | Create `evaluation_runs`, `explanations` tables |
| 7 | PG-T4.2.6 | DONE | Completed in Phase 0 | Policy Guild | Create `exceptions`, `audit` tables |
| 8 | PG-T4.3 | DONE | Completed in Phase 0 | Policy Guild | Implement `PolicyDataSource` class |
| 9 | PG-T4.4.1 | DONE | Completed in Phase 0 | Policy Guild | Implement `IPackRepository` with CRUD |
| 10 | PG-T4.4.2 | DONE | Completed in Phase 0 | Policy Guild | Implement version management for packs |
| 11 | PG-T4.4.3 | DONE | Completed in Phase 0 | Policy Guild | Implement active version promotion |
| 12 | PG-T4.5.1 | DONE | Completed in Phase 0 | Policy Guild | Implement `IRiskProfileRepository` |
| 13 | PG-T4.5.2 | DONE | Completed in Phase 0 | Policy Guild | Implement version history for risk profiles |
| 14 | PG-T4.5.3 | DONE | Completed in Phase 0 | Policy Guild | Implement `GetVersionAsync` and `ListVersionsAsync` |
| 15 | PG-T4.6.1 | DONE | Completed in Phase 0 | Policy Guild | Implement `IEvaluationRunRepository` |
| 16 | PG-T4.6.2 | DONE | Completed 2025-11-29 | Policy Guild | Implement `IExplanationRepository` |
| 17 | PG-T4.6.3 | DONE | Completed in Phase 0 | Policy Guild | Implement `IExceptionRepository` |
| 18 | PG-T4.6.4 | DONE | Completed 2025-11-29 | Policy Guild | Implement `IAuditRepository` |
| 19 | PG-T4.7 | DONE | Completed 2025-11-29 | Policy Guild | Add configuration switch in `ServiceCollectionExtensions` |
| 20 | PG-T4.8.1 | DONE | Completed 2025-11-29 | Policy Guild | Write integration tests for all repositories |
| 21 | PG-T4.8.2 | TODO | Depends on PG-T4.8.1 | Policy Guild | Test pack versioning workflow |
| 22 | PG-T4.8.3 | TODO | Depends on PG-T4.8.1 | Policy Guild | Test risk profile version history |
| 23 | PG-T4.9 | TODO | Depends on PG-T4.8 | Policy Guild | Export active packs from MongoDB |

@@ -52,6 +52,9 @@
| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2025-11-28 | Sprint file created | Planning |
| 2025-11-29 | All repository implementations completed (PG-T4.1 through PG-T4.6.4) | Claude |
| 2025-11-29 | ServiceCollectionExtensions updated with all repository registrations (PG-T4.7) | Claude |
| 2025-11-29 | Integration tests created for Pack, Rule, Exception, EvaluationRun, RiskProfile, PolicyAudit repositories (PG-T4.8.1) | Claude |

## Decisions & Risks

- Pack versions are immutable once published; new versions create new rows.

@@ -1,602 +0,0 @@
Here’s a simple, low‑friction way to keep priorities fresh without constant manual grooming: **let confidence decay over time**.

`confidence(t) = e^(−t/τ)`

# Exponential confidence decay (what & why)

* **Idea:** Every item (task, lead, bug, doc, hypothesis) has a confidence score that **automatically shrinks with time** if you don’t touch it.
* **Formula:** `confidence(t) = e^(−t/τ)` where `t` is days since last signal (edit, comment, commit, new data), and **τ (“tau”)** is the decay constant.
* **Rule of thumb:** With **τ = 30 days**, at **t = 30** the confidence is **e^(−1) ≈ 0.37**—about a **63% drop**. This surfaces long‑ignored items *gradually*, not with harsh “stale/expired” flips.

# How to use it in practice

* **Signals that reset t → 0:** comment on the ticket, new benchmark, fresh log sample, doc update, CI run, new market news.
* **Sort queues by:** `priority × confidence(t)` (or severity × confidence). Quiet items drift down; truly active ones stay up.
* **Escalation bands:**

  * `>0.6` = green (recently touched)
  * `0.3–0.6` = amber (review soon)
  * `<0.3` = red (poke or close)

# Quick presets

* **Fast‑moving queues (incidents, hot leads):** τ = **7–14** days
* **Engineering tasks / product docs:** τ = **30** days
* **Research bets / roadmaps:** τ = **60–90** days

# For your world (Stella Ops + ops/dev work)

* **Vuln tickets:** `risk_score = CVSS × reachability × e^(−t/30)`
* **Roadmap epics:** `value_score = impact × e^(−t/60)` to re‑rank quarterly.
* **Docs:** show a badge “freshness: 42%” derived from last edit age to nudge updates.

# Minimal implementation sketch

* Store per‑item: `last_signal_at`, `base_priority`.
* Compute on read:

```
days = (now - last_signal_at).days
conf = exp(-days / tau)
score = base_priority * conf
```

* Recompute in your API layer or materialize nightly; no cron spam needed.

If you want, I can draft a tiny C# helper (and SQL snippet) you can drop into your issue service to add `confidence(t)` and color bands to your lists.

Perfect, let’s turn the idea into something your devs can actually build.

Below is an implementation plan you can drop into a ticketing/PRD — with clear phases, data model changes, APIs, and some sample code (C# + SQL). I’ll also sprinkle in Stella Ops–specific notes.

---

## 0. Scope & Objectives

**Goal:** Introduce `confidence(t)` as an automatic freshness factor that decays with time and is used to rank and highlight work.

We’ll apply it to:

* Vulnerabilities (Stella Ops)
* General issues / tasks / epics
* (Optional) Docs, leads, hypotheses later

**Core behavior:**

* Each item has:

  * A base priority / risk (from severity, business impact, etc.)
  * A timestamp of last signal (meaningful activity)
  * A decay rate τ (tau) in days
* Effective priority = `base_priority × confidence(t)`
* `confidence(t) = exp(− t / τ)` where `t` = days since last_signal

---
## 1. Data Model Changes

### 1.1. Add fields to core “work item” tables

For each relevant table (`Issues`, `Vulnerabilities`, `Epics`, …):

**New columns:**

* `base_priority` (FLOAT or INT)

  * Example: 1–100, or derived from severity.
* `last_signal_at` (DATETIME, NOT NULL, default = `created_at`)
* `tau_days` (FLOAT, nullable, falls back to type default)
* (Optional) `confidence_cached` (FLOAT, for materialized score)
* (Optional) `is_confidence_frozen` (BOOL, default FALSE)
  For pinned items that should not decay.

**Example Postgres migration (Issues):**

```sql
ALTER TABLE issues
  ADD COLUMN base_priority DOUBLE PRECISION,
  ADD COLUMN last_signal_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  ADD COLUMN tau_days DOUBLE PRECISION,
  ADD COLUMN confidence_cached DOUBLE PRECISION,
  ADD COLUMN is_confidence_frozen BOOLEAN NOT NULL DEFAULT FALSE;
```

For Stella Ops:

```sql
ALTER TABLE vulnerabilities
  ADD COLUMN base_risk DOUBLE PRECISION,
  ADD COLUMN last_signal_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  ADD COLUMN tau_days DOUBLE PRECISION,
  ADD COLUMN confidence_cached DOUBLE PRECISION,
  ADD COLUMN is_confidence_frozen BOOLEAN NOT NULL DEFAULT FALSE;
```

### 1.2. Add a config table for τ per entity type

```sql
CREATE TABLE confidence_decay_config (
  id SERIAL PRIMARY KEY,
  entity_type TEXT NOT NULL, -- 'incident', 'issue', 'vulnerability', 'epic', 'doc'
  tau_days_default DOUBLE PRECISION NOT NULL,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

INSERT INTO confidence_decay_config (entity_type, tau_days_default) VALUES
  ('incident', 7),
  ('vulnerability', 30),
  ('issue', 30),
  ('epic', 60),
  ('doc', 90);
```

---
## 2. Define “signal” events & instrumentation

We need a standardized way to say: “this item got activity → reset last_signal_at”.

### 2.1. Signals that should reset `last_signal_at`

For **issues / epics:**

* New comment
* Status change (e.g., Open → In Progress)
* Field change that matters (severity, owner, milestone)
* Attachment added
* Link to PR added or updated
* New CI failure linked

For **vulnerabilities (Stella Ops):**

* New scanner result attached or status updated (e.g., “Verified”, “False Positive”)
* New evidence (PoC, exploit notes)
* SLA override change
* Assignment / ownership change
* Integration events (e.g., PR merge that references the vuln)

For **docs (if you do it):**

* Any edit
* Comment/annotation

### 2.2. Implement a shared helper to record a signal

**Service-level helper (pseudocode / C#-ish):**

```csharp
public interface IConfidenceSignalService
{
    Task RecordSignalAsync(WorkItemType type, Guid itemId, DateTime? signalTimeUtc = null);
}

public class ConfidenceSignalService : IConfidenceSignalService
{
    private readonly IWorkItemRepository _repo;
    private readonly IConfidenceConfigService _config;

    public ConfidenceSignalService(IWorkItemRepository repo, IConfidenceConfigService config)
    {
        _repo = repo;
        _config = config;
    }

    public async Task RecordSignalAsync(WorkItemType type, Guid itemId, DateTime? signalTimeUtc = null)
    {
        var now = signalTimeUtc ?? DateTime.UtcNow;
        var item = await _repo.GetByIdAsync(type, itemId);
        if (item == null) return;

        item.LastSignalAt = now;

        if (item.TauDays == null)
        {
            item.TauDays = await _config.GetDefaultTauAsync(type);
        }

        await _repo.UpdateAsync(item);
    }
}
```

### 2.3. Wire signals into existing flows

Create small tasks for devs like:

* **ISS-01:** Call `RecordSignalAsync` on:

  * New issue comment handler
  * Issue status update handler
  * Issue field update handler (severity/priority/owner)
* **VULN-01:** Call `RecordSignalAsync` when:

  * New scanner result ingested for a vuln
  * Vulnerability status, SLA, or owner changes
  * New exploit evidence is attached

---
## 3. Confidence & scoring calculation

### 3.1. Shared confidence function

Definition:

```csharp
public static class ConfidenceMath
{
    // t = days since last signal
    public static double ConfidenceScore(DateTime lastSignalAtUtc, double tauDays, DateTime? nowUtc = null)
    {
        var now = nowUtc ?? DateTime.UtcNow;
        var tDays = (now - lastSignalAtUtc).TotalDays;

        if (tDays <= 0) return 1.0;
        if (tauDays <= 0) return 1.0; // guard / fallback

        var score = Math.Exp(-tDays / tauDays);

        // Optional: never drop below a tiny floor, so items never "disappear"
        const double floor = 0.01;
        return Math.Max(score, floor);
    }
}
```

### 3.2. Effective priority formulas

**Generic issues / tasks:**

```csharp
double effectiveScore = issue.BasePriority * ConfidenceMath.ConfidenceScore(issue.LastSignalAt, issue.TauDays ?? defaultTau);
```

**Vulnerabilities (Stella Ops):**

Let’s define:

* `severity_weight`: map CVSS or severity string to numeric (e.g. Critical=100, High=80, Medium=50, Low=20).
* `reachability`: 0–1 (e.g. from your reachability analysis).
* `exploitability`: 0–1 (optional, based on known exploits).
* `confidence`: as above.

```csharp
double baseRisk = severityWeight * reachability * exploitability; // or simpler: severityWeight * reachability
double conf = ConfidenceMath.ConfidenceScore(vuln.LastSignalAt, vuln.TauDays ?? defaultTau);
double effectiveRisk = baseRisk * conf;
```

Store `baseRisk` → `vulnerabilities.base_risk`, and compute `effectiveRisk` on the fly or via job.

### 3.3. SQL implementation (optional for server-side sorting)

**Postgres example:**

```sql
-- t_days = age in days
-- tau    = tau_days
-- score  = exp(-t_days / tau)

SELECT
  i.*,
  i.base_priority *
    GREATEST(
      EXP(- EXTRACT(EPOCH FROM (NOW() - i.last_signal_at)) / (86400 * COALESCE(i.tau_days, 30))),
      0.01
    ) AS effective_priority
FROM issues i
ORDER BY effective_priority DESC;
```

You can wrap that in a view:

```sql
CREATE VIEW issues_with_confidence AS
SELECT
  i.*,
  GREATEST(
    EXP(- EXTRACT(EPOCH FROM (NOW() - i.last_signal_at)) / (86400 * COALESCE(i.tau_days, 30))),
    0.01
  ) AS confidence,
  i.base_priority *
    GREATEST(
      EXP(- EXTRACT(EPOCH FROM (NOW() - i.last_signal_at)) / (86400 * COALESCE(i.tau_days, 30))),
      0.01
    ) AS effective_priority
FROM issues i;
```

---
## 4. Caching & performance

You have two options:

### 4.1. Compute on read (simplest to start)

* Use the helper function in your service layer or a DB view.
* Pros:

  * No jobs, always fresh.
* Cons:

  * Slight CPU cost on heavy lists.

**Plan:** Start with this. If you see perf issues, move to 4.2.

### 4.2. Periodic materialization job (optional later)

Add a scheduled job (e.g. hourly) that:

1. Selects all active items.
2. Computes `confidence_score` and `effective_priority`.
3. Writes to `confidence_cached` and `effective_priority_cached` (if you add such a column).

Service then sorts by cached values.

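A minimal sketch of that job, assuming a hypothetical `IWorkItemRepository` with `ListActiveAsync`/`UpdateAsync` and cached-score properties — the names are illustrative, not an existing API:

```csharp
// Hourly job: materialize confidence and effective priority for active items.
public class ConfidenceMaterializationJob
{
    private readonly IWorkItemRepository _repo;

    public ConfidenceMaterializationJob(IWorkItemRepository repo) => _repo = repo;

    public async Task RunAsync(double defaultTau, CancellationToken ct)
    {
        var now = DateTime.UtcNow; // one timestamp per run keeps the batch consistent
        foreach (var item in await _repo.ListActiveAsync(ct))
        {
            var conf = ConfidenceMath.ConfidenceScore(item.LastSignalAt, item.TauDays ?? defaultTau, now);
            item.ConfidenceCached = conf;
            item.EffectivePriorityCached = item.BasePriority * conf;
            await _repo.UpdateAsync(item);
        }
    }
}
```
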
---

## 5. Backfill & migration

### 5.1. Initial backfill script

For existing records:

* If `last_signal_at` is NULL → set to `created_at`.
* Derive `base_priority` / `base_risk` from existing severity fields.
* Set `tau_days` from config.

**Example:**

```sql
UPDATE issues
SET last_signal_at = created_at
WHERE last_signal_at IS NULL;

UPDATE issues
SET base_priority = CASE severity
    WHEN 'critical' THEN 100
    WHEN 'high' THEN 80
    WHEN 'medium' THEN 50
    WHEN 'low' THEN 20
    ELSE 10
  END
WHERE base_priority IS NULL;

UPDATE issues i
SET tau_days = c.tau_days_default
FROM confidence_decay_config c
WHERE c.entity_type = 'issue'
  AND i.tau_days IS NULL;
```

Do similarly for `vulnerabilities` using severity / CVSS.

### 5.2. Sanity checks

Add a small script/test to verify (see the test sketch after this list):

* Newly created items → `confidence ≈ 1.0`.
* 30-day-old items with τ=30 → `confidence ≈ 0.37`.
* Ordering changes when you edit/comment on items.

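A minimal xUnit-style sketch of those checks, assuming the `ConfidenceMath` helper from §3.1 is referenced by the test project:

```csharp
using System;
using Xunit;

public class ConfidenceMathTests
{
    [Fact]
    public void FreshItem_HasFullConfidence()
    {
        var now = DateTime.UtcNow;
        Assert.Equal(1.0, ConfidenceMath.ConfidenceScore(now, tauDays: 30, nowUtc: now));
    }

    [Fact]
    public void ThirtyDayOldItem_WithTau30_DecaysToAbout037()
    {
        var now = DateTime.UtcNow;
        var conf = ConfidenceMath.ConfidenceScore(now.AddDays(-30), tauDays: 30, nowUtc: now);
        Assert.InRange(conf, 0.36, 0.38); // e^(−1) ≈ 0.3679
    }
}
```
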
---

## 6. API & Query Layer

### 6.1. New sorting options

Update list APIs:

* Accept parameter: `sort=effective_priority` or `sort=confidence`.
* Default sort for some views:

  * Vulnerabilities backlog: `sort=effective_risk` (risk × confidence).
  * Issues backlog: `sort=effective_priority`.

**Example REST API contract:**

`GET /api/issues?sort=effective_priority&state=open`

**Response fields (additions):**

```json
{
  "id": "ISS-123",
  "title": "Fix login bug",
  "base_priority": 80,
  "last_signal_at": "2025-11-01T10:00:00Z",
  "tau_days": 30,
  "confidence": 0.63,
  "effective_priority": 50.4,
  "confidence_band": "amber"
}
```

### 6.2. Confidence banding (for UI)

Define bands server-side (easy to change):

* Green: `confidence >= 0.6`
* Amber: `0.3 ≤ confidence < 0.6`
* Red: `confidence < 0.3`

You can compute on server:

```csharp
string ConfidenceBand(double confidence) =>
    confidence >= 0.6 ? "green"
    : confidence >= 0.3 ? "amber"
    : "red";
```

---
## 7. UI / UX changes

### 7.1. List views (issues / vulns / epics)

For each item row:

* Show a small freshness pill:

  * Text: `Active`, `Review soon`, `Stale`
  * Derived from confidence band.
* Tooltip:

  * “Confidence 78%. Last activity 3 days ago. τ = 30 days.”
* Sort default: by `effective_priority` / `effective_risk`.
* Filters:

  * `Freshness: [All | Active | Review soon | Stale]`
  * Optionally: “Show stale only” toggle.

**Example labels:**

* Green: “Active (confidence 82%)”
* Amber: “Review soon (confidence 45%)”
* Red: “Stale (confidence 18%)”

### 7.2. Detail views

On an issue / vuln page:

* Add a “Confidence” section:

  * “Confidence: **52%**”
  * “Last signal: **12 days ago**”
  * “Decay τ: **30 days**”
  * “Effective priority: **Base 80 × 0.52 ≈ 42**”
* (Optional) small mini-chart (text-only or simple bar) showing approximate decay, but not necessary for first iteration.

### 7.3. Admin / settings UI

Add an internal settings page:

* Table of entity types with editable τ:

| Entity type   | τ (days) | Notes                        |
| ------------- | -------- | ---------------------------- |
| Incident      | 7        | Fast-moving                  |
| Vulnerability | 30       | Standard risk review cadence |
| Issue         | 30       | Sprint-level decay           |
| Epic          | 60       | Quarterly                    |
| Doc           | 90       | Slow decay                   |

* Optionally: toggle to pin item (`is_confidence_frozen`) from UI.

---
## 8. Stella Ops–specific behavior

For vulnerabilities:

### 8.1. Base risk calculation

Ingested fields you likely already have:

* `cvss_score` or `severity`
* `reachable` (true/false or numeric)
* (Optional) `exploit_available` (bool) or exploitability score
* `asset_criticality` (1–5)

Define `base_risk` as:

```text
severity_weight = f(cvss_score or severity)
reachability    = reachable ? 1.0 : 0.5                   -- example
exploitability  = exploit_available ? 1.0 : 0.7
asset_factor    = 1.0 + 0.125 * (asset_criticality - 1)   -- 1 → 1.0, 5 → 1.5

base_risk = severity_weight * reachability * exploitability * asset_factor
```

Store `base_risk` on vuln row.

Then:

```text
effective_risk = base_risk * confidence(t)
```

Use `effective_risk` for backlog ordering and SLA dashboards.

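The same calculation as a C# sketch, mirroring the weights above (the method and parameter names are illustrative, not a shipped API):

```csharp
// Sketch: map ingested fields to base_risk per the pseudo-formula above.
public static double BaseRisk(string severity, bool reachable, bool exploitAvailable, int assetCriticality)
{
    double severityWeight = severity switch
    {
        "critical" => 100,
        "high"     => 80,
        "medium"   => 50,
        "low"      => 20,
        _          => 10
    };
    double reachability   = reachable ? 1.0 : 0.5;
    double exploitability = exploitAvailable ? 1.0 : 0.7;
    double assetFactor    = 1.0 + 0.125 * (assetCriticality - 1); // 1 → 1.0, 5 → 1.5

    return severityWeight * reachability * exploitability * assetFactor;
}
```
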
### 8.2. Signals for vulns

Make sure these all call `RecordSignalAsync(Vulnerability, vulnId)`:

* New scan result for same vuln (re-detected).
* Change status to “In Progress”, “Ready for Deploy”, “Verified Fixed”, etc.
* Assigning an owner.
* Attaching PoC / exploit details.

### 8.3. Vuln UI copy ideas

* Pill text:

  * “Risk: 850 (confidence 68%)”
  * “Last analyst activity 11 days ago”
* In backlog view: show **Effective Risk** as main sort, with a smaller subtext “Base 1200 × Confidence 71%”.

---
## 9. Rollout plan

### Phase 1 – Infrastructure (backend-only)

* [ ] DB migrations & config table
* [ ] Implement `ConfidenceMath` and helper functions
* [ ] Implement `IConfidenceSignalService`
* [ ] Wire signals into key flows (comments, state changes, scanner ingestion)
* [ ] Add `confidence` and `effective_priority/risk` to API responses
* [ ] Backfill script + dry run in staging

### Phase 2 – Internal UI & feature flag

* [ ] Add optional sorting by effective score to internal/staff views
* [ ] Add confidence pill (hidden behind feature flag `confidence_decay_v1`)
* [ ] Dogfood internally:

  * Do items bubble up/down as expected?
  * Are any items “disappearing” because decay is too aggressive?

### Phase 3 – Parameter tuning

* [ ] Adjust τ per type based on feedback:

  * If things decay too fast → increase τ
  * If queues rarely change → decrease τ
* [ ] Decide on confidence floor (0.01? 0.05?) so nothing goes to literal 0.

### Phase 4 – General release

* [ ] Make effective score the default sort for key views:

  * Vulnerabilities backlog
  * Issues backlog
* [ ] Document behavior for users (help center / inline tooltip)
* [ ] Add admin UI to tweak τ per entity type.

---
## 10. Edge cases & safeguards

* **New items**

  * `last_signal_at = created_at`, confidence = 1.0.
* **Pinned items**

  * If `is_confidence_frozen = true` → treat confidence as 1.0.
* **Items without τ**

  * Always fall back to the entity type default.
* **Timezones**

  * Always store & compute in UTC.
* **Very old items**

  * Floor the confidence so they’re still visible when explicitly searched.

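Those rules collapse into a small guard, sketched here against the hypothetical `WorkItem` shape from §1.1:

```csharp
// Pinned items never decay; others fall back to the type-default τ.
// The 0.01 floor inside ConfidenceScore keeps very old items visible.
public static double EffectiveConfidence(WorkItem item, double defaultTau) =>
    item.IsConfidenceFrozen
        ? 1.0
        : ConfidenceMath.ConfidenceScore(item.LastSignalAt, item.TauDays ?? defaultTau);
```
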
---

If you want, I can turn this into:

* A short **technical design doc** (with sections: Problem, Proposal, Alternatives, Rollout).
* Or a **set of Jira tickets** grouped by backend / frontend / infra that your team can pick up directly.

@@ -0,0 +1,402 @@
# CLI Developer Experience and Command UX

**Version:** 1.0
**Date:** 2025-11-29
**Status:** Canonical

This advisory defines the product rationale, command surface design, and implementation strategy for the Stella Ops CLI, covering developer experience, CI/CD integration, output formatting, and offline operation.

---
## 1. Executive Summary

The Stella Ops CLI is the **primary interface for developers and CI/CD pipelines** interacting with the platform. Key capabilities:

- **Native AOT Binary** - Sub-20ms startup, single binary distribution
- **DPoP-Bound Authentication** - Secure device-code and service principal flows
- **Deterministic Outputs** - JSON/table modes with stable exit codes for CI
- **Buildx Integration** - SBOM generation at build time
- **Offline Kit Management** - Air-gapped deployment support
- **Shell Completions** - Bash/Zsh/Fish/PowerShell auto-complete

---
## 2. Market Drivers

### 2.1 Target Segments

| Segment | CLI Requirements | Use Case |
|---------|-----------------|----------|
| **DevSecOps** | CI integration, exit codes, JSON output | Pipeline gates |
| **Security Engineers** | Verification commands, policy testing | Audit workflows |
| **Platform Operators** | Offline kit, admin commands | Air-gap management |
| **Developers** | Scan commands, buildx integration | Local development |

### 2.2 Competitive Positioning

Most CLI tools in the vulnerability space are slow or lack CI ergonomics. Stella Ops differentiates with:
- **Native AOT** for instant startup (< 20ms vs 500ms+ for JIT)
- **Deterministic exit codes** (a documented, stable code for every failure mode, enabling CI decision trees)
- **DPoP security** (no long-lived tokens on disk)
- **Unified command surface** (50+ commands, consistent patterns)
- **Offline-first design** (works without network in sealed mode)

---
## 3. Command Surface Architecture

### 3.1 Command Categories

| Category | Commands | Purpose |
|----------|----------|---------|
| **Auth** | `login`, `logout`, `status`, `token` | Authentication management |
| **Scan** | `scan image`, `scan fs` | Vulnerability scanning |
| **Export** | `export sbom`, `report final` | Artifact retrieval |
| **Verify** | `verify attestation`, `verify referrers`, `verify image-signature` | Cryptographic verification |
| **Policy** | `policy get`, `policy set`, `policy apply` | Policy management |
| **Buildx** | `buildx install`, `buildx verify`, `buildx build` | Build-time SBOM |
| **Runtime** | `runtime policy test` | Zastava integration |
| **Offline** | `offline kit pull`, `offline kit import`, `offline kit status` | Air-gap operations |
| **Decision** | `decision export`, `decision verify`, `decision compare` | VEX evidence management |
| **AOC** | `sources ingest`, `aoc verify` | Aggregation-only guards |
| **KMS** | `kms export`, `kms import` | Key management |
| **Advise** | `advise run` | AI-powered advisory summaries |

### 3.2 Output Modes

**Human Mode (default):**
```
$ stella scan image nginx:latest --wait
Scanning nginx:latest...
Found 12 vulnerabilities (2 critical, 3 high, 5 medium, 2 low)
Policy verdict: FAIL

Critical:
  - CVE-2025-12345 in openssl (fixed in 3.0.14)
  - CVE-2025-12346 in libcurl (no fix available)

See: https://ui.internal/scans/sha256:abc123...
```

**JSON Mode (`--json`):**
```json
{"event":"scan.complete","status":"fail","critical":2,"high":3,"medium":5,"low":2,"url":"https://..."}
```

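In scripts, the single-line event pairs naturally with `jq` (assuming `jq` is available; the field names follow the sample event above):

```bash
# Gate a pipeline step on the machine-readable scan event.
result=$(stella scan image nginx:latest --wait --json)
echo "$result" | jq -r '.status'        # => "fail"
echo "$result" | jq '.critical + .high' # => 5
```
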
### 3.3 Exit Codes

| Code | Meaning | CI Action |
|------|---------|-----------|
| 0 | Success | Continue |
| 2 | Policy fail | Block deployment |
| 3 | Verification failed | Security alert |
| 4 | Auth error | Re-authenticate |
| 5 | Resource not found | Check inputs |
| 6 | Rate limited | Retry with backoff |
| 7 | Backend unavailable | Retry |
| 9 | Invalid arguments | Fix command |
| 11-17 | AOC guard violations | Review ingestion |
| 18 | Verification truncated | Increase limit |
| 70 | Transport failure | Check network |
| 71 | Usage error | Fix command |

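A CI wrapper can branch on these codes directly — a sketch, with illustrative handling per failure class:

```bash
#!/usr/bin/env bash
# Decision tree over the CLI's stable exit codes.
stella scan image "$IMAGE_REF" --wait --json > scan-results.json
code=$?
case "$code" in
  0) echo "Scan passed" ;;
  2) echo "Policy failed - blocking deployment" >&2; exit 1 ;;
  6) echo "Rate limited - retry with backoff" >&2; exit 1 ;;
  7) echo "Backend unavailable - retry later" >&2; exit 1 ;;
  *) echo "Scan error (exit $code)" >&2; exit "$code" ;;
esac
```
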
---

## 4. Authentication Model

### 4.1 Device Code Flow (Interactive)

```bash
$ stella auth login
Opening browser for authentication...
Device code: ABCD-EFGH
Waiting for authorization...
Logged in as user@example.com (tenant: acme-corp)
```

### 4.2 Service Principal (CI/CD)

```bash
$ stella auth login --client-credentials \
    --client-id $STELLA_CLIENT_ID \
    --private-key $STELLA_PRIVATE_KEY
```

### 4.3 DPoP Key Management

- Ephemeral Ed25519 keypair generated on first login
- Stored in OS keychain (Keychain/DPAPI/KWallet/Gnome Keyring)
- Every request includes DPoP proof header
- Tokens refreshed proactively (30s before expiry)

### 4.4 Token Credential Helper

```bash
# Get one-shot token for curl/scripts
TOKEN=$(stella auth token --aud scanner)
curl -H "Authorization: Bearer $TOKEN" https://scanner.internal/api/...
```

---
## 5. Buildx Integration

### 5.1 Generator Installation

```bash
$ stella buildx install
Installing SBOM generator plugin...
Verifying signature: OK
Generator installed at ~/.docker/cli-plugins/docker-buildx-stellaops

$ stella buildx verify
Docker version: 24.0.7
Buildx version: 0.12.1
Generator: stellaops/sbom-indexer:v1.2.3@sha256:abc123...
Status: Ready
```

### 5.2 Build with SBOM

```bash
$ stella buildx build -t myapp:v1.0.0 --push --attest
Building myapp:v1.0.0...
SBOM generation: enabled (stellaops/sbom-indexer)
Provenance: enabled
Attestation: requested

Build complete!
Image: myapp:v1.0.0@sha256:def456...
SBOM: attached as referrer
Attestation: logged to Rekor (uuid: abc123)
```

---
## 6. Implementation Strategy

### 6.1 Phase 1: Core Commands (Complete)

- [x] Auth commands with DPoP
- [x] Scan/export commands
- [x] JSON output mode
- [x] Exit code standardization
- [x] Shell completions

### 6.2 Phase 2: Buildx & Verification (Complete)

- [x] Buildx plugin management
- [x] Attestation verification
- [x] Referrer verification
- [x] Report commands

### 6.3 Phase 3: Advanced Features (In Progress)

- [x] Decision export/verify commands
- [x] AOC guard helpers
- [x] KMS management
- [ ] Advisory AI integration (CLI-ADVISE-48-001)
- [ ] Filesystem scanning (CLI-SCAN-49-001)

### 6.4 Phase 4: Distribution (Planned)

- [ ] Homebrew formula
- [ ] Scoop/Winget manifests
- [ ] Self-update mechanism
- [ ] Cosign signature verification

---
## 7. CI/CD Integration Patterns

### 7.1 GitHub Actions

```yaml
- name: Install Stella CLI
  run: |
    curl -sSL https://get.stella-ops.io | sh
    echo "$HOME/.stella/bin" >> $GITHUB_PATH

- name: Authenticate
  run: stella auth login --client-credentials
  env:
    STELLAOPS_CLIENT_ID: ${{ secrets.STELLA_CLIENT_ID }}
    STELLAOPS_PRIVATE_KEY: ${{ secrets.STELLA_PRIVATE_KEY }}

- name: Scan Image
  run: |
    # Capture the exit code explicitly; the runner's default shell aborts on any non-zero status.
    stella scan image ${{ env.IMAGE_REF }} --wait --json > scan-results.json || status=$?
    if [ "${status:-0}" -eq 2 ]; then
      echo "::error::Policy failed - blocking deployment"
      exit 1
    fi

- name: Verify Attestation
  run: stella verify attestation --artifact ${{ env.IMAGE_DIGEST }}
```

### 7.2 GitLab CI

```yaml
scan:
  script:
    - stella auth login --client-credentials
    - stella buildx install
    - docker buildx build --attest=type=sbom,generator=stellaops/sbom-indexer -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA .
    - stella scan image $CI_REGISTRY_IMAGE@$IMAGE_DIGEST --wait --json > scan-results.json
  artifacts:
    reports:
      container_scanning: scan-results.json
```

---
## 8. Configuration Model

### 8.1 Precedence

CLI flags > Environment variables > Config file > Defaults

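For example (illustrative; the variable and flag are documented below):

```bash
# Config file sets output.json: false; the env var overrides the file,
# and an explicit --json flag on the command line would override both.
export STELLAOPS_JSON=1
stella scan image nginx:latest --wait   # emits JSON because of the env var
```
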
### 8.2 Config File

```yaml
# ~/.config/stellaops/config.yaml
cli:
  authority: "https://authority.example.com"
  backend:
    scanner: "https://scanner.example.com"
    attestor: "https://attestor.example.com"
  auth:
    deviceCode: true
    audienceDefault: "scanner"
  output:
    json: false
    color: auto
  tls:
    caBundle: "/etc/ssl/certs/ca-bundle.crt"
  offline:
    kitMirror: "s3://mirror/stellaops-kit"
```

### 8.3 Environment Variables

| Variable | Purpose |
|----------|---------|
| `STELLAOPS_AUTHORITY` | Authority URL |
| `STELLAOPS_SCANNER_URL` | Scanner service URL |
| `STELLAOPS_CLIENT_ID` | Service principal ID |
| `STELLAOPS_PRIVATE_KEY` | Service principal key |
| `STELLAOPS_TENANT` | Default tenant |
| `STELLAOPS_JSON` | Enable JSON output |

---
## 9. Offline Operation

### 9.1 Sealed Mode Detection

```bash
$ stella scan image nginx:latest
Error: Sealed mode active - external network access blocked
Remediation: Import offline kit or disable sealed mode

$ stella offline kit import latest-kit.tar.gz
Importing offline kit...
Advisories: 45,230 records
VEX documents: 12,450 records
Policy packs: 3 bundles
Import complete!

$ stella scan image nginx:latest
Scanning with offline data (2025-11-28)...
```

### 9.2 Air-Gap Guard

All HTTP flows route through `StellaOps.AirGap.Policy`. When sealed mode is active:
- External egress is blocked with `AIRGAP_EGRESS_BLOCKED` error
- CLI provides clear remediation guidance
- Local verification continues to work

---
## 10. Security Considerations

### 10.1 Credential Protection

- DPoP private keys stored in OS keychain only
- No plaintext tokens on disk
- Short-lived OpToks held in memory only
- Authorization headers redacted from verbose logs

### 10.2 Binary Verification

```bash
# Verify CLI binary signature
$ stella version --verify
Version: 1.2.3
Built: 2025-11-29T12:00:00Z
Signature: Valid (cosign)
Signer: release@stella-ops.io
```

### 10.3 Hard Lines

- Refuse to print token values
- Disallow `--insecure` without explicit env var opt-in
- Enforce short token TTL with proactive refresh
- Device-code cache bound to machine + user

---
## 11. Performance Targets

| Metric | Target |
|--------|--------|
| Startup time | < 20ms (AOT) |
| Request overhead | < 5ms |
| Large download (100MB) | > 80 MB/s |
| Buildx wrapper overhead | < 1ms |

---
## 12. Related Documentation

| Resource | Location |
|----------|----------|
| CLI architecture | `docs/modules/cli/architecture.md` |
| Policy CLI guide | `docs/modules/cli/guides/policy.md` |
| API/CLI reference | `docs/09_API_CLI_REFERENCE.md` |
| Offline operation | `docs/24_OFFLINE_KIT.md` |

---
## 13. Sprint Mapping

- **Primary Sprint:** SPRINT_0400_cli_ux.md (NEW)
- **Related Sprints:**
  - SPRINT_210_ui_ii.md (UI integration)
  - SPRINT_0187_0001_0001_evidence_locker_cli_integration.md (Evidence CLI)

**Key Task IDs:**
- `CLI-AUTH-10-001` - DPoP authentication (DONE)
- `CLI-SCAN-20-001` - Scan commands (DONE)
- `CLI-BUILDX-30-001` - Buildx integration (DONE)
- `CLI-ADVISE-48-001` - Advisory AI commands (IN PROGRESS)
- `CLI-SCAN-49-001` - Filesystem scanning (TODO)

---
## 14. Success Metrics

| Metric | Target |
|--------|--------|
| Startup latency | < 20ms p99 |
| CI adoption | 80% of pipelines use CLI |
| Exit code coverage | 100% of failure modes |
| Shell completion coverage | 100% of commands |
| Offline operation success | Works without network |

---

*Last updated: 2025-11-29*

@@ -0,0 +1,476 @@
# Concelier Advisory Ingestion Model

**Version:** 1.0
**Date:** 2025-11-29
**Status:** Canonical

This advisory defines the product rationale, ingestion semantics, and implementation strategy for the Concelier module, covering the Link-Not-Merge model, connector pipelines, observation storage, and deterministic exports.

---
## 1. Executive Summary

Concelier is the **advisory ingestion engine** that acquires, normalizes, and correlates vulnerability advisories from authoritative sources. Key capabilities:

- **Aggregation-Only Contract** - No derived semantics in ingestion
- **Link-Not-Merge** - Observations correlated, never merged
- **Multi-Source Connectors** - Vendor PSIRTs, distros, OSS ecosystems
- **Deterministic Exports** - Reproducible JSON, Trivy DB bundles
- **Conflict Detection** - Structured payloads for divergent claims

---
## 2. Market Drivers

### 2.1 Target Segments

| Segment | Ingestion Requirements | Use Case |
|---------|------------------------|----------|
| **Security Teams** | Authoritative data | Accurate vulnerability assessment |
| **Compliance** | Provenance tracking | Audit trail for advisory sources |
| **DevSecOps** | Fast updates | CI/CD pipeline integration |
| **Air-Gap Ops** | Offline bundles | Disconnected environment support |

### 2.2 Competitive Positioning

Most vulnerability databases merge data, losing provenance. Stella Ops differentiates with:
- **Link-Not-Merge** preserving all source claims
- **Conflict visibility** showing where sources disagree
- **Deterministic exports** enabling reproducible builds
- **Multi-format support** (CSAF, OSV, GHSA, vendor-specific)
- **Signature verification** for upstream integrity

---
## 3. Aggregation-Only Contract (AOC)

### 3.1 Core Principles

The AOC ensures ingestion purity:

1. **No derived semantics** - No severity consensus, merged status, or fix hints
2. **Immutable raw docs** - Append-only with version chains
3. **Mandatory provenance** - Source, timestamp, signature status
4. **Linkset only** - Joins stored separately, never mutate content
5. **Deterministic canonicalization** - Stable JSON output
6. **Idempotent upserts** - Same hash = no new record
7. **CI verification** - AOCVerifier enforces at runtime

### 3.2 Enforcement

```csharp
// AOCWriteGuard checks before every write
public class AOCWriteGuard
{
    public Task GuardAsync(AdvisoryObservation obs)
    {
        // Verify no forbidden properties
        // Validate provenance completeness
        // Check tenant claims
        // Normalize timestamps
        // Compute content hash
        return Task.CompletedTask;
    }
}
```

Roslyn analyzers (`StellaOps.AOC.Analyzers`) scan connectors at build time to prevent forbidden property usage.

---
## 4. Advisory Observation Model

### 4.1 Observation Structure

```json
{
  "_id": "tenant:vendor:upstreamId:revision",
  "tenant": "acme-corp",
  "source": {
    "vendor": "OSV",
    "stream": "github",
    "api": "https://api.osv.dev/v1/.../GHSA-...",
    "collectorVersion": "concelier/1.7.3"
  },
  "upstream": {
    "upstreamId": "GHSA-xxxx-....",
    "documentVersion": "2025-09-01T12:13:14Z",
    "fetchedAt": "2025-09-01T13:04:05Z",
    "receivedAt": "2025-09-01T13:04:06Z",
    "contentHash": "sha256:...",
    "signature": {
      "present": true,
      "format": "dsse",
      "keyId": "rekor:.../key/abc"
    }
  },
  "content": {
    "format": "OSV",
    "specVersion": "1.6",
    "raw": { /* unmodified upstream document */ }
  },
  "identifiers": {
    "primary": "GHSA-xxxx-....",
    "aliases": ["CVE-2025-12345", "GHSA-xxxx-...."]
  },
  "linkset": {
    "purls": ["pkg:npm/lodash@4.17.21"],
    "cpes": ["cpe:2.3:a:lodash:lodash:4.17.21:*:*:*:*:*:*:*"],
    "references": [
      {"type": "advisory", "url": "https://..."},
      {"type": "fix", "url": "https://..."}
    ]
  },
  "supersedes": "tenant:vendor:upstreamId:prev-revision",
  "createdAt": "2025-09-01T13:04:06Z"
}
```

### 4.2 Linkset Correlation

```json
{
  "_id": "sha256:...",
  "tenant": "acme-corp",
  "key": {
    "vulnerabilityId": "CVE-2025-12345",
    "productKey": "pkg:npm/lodash@4.17.21",
    "confidence": "high"
  },
  "observations": [
    {
      "observationId": "tenant:osv:GHSA-...:v1",
      "sourceVendor": "OSV",
      "statement": { "severity": "high" },
      "collectedAt": "2025-09-01T13:04:06Z"
    },
    {
      "observationId": "tenant:nvd:CVE-2025-12345:v2",
      "sourceVendor": "NVD",
      "statement": { "severity": "critical" },
      "collectedAt": "2025-09-01T14:00:00Z"
    }
  ],
  "conflicts": [
    {
      "conflictId": "sha256:...",
      "type": "severity-mismatch",
      "observations": [
        { "source": "OSV", "value": "high" },
        { "source": "NVD", "value": "critical" }
      ],
      "confidence": "medium",
      "detectedAt": "2025-09-01T14:00:01Z"
    }
  ]
}
```

---
## 5. Source Connectors

### 5.1 Source Families

| Family | Examples | Format |
|--------|----------|--------|
| **Vendor PSIRTs** | Microsoft, Oracle, Cisco, Adobe | CSAF, proprietary |
| **Linux Distros** | Red Hat, SUSE, Ubuntu, Debian, Alpine | CSAF, JSON, XML |
| **OSS Ecosystems** | OSV, GHSA, npm, PyPI, Maven | OSV, GraphQL |
| **CERTs** | CISA (KEV), JVN, CERT-FR | JSON, XML |

### 5.2 Connector Contract

```csharp
public interface IFeedConnector
{
    string SourceName { get; }

    // Fetch signed feeds or offline mirrors
    Task FetchAsync(IServiceProvider sp, CancellationToken ct);

    // Normalize to strongly-typed DTOs
    Task ParseAsync(IServiceProvider sp, CancellationToken ct);

    // Build canonical records with provenance
    Task MapAsync(IServiceProvider sp, CancellationToken ct);
}
```

### 5.3 Connector Lifecycle

1. **Snapshot** - Fetch with cursor, ETag, rate limiting
2. **Parse** - Schema validation, normalization
3. **Guard** - AOCWriteGuard enforcement
4. **Write** - Append-only insert
5. **Event** - Emit `advisory.observation.updated`

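A skeletal connector that walks this lifecycle might look like the sketch below; the class name and comments are illustrative, not a shipped Concelier connector:

```csharp
// Sketch: a connector stub mapped onto the five lifecycle stages.
public sealed class ExampleSecDbConnector : IFeedConnector
{
    public string SourceName => "example-secdb";

    public Task FetchAsync(IServiceProvider sp, CancellationToken ct)
    {
        // 1. Snapshot: fetch with cursor/ETag under per-source rate limits,
        //    persisting raw payloads unmodified (append-only).
        return Task.CompletedTask;
    }

    public Task ParseAsync(IServiceProvider sp, CancellationToken ct)
    {
        // 2. Parse: validate raw documents against the upstream schema
        //    and normalize them into strongly-typed DTOs.
        return Task.CompletedTask;
    }

    public Task MapAsync(IServiceProvider sp, CancellationToken ct)
    {
        // 3-5. Guard, write, event: build AdvisoryObservation records with
        //      full provenance, pass them through AOCWriteGuard, insert
        //      append-only, then emit advisory.observation.updated.
        return Task.CompletedTask;
    }
}
```
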
---

## 6. Version Semantics

### 6.1 Ecosystem Normalization

| Ecosystem | Format | Normalization |
|-----------|--------|---------------|
| npm, PyPI, Maven | SemVer | Intervals with `<`, `>=`, `~`, `^` |
| RPM | EVR | `epoch:version-release` with order keys |
| DEB | dpkg | Version comparison with order keys |
| APK | Alpine | Computed order keys |

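For the SemVer row, caret and tilde requirements collapse to half-open intervals. A sketch of the common case (npm semantics with major > 0 and a full three-part version; not the shipped normalizer):

```csharp
// "^4.17.0" => >=4.17.0 <5.0.0   (caret: stay within the major)
// "~4.17.0" => >=4.17.0 <4.18.0  (tilde: stay within the minor)
public static (string Min, string MaxExclusive) NormalizeRange(string spec)
{
    char op = spec[0];
    string[] parts = spec[1..].Split('.');
    int major = int.Parse(parts[0]), minor = int.Parse(parts[1]);

    return op switch
    {
        '^' => (spec[1..], $"{major + 1}.0.0"),
        '~' => (spec[1..], $"{major}.{minor + 1}.0"),
        _   => throw new ArgumentException($"Unsupported operator in '{spec}'")
    };
}
```
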
### 6.2 CVSS Handling

- Normalize CVSS v2/v3/v4 where available
- Track all source CVSS values
- Effective severity = max (configurable)
- Store KEV evidence with source and date

---
## 7. Conflict Detection

### 7.1 Conflict Types

| Type | Description | Resolution |
|------|-------------|------------|
| `severity-mismatch` | Different severity ratings | Policy decides |
| `affected-range-divergence` | Different version ranges | Most specific wins |
| `reference-clash` | Contradictory references | Surface all |
| `alias-inconsistency` | Different alias mappings | Union with provenance |
| `metadata-gap` | Missing information | Flag for review |

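As a minimal sketch, a `severity-mismatch` can be derived from a linkset's observation statements like this (illustrative shapes, not the real correlation engine):

```csharp
using System;
using System.Collections.Generic;
using System.Linq;

// True when linked observations carry more than one distinct severity value.
public static bool HasSeverityMismatch(
    IEnumerable<(string Source, string Severity)> statements,
    out string[] distinctValues)
{
    distinctValues = statements
        .Select(s => s.Severity)
        .Where(v => !string.IsNullOrEmpty(v))
        .Distinct(StringComparer.OrdinalIgnoreCase)
        .OrderBy(v => v, StringComparer.Ordinal) // deterministic ordering for payloads
        .ToArray();
    return distinctValues.Length > 1;
}
```
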
### 7.2 Conflict Visibility

Conflicts are never hidden - they are:
- Stored in linkset documents
- Surfaced in API responses
- Included in exports
- Displayed in Console UI

---
## 8. Deterministic Exports

### 8.1 JSON Export

```
exports/json/
├── CVE/
│   ├── 20/
│   │   └── CVE-2025-12345.json
│   └── ...
├── manifest.json
└── export-digest.sha256
```

- Deterministic folder structure
- Canonical JSON (sorted keys, stable timestamps)
- Manifest with SHA-256 per file
- Reproducible across runs

### 8.2 Trivy DB Export

```
exports/trivy/
├── db.tar.gz
├── metadata.json
└── manifest.json
```

- Bolt DB compatible with Trivy
- Full and delta modes
- ORAS push to registries
- Mirror manifests for domains

### 8.3 Export Determinism

Running the same export against the same data must produce:
- Identical file contents
- Identical manifest hashes
- Identical export digests

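One way to get there, sketched with `System.Text.Json` (requires .NET 8 for `DeepClone`; the real exporter's canonicalizer may differ): recursively sort object keys, then hash the serialized bytes.

```csharp
using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Cryptography;
using System.Text.Json;
using System.Text.Json.Nodes;

public static class CanonicalDigest
{
    // Serialize the canonical form and hash its UTF-8 bytes.
    public static string ExportDigest(JsonNode doc)
    {
        var bytes = JsonSerializer.SerializeToUtf8Bytes(Canonicalize(doc));
        return "sha256:" + Convert.ToHexString(SHA256.HashData(bytes)).ToLowerInvariant();
    }

    // Rebuild the tree with object keys in ordinal order; arrays keep their order.
    private static JsonNode? Canonicalize(JsonNode? node) => node switch
    {
        JsonObject obj => new JsonObject(
            obj.OrderBy(kv => kv.Key, StringComparer.Ordinal)
               .Select(kv => new KeyValuePair<string, JsonNode?>(kv.Key, Canonicalize(kv.Value)))),
        JsonArray arr => new JsonArray(arr.Select(Canonicalize).ToArray()),
        _ => node?.DeepClone()
    };
}
```
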
---

## 9. Implementation Strategy

### 9.1 Phase 1: Core Pipeline (Complete)

- [x] AOCWriteGuard implementation
- [x] Observation storage
- [x] Basic connectors (Red Hat, SUSE, OSV)
- [x] JSON export

### 9.2 Phase 2: Link-Not-Merge (Complete)

- [x] Linkset correlation engine
- [x] Conflict detection
- [x] Event emission
- [x] API surface

### 9.3 Phase 3: Expanded Sources (In Progress)

- [x] GHSA GraphQL connector
- [x] Debian DSA connector
- [ ] Alpine secdb connector (CONCELIER-CONN-50-001)
- [ ] CISA KEV enrichment (CONCELIER-KEV-51-001)

### 9.4 Phase 4: Export Enhancements (Planned)

- [ ] Delta Trivy DB exports
- [ ] ORAS registry push
- [ ] Attestation hand-off
- [ ] Mirror bundle signing

---
## 10. API Surface

### 10.1 Sources & Jobs

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/v1/concelier/sources` | GET | `concelier.read` | List sources |
| `/api/v1/concelier/sources/{name}/trigger` | POST | `concelier.admin` | Trigger fetch |
| `/api/v1/concelier/sources/{name}/pause` | POST | `concelier.admin` | Pause source |
| `/api/v1/concelier/jobs/{id}` | GET | `concelier.read` | Job status |

### 10.2 Exports

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/v1/concelier/exports/json` | POST | `concelier.export` | Trigger JSON export |
| `/api/v1/concelier/exports/trivy` | POST | `concelier.export` | Trigger Trivy export |
| `/api/v1/concelier/exports/{id}` | GET | `concelier.read` | Export status |

### 10.3 Search

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/v1/concelier/advisories/{key}` | GET | `concelier.read` | Get advisory |
| `/api/v1/concelier/observations/{id}` | GET | `concelier.read` | Get observation |
| `/api/v1/concelier/linksets` | GET | `concelier.read` | Query linksets |

---
## 11. Storage Model

### 11.1 Collections

| Collection | Purpose | Key Indexes |
|------------|---------|-------------|
| `sources` | Connector catalog | `{_id}` |
| `source_state` | Run state | `{sourceName}` |
| `documents` | Raw payloads | `{sourceName, uri}` |
| `advisory_observations` | Normalized records | `{tenant, upstream.upstreamId}` |
| `advisory_linksets` | Correlations | `{tenant, key.vulnerabilityId, key.productKey}` |
| `advisory_events` | Change log | `{type, occurredAt}` |
| `export_state` | Export cursors | `{exportKind}` |

### 11.2 GridFS Buckets

- `fs.documents` - Raw payloads (immutable)
- `fs.exports` - Historical archives

---
## 12. Event Model

### 12.1 Events

| Event | Trigger | Content |
|-------|---------|---------|
| `advisory.observation.updated@1` | New/superseded observation | IDs, hash, supersedes |
| `advisory.linkset.updated@1` | Correlation change | Deltas, conflicts |

### 12.2 Event Transport

- Primary: NATS
- Fallback: Redis Stream
- Offline Kit captures for replay

---
## 13. Observability
|
||||
|
||||
### 13.1 Metrics
|
||||
|
||||
- `concelier.fetch.docs_total{source}`
|
||||
- `concelier.fetch.bytes_total{source}`
|
||||
- `concelier.parse.failures_total{source}`
|
||||
- `concelier.observations.write_total{result}`
|
||||
- `concelier.linksets.updated_total{result}`
|
||||
- `concelier.linksets.conflicts_total{type}`
|
||||
- `concelier.export.duration_seconds{kind}`
|
||||
|
||||
### 13.2 Performance Targets
|
||||
|
||||
| Operation | Target |
|
||||
|-----------|--------|
|
||||
| Ingest throughput | 5k docs/min |
|
||||
| Observation write | < 5ms p95 |
|
||||
| Linkset build | < 15ms p95 |
|
||||
| Export (1M advisories) | < 90 seconds |
|
||||
|
||||
---
|
||||
|
||||
## 14. Security Considerations
|
||||
|
||||
### 14.1 Outbound Security
|
||||
|
||||
- Allowlist per connector (domains, protocols)
|
||||
- Proxy support with TLS pinning
|
||||
- Rate limiting per source
|
||||
|
||||
### 14.2 Signature Verification
|
||||
|
||||
- PGP/cosign/x509 verification stored
|
||||
- Failed verification flagged, not rejected
|
||||
- Policy can down-weight unsigned sources
|
||||
|
||||
### 14.3 Determinism
|
||||
|
||||
- Canonical JSON writer
|
||||
- Stable export digests
|
||||
- Reproducible across runs
|
||||
|
||||
---
|
||||
|
||||
## 15. Related Documentation
|
||||
|
||||
| Resource | Location |
|
||||
|----------|----------|
|
||||
| Concelier architecture | `docs/modules/concelier/architecture.md` |
|
||||
| Link-Not-Merge schema | `docs/modules/concelier/link-not-merge-schema.md` |
|
||||
| Event schemas | `docs/modules/concelier/events/` |
|
||||
| Attestation guide | `docs/modules/concelier/attestation.md` |
|
||||
|
||||
---
|
||||
|
||||
## 16. Sprint Mapping
|
||||
|
||||
- **Primary Sprint:** SPRINT_0115_0001_0004_concelier_iv.md
|
||||
- **Related Sprints:**
|
||||
- SPRINT_0113_0001_0002_concelier_ii.md
|
||||
- SPRINT_0114_0001_0003_concelier_iii.md
|
||||
|
||||
**Key Task IDs:**
|
||||
- `CONCELIER-AOC-40-001` - AOC enforcement (DONE)
|
||||
- `CONCELIER-LNM-41-001` - Link-Not-Merge (DONE)
|
||||
- `CONCELIER-CONN-50-001` - Alpine connector (IN PROGRESS)
|
||||
- `CONCELIER-KEV-51-001` - KEV enrichment (TODO)
|
||||
- `CONCELIER-EXPORT-55-001` - Delta exports (TODO)
|
||||
|
||||
---
|
||||
|
||||
## 17. Success Metrics
|
||||
|
||||
| Metric | Target |
|
||||
|--------|--------|
|
||||
| Advisory freshness | < 1 hour from source |
|
||||
| Ingestion accuracy | 100% provenance retention |
|
||||
| Export determinism | 100% hash reproducibility |
|
||||
| Conflict detection | 100% of source divergence |
|
||||
| Source coverage | 20+ authoritative sources |
|
||||
|
||||
---
|
||||
|
||||
*Last updated: 2025-11-29*
|
||||
@@ -0,0 +1,449 @@
|
||||
# Export Center and Reporting Strategy
|
||||
|
||||
**Version:** 1.0
|
||||
**Date:** 2025-11-29
|
||||
**Status:** Canonical
|
||||
|
||||
This advisory defines the product rationale, profile system, and implementation strategy for the Export Center module, covering bundle generation, adapter architecture, distribution channels, and compliance reporting.
|
||||
|
||||
---
|
||||
|
||||
## 1. Executive Summary
|
||||
|
||||
The Export Center is the **dedicated service layer for packaging reproducible evidence bundles**. Key capabilities:
|
||||
|
||||
- **Profile-Based Exports** - 6+ profile types (JSON, Trivy, Mirror, DevPortal)
|
||||
- **Deterministic Bundles** - Bit-for-bit reproducible outputs with DSSE signatures
|
||||
- **Multi-Format Adapters** - Pluggable adapters for different consumer needs
|
||||
- **Distribution Channels** - HTTP download, OCI push, object storage
|
||||
- **Compliance Ready** - Provenance, signatures, audit trails for SOC 2/FedRAMP
|
||||
|
||||
---
|
||||
|
||||
## 2. Market Drivers
|
||||
|
||||
### 2.1 Target Segments
|
||||
|
||||
| Segment | Export Requirements | Use Case |
|
||||
|---------|---------------------|----------|
|
||||
| **Compliance Teams** | Signed bundles, provenance | Audit evidence |
|
||||
| **Security Vendors** | Trivy DB format | Scanner integration |
|
||||
| **Air-Gap Operators** | Offline mirrors | Disconnected environments |
|
||||
| **Development Teams** | JSON exports | CI/CD integration |
|
||||
|
||||
### 2.2 Competitive Positioning
|
||||
|
||||
Most vulnerability platforms offer basic CSV/JSON exports. Stella Ops differentiates with:
|
||||
- **Reproducible bundles** with cryptographic verification
|
||||
- **Multi-format adapters** (Trivy, CycloneDX, SPDX, custom)
|
||||
- **OCI distribution** for container-native workflows
|
||||
- **Provenance attestations** meeting SLSA Level 2+
|
||||
- **Delta exports** for bandwidth-efficient updates
|
||||
|
||||
---
|
||||
|
||||
## 3. Profile System
|
||||
|
||||
### 3.1 Built-in Profiles
|
||||
|
||||
| Profile | Variant | Description | Output Format |
|
||||
|---------|---------|-------------|---------------|
|
||||
| **JSON** | `raw` | Unprocessed advisory/VEX data | `.jsonl.zst` |
|
||||
| **JSON** | `policy` | Policy-evaluated findings | `.jsonl.zst` |
|
||||
| **Trivy** | `db` | Trivy vulnerability database | SQLite |
|
||||
| **Trivy** | `java-db` | Trivy Java advisory database | SQLite |
|
||||
| **Mirror** | `full` | Complete offline mirror | Filesystem tree |
|
||||
| **Mirror** | `delta` | Incremental updates | Filesystem tree |
|
||||
| **DevPortal** | `offline` | Developer portal assets | Archive |
|
||||
|
||||
### 3.2 Profile Configuration
|
||||
|
||||
```yaml
|
||||
apiVersion: stellaops.io/export.v1
|
||||
kind: ExportProfile
|
||||
metadata:
|
||||
name: compliance-report-monthly
|
||||
tenant: acme-corp
|
||||
|
||||
spec:
|
||||
kind: json
|
||||
variant: policy
|
||||
schedule: "0 0 1 * *" # Monthly
|
||||
|
||||
selectors:
|
||||
tenants: ["acme-corp"]
|
||||
timeWindow: "30d"
|
||||
severities: ["critical", "high"]
|
||||
ecosystems: ["npm", "maven", "pypi"]
|
||||
|
||||
options:
|
||||
compression: zstd
|
||||
encryption:
|
||||
enabled: true
|
||||
recipients: ["age1..."]
|
||||
signing:
|
||||
enabled: true
|
||||
keyRef: "kms://acme-corp/export-signing-key"
|
||||
|
||||
distribution:
|
||||
- type: http
|
||||
retention: 90d
|
||||
- type: oci
|
||||
registry: "registry.acme.com/exports"
|
||||
repository: "compliance-reports"
|
||||
```
|
||||
|
||||
### 3.3 Selector Expressions
|
||||
|
||||
| Selector | Description | Example |
|
||||
|----------|-------------|---------|
|
||||
| `tenants` | Tenant filter | `["acme-*"]` |
|
||||
| `timeWindow` | Time range | `"30d"`, `"2025-01-01/2025-12-31"` |
|
||||
| `products` | Product PURLs | `["pkg:npm/*", "pkg:maven/org.apache/*"]` |
|
||||
| `severities` | Severity filter | `["critical", "high"]` |
|
||||
| `ecosystems` | Package ecosystems | `["npm", "maven"]` |
|
||||
| `policyVersions` | Policy snapshot IDs | `["rev-42", "rev-43"]` |
|
||||
|
||||
---
|
||||
|
||||
## 4. Adapter Architecture
|
||||
|
||||
### 4.1 Adapter Contract
|
||||
|
||||
```csharp
|
||||
public interface IExportAdapter
|
||||
{
|
||||
string Kind { get; } // "json" | "trivy" | "mirror"
|
||||
string Variant { get; } // "raw" | "policy" | "db"
|
||||
|
||||
Task<ExportResult> RunAsync(
|
||||
ExportContext context,
|
||||
IAsyncEnumerable<ExportRecord> records,
|
||||
CancellationToken ct);
|
||||
}
|
||||
```
|
||||
|
||||
### 4.2 JSON Adapter
|
||||
|
||||
**Responsibilities:**
|
||||
- Canonical JSON serialization (sorted keys, RFC3339 UTC)
|
||||
- Linkset preservation for traceability
|
||||
- Zstandard compression
|
||||
- AOC guardrails (no derived modifications to raw fields)
|
||||
|
||||
**Output:**
|
||||
```
|
||||
export/
|
||||
├── advisories.jsonl.zst
|
||||
├── vex-statements.jsonl.zst
|
||||
├── findings.jsonl.zst (policy variant)
|
||||
└── manifest.json
|
||||
```
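
The canonical serialization rule above is what makes byte-identical output possible. A minimal sketch of such a writer, assuming .NET 8's `System.Text.Json.Nodes` (the adapter's real implementation may differ):

```csharp
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text.Json.Nodes;

static class CanonicalJson
{
    // Recursively sort object properties with an ordinal comparer so equal
    // documents always serialize to byte-identical, culture-independent output.
    // Timestamps are assumed to be normalized to RFC3339 UTC upstream.
    public static JsonNode? Canonicalize(JsonNode? node) => node switch
    {
        JsonObject obj => new JsonObject(obj
            .OrderBy(p => p.Key, StringComparer.Ordinal)
            .Select(p => KeyValuePair.Create(p.Key, Canonicalize(p.Value)))),
        JsonArray arr => new JsonArray(arr.Select(Canonicalize).ToArray()),
        _ => node?.DeepClone(),  // leaves are cloned so they can be re-parented
    };

    public static string Serialize(JsonNode node) =>
        Canonicalize(node)!.ToJsonString();  // compact form, no indentation
}
```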

### 4.3 Trivy Adapter

**Responsibilities:**
- Map Stella Ops advisory schema to Trivy DB format
- Handle namespace collisions across ecosystems
- Validate against supported Trivy schema versions
- Generate severity distribution summary

**Compatibility:**
- Trivy DB schema v2 (current)
- Fail-fast on unsupported schema versions

### 4.4 Mirror Adapter

**Responsibilities:**
- Build self-contained filesystem layout
- Delta comparison against base manifest
- Optional encryption of `/data` subtree
- OCI layer generation

**Layout:**
```
mirror/
├── manifests/
│   ├── advisories.manifest.json
│   └── vex.manifest.json
├── data/
│   ├── raw/
│   │   ├── advisories/
│   │   └── vex/
│   └── policy/
│       └── findings/
├── indexes/
│   └── by-cve.index
└── manifest.json
```
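
Delta comparison against a base manifest reduces to a set difference over `path -> digest` maps. A sketch under that assumption (the `MirrorDelta` record and manifest shape are hypothetical, not the adapter's actual types):

```csharp
using System;
using System.Collections.Generic;
using System.Linq;

// Hypothetical shape: a manifest maps relative path -> sha256 digest.
record MirrorDelta(
    IReadOnlyList<string> Added,
    IReadOnlyList<string> Changed,
    IReadOnlyList<string> Removed);

static class MirrorDiff
{
    public static MirrorDelta Compute(
        IReadOnlyDictionary<string, string> baseManifest,
        IReadOnlyDictionary<string, string> nextManifest)
    {
        var added = nextManifest.Keys.Where(k => !baseManifest.ContainsKey(k));
        var removed = baseManifest.Keys.Where(k => !nextManifest.ContainsKey(k));
        var changed = nextManifest
            .Where(kv => baseManifest.TryGetValue(kv.Key, out var prev) && prev != kv.Value)
            .Select(kv => kv.Key);

        // Sort each bucket so the delta bundle itself is deterministic.
        return new MirrorDelta(
            added.OrderBy(k => k, StringComparer.Ordinal).ToList(),
            changed.OrderBy(k => k, StringComparer.Ordinal).ToList(),
            removed.OrderBy(k => k, StringComparer.Ordinal).ToList());
    }
}
```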

---

## 5. Bundle Structure

### 5.1 Export Manifest

```json
{
  "version": "1.0.0",
  "exportId": "export-20251129-001",
  "profile": {
    "kind": "json",
    "variant": "policy",
    "name": "compliance-report-monthly"
  },
  "tenant": "acme-corp",
  "generatedAt": "2025-11-29T12:00:00Z",
  "generatedBy": "export-center-worker-1",
  "selectors": {
    "timeWindow": "2025-11-01/2025-11-30",
    "severities": ["critical", "high"]
  },
  "contents": [
    {
      "path": "findings.jsonl.zst",
      "size": 1048576,
      "digest": "sha256:abc123...",
      "recordCount": 45230
    }
  ],
  "totals": {
    "advisories": 45230,
    "vexStatements": 12450,
    "findings": 8920
  }
}
```

### 5.2 Provenance Attestation

```json
{
  "predicateType": "https://slsa.dev/provenance/v1",
  "subject": [
    {
      "name": "export-20251129-001.tar.gz",
      "digest": { "sha256": "def456..." }
    }
  ],
  "predicate": {
    "buildDefinition": {
      "buildType": "https://stellaops.io/export/v1",
      "externalParameters": {
        "profile": "compliance-report-monthly",
        "selectors": { "...": "..." }
      }
    },
    "runDetails": {
      "builder": {
        "id": "https://stellaops.io/export-center",
        "version": "1.2.3"
      },
      "metadata": {
        "invocationId": "export-run-123",
        "startedOn": "2025-11-29T12:00:00Z",
        "finishedOn": "2025-11-29T12:05:00Z"
      }
    }
  }
}
```

---

## 6. Distribution Channels

### 6.1 HTTP Download

```bash
# Download bundle
curl -H "Authorization: Bearer $TOKEN" \
  "https://export.stellaops.io/api/export/runs/{id}/download" \
  -o export-bundle.tar.gz

# Verify signature
cosign verify-blob --key export-key.pub \
  --signature export-bundle.sig \
  export-bundle.tar.gz
```

**Features:**
- Chunked transfer encoding
- Range request support (resumable)
- `X-Export-Digest` header
- Optional encryption metadata

### 6.2 OCI Push

```bash
# Pull from registry
oras pull registry.example.com/exports/compliance:2025-11

# Verify annotations
oras manifest fetch registry.example.com/exports/compliance:2025-11 | jq
```

**Annotations:**
- `io.stellaops.export.profile`
- `io.stellaops.export.tenant`
- `io.stellaops.export.manifest-digest`
- `io.stellaops.export.provenance-ref`

### 6.3 Object Storage

```yaml
distribution:
  - type: object
    provider: s3
    bucket: stella-exports
    prefix: "${tenant}/${exportId}"
    retention: 365d
    immutable: true
```

---

## 7. Implementation Strategy

### 7.1 Phase 1: Core Infrastructure (Complete)

- [x] Profile CRUD APIs
- [x] JSON adapter (raw, policy)
- [x] HTTP download distribution
- [x] Manifest generation

### 7.2 Phase 2: Trivy Integration (Complete)

- [x] Trivy DB adapter
- [x] Trivy Java DB adapter
- [x] Schema version validation
- [x] Compatibility testing

### 7.3 Phase 3: Mirror & Distribution (In Progress)

- [x] Mirror full adapter
- [x] Mirror delta adapter
- [ ] OCI push distribution (EXPORT-OCI-45-001)
- [ ] DevPortal adapter (EXPORT-DEV-46-001)

### 7.4 Phase 4: Advanced Features (Planned)

- [ ] Encryption at rest
- [ ] Scheduled exports
- [ ] Retention policies
- [ ] Cross-tenant exports (with approval)

---

## 8. API Surface

### 8.1 Profile Management

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/export/profiles` | GET | `export:read` | List profiles |
| `/api/export/profiles` | POST | `export:profile:manage` | Create profile |
| `/api/export/profiles/{id}` | PATCH | `export:profile:manage` | Update profile |
| `/api/export/profiles/{id}` | DELETE | `export:profile:manage` | Delete profile |

### 8.2 Export Runs

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/export/runs` | POST | `export:run` | Start export |
| `/api/export/runs/{id}` | GET | `export:read` | Get status |
| `/api/export/runs/{id}/events` | SSE | `export:read` | Stream progress |
| `/api/export/runs/{id}/cancel` | POST | `export:run` | Cancel export |

### 8.3 Downloads

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/export/runs/{id}/download` | GET | `export:download` | Download bundle |
| `/api/export/runs/{id}/manifest` | GET | `export:read` | Get manifest |
| `/api/export/runs/{id}/provenance` | GET | `export:read` | Get provenance |

---

## 9. Observability

### 9.1 Metrics

- `exporter_run_duration_seconds{profile,tenant}`
- `exporter_run_bytes_total{profile}`
- `exporter_run_failures_total{error_code}`
- `exporter_active_runs{tenant}`
- `exporter_distribution_push_seconds{type}`

### 9.2 Logs

Structured fields:
- `run_id`, `tenant`, `profile_kind`, `adapter`
- `phase` (plan, resolve, adapter, manifest, sign, distribute)
- `correlation_id`, `error_code`

---

## 10. Security Considerations

### 10.1 Access Control

- Tenant claim enforced at every query
- Cross-tenant selectors rejected (unless approved)
- RBAC scopes: `export:profile:manage`, `export:run`, `export:read`, `export:download`

### 10.2 Encryption

- Optional encryption per profile
- Keys derived from Authority-managed KMS
- Mirror encryption uses tenant-specific recipients
- Transport security (TLS) always required

### 10.3 Signing

- Cosign-compatible signatures
- SLSA Level 2 attestations by default
- Detached signatures stored alongside manifests

---

## 11. Related Documentation

| Resource | Location |
|----------|----------|
| Export Center architecture | `docs/modules/export-center/architecture.md` |
| Profile definitions | `docs/modules/export-center/profiles.md` |
| API reference | `docs/modules/export-center/api.md` |
| DevPortal bundle spec | `docs/modules/export-center/devportal-offline.md` |

---

## 12. Sprint Mapping

- **Primary Sprint:** SPRINT_0160_0001_0001_export_evidence.md
- **Related Sprints:**
  - SPRINT_0161_0001_0001_evidencelocker.md
  - SPRINT_0125_0001_0001_mirror.md

**Key Task IDs:**
- `EXPORT-CORE-40-001` - Profile system (DONE)
- `EXPORT-JSON-41-001` - JSON adapters (DONE)
- `EXPORT-TRIVY-42-001` - Trivy adapters (DONE)
- `EXPORT-OCI-45-001` - OCI distribution (IN PROGRESS)
- `EXPORT-DEV-46-001` - DevPortal adapter (TODO)

---

## 13. Success Metrics

| Metric | Target |
|--------|--------|
| Export reproducibility | 100% bit-identical |
| Bundle generation time | < 5 min for 100k records |
| Signature verification | 100% success rate |
| Distribution availability | 99.9% uptime |
| Retention compliance | 100% policy adherence |

---

*Last updated: 2025-11-29*

@@ -0,0 +1,407 @@

# Findings Ledger and Immutable Audit Trail

**Version:** 1.0
**Date:** 2025-11-29
**Status:** Canonical

This advisory defines the product rationale, ledger semantics, and implementation strategy for the Findings Ledger module, covering append-only events, Merkle anchoring, projections, and deterministic exports.

---

## 1. Executive Summary

The Findings Ledger provides **immutable, auditable records** of all vulnerability findings and their state transitions. Key capabilities:

- **Append-Only Events** - Every finding change recorded permanently
- **Merkle Anchoring** - Cryptographic proof of event ordering
- **Projections** - Materialized current state views
- **Deterministic Exports** - Reproducible compliance archives
- **Chain Integrity** - Hash-linked event sequences per tenant

---

## 2. Market Drivers

### 2.1 Target Segments

| Segment | Ledger Requirements | Use Case |
|---------|---------------------|----------|
| **Compliance** | Immutable audit trail | SOC 2, FedRAMP evidence |
| **Security Teams** | Finding history | Investigation timelines |
| **Legal/eDiscovery** | Tamper-proof records | Litigation support |
| **Auditors** | Verifiable exports | Third-party attestation |

### 2.2 Competitive Positioning

Most vulnerability tools provide mutable databases. Stella Ops differentiates with:

- **Append-only architecture** ensuring no record deletion
- **Merkle trees** for cryptographic verification
- **Chain integrity** with hash-linked events
- **Deterministic exports** for reproducible audits
- **Air-gap support** with signed bundles

---

## 3. Event Model

### 3.1 Ledger Event Structure

```json
{
  "id": "uuid",
  "type": "finding.status.changed",
  "tenant": "acme-corp",
  "chainId": "chain-uuid",
  "sequence": 12345,
  "policyVersion": "sha256:abc...",
  "finding": {
    "id": "artifact:sha256:...|pkg:npm/lodash",
    "artifactId": "sha256:...",
    "vulnId": "CVE-2025-12345"
  },
  "actor": {
    "id": "user:jane@acme.com",
    "type": "human"
  },
  "occurredAt": "2025-11-29T12:00:00Z",
  "recordedAt": "2025-11-29T12:00:01Z",
  "payload": {
    "previousStatus": "open",
    "newStatus": "triaged",
    "reason": "Under investigation"
  },
  "evidenceBundleRef": "bundle://tenant/2025/11/29/...",
  "eventHash": "sha256:...",
  "previousHash": "sha256:...",
  "merkleLeafHash": "sha256:..."
}
```

### 3.2 Event Types

| Type | Trigger | Payload |
|------|---------|---------|
| `finding.discovered` | New finding | severity, purl, advisory |
| `finding.status.changed` | State transition | old/new status, reason |
| `finding.verdict.changed` | Policy decision | verdict, rules matched |
| `finding.vex.applied` | VEX override | status, justification |
| `finding.assigned` | Owner change | assignee, team |
| `finding.commented` | Annotation | comment text (redacted) |
| `finding.resolved` | Resolution | resolution type, version |

### 3.3 Chain Semantics

- Each tenant has one or more event chains
- Events are strictly ordered by sequence number
- `previousHash` links to prior event for integrity
- Chain forks are prohibited (409 on conflict)
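
A minimal verification sketch for these rules, assuming each event's hash is the SHA-256 of its canonical body (the exact hashing rule and the `LedgerEvent` shape here are assumptions for illustration):

```csharp
using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Cryptography;
using System.Text;

// Illustrative event shape; field names follow the JSON above.
record LedgerEvent(long Sequence, string EventHash, string PreviousHash, string CanonicalBody);

static class ChainVerifier
{
    static string Sha256Hex(string s) =>
        "sha256:" + Convert.ToHexString(SHA256.HashData(Encoding.UTF8.GetBytes(s))).ToLowerInvariant();

    // Walk the chain in sequence order, checking both the hash link and
    // that each event hash matches its canonical body.
    public static bool Verify(IEnumerable<LedgerEvent> events)
    {
        string? previous = null;
        long expectedSequence = -1;
        foreach (var e in events.OrderBy(ev => ev.Sequence))
        {
            if (expectedSequence >= 0 && e.Sequence != expectedSequence) return false; // gap or fork
            if (previous is not null && e.PreviousHash != previous) return false;      // broken link
            if (Sha256Hex(e.CanonicalBody) != e.EventHash) return false;               // tampered body
            previous = e.EventHash;
            expectedSequence = e.Sequence + 1;
        }
        return true;
    }
}
```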

---

## 4. Merkle Anchoring

### 4.1 Tree Structure

```
            Root Hash
           /         \
    Hash(A+B)         Hash(C+D)
    /      \          /      \
 H(E1)    H(E2)    H(E3)    H(E4)
   |        |        |        |
Event1   Event2   Event3   Event4
```

### 4.2 Anchoring Process

1. **Batch collection** - Events accumulate in windows (default 15 min)
2. **Tree construction** - Leaves are event hashes
3. **Root computation** - Merkle root represents batch
4. **Anchor record** - Root stored with timestamp
5. **Optional external** - Root can be published to external ledger
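
A sketch of step 3, pairing an odd trailing node with itself, which is one common convention; the service's actual tree layout may differ:

```csharp
using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Cryptography;

static class MerkleRoot
{
    // Leaves are the event hashes of one batch window, in sequence order.
    public static byte[] Compute(IReadOnlyList<byte[]> leaves)
    {
        if (leaves.Count == 0) throw new ArgumentException("empty batch");
        var level = leaves.ToList();
        while (level.Count > 1)
        {
            var next = new List<byte[]>((level.Count + 1) / 2);
            for (var i = 0; i < level.Count; i += 2)
            {
                var left = level[i];
                // An odd node is paired with itself (assumption; see lead-in).
                var right = i + 1 < level.Count ? level[i + 1] : level[i];
                next.Add(SHA256.HashData(left.Concat(right).ToArray()));
            }
            level = next;
        }
        return level[0];  // the batch root stored in the anchor record
    }
}
```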

### 4.3 Configuration

```yaml
findings:
  ledger:
    merkle:
      batchSize: 1000
      windowDuration: 00:15:00
      algorithm: sha256
    externalAnchor:
      enabled: false
      type: rekor  # or custom
```

---

## 5. Projections

### 5.1 Purpose

Projections provide **current state** views derived from event history. They are:

- Materialized for fast queries
- Reconstructible from events
- Validated via `cycleHash`

### 5.2 Finding Projection

```json
{
  "tenantId": "acme-corp",
  "findingId": "artifact:sha256:...|pkg:npm/lodash@4.17.20",
  "policyVersion": "sha256:5f38c...",
  "status": "triaged",
  "severity": 6.7,
  "riskScore": 85.2,
  "riskSeverity": "high",
  "riskProfileVersion": "v2.1",
  "labels": {
    "kev": true,
    "runtime": "exposed"
  },
  "currentEventId": "uuid",
  "cycleHash": "sha256:...",
  "policyRationale": [
    "explain://tenant/findings/...",
    "policy://tenant/policy-v1/rationale/accepted"
  ],
  "updatedAt": "2025-11-29T12:00:00Z"
}
```

### 5.3 Projection Refresh

| Trigger | Action |
|---------|--------|
| New event | Incremental update |
| Policy change | Full recalculation |
| Manual request | On-demand rebuild |
| Scheduled | Periodic validation |

---

## 6. Export Capabilities

### 6.1 Export Shapes

| Shape | Description | Use Case |
|-------|-------------|----------|
| `canonical` | Full event detail | Complete audit |
| `compact` | Summary fields only | Quick reports |

### 6.2 Export Types

**Findings Export:**
```json
{
  "eventSequence": 12345,
  "observedAt": "2025-11-29T12:00:00Z",
  "findingId": "artifact:...|pkg:...",
  "policyVersion": "sha256:...",
  "status": "triaged",
  "severity": 6.7,
  "cycleHash": "sha256:...",
  "evidenceBundleRef": "bundle://...",
  "provenance": {
    "policyVersion": "sha256:...",
    "cycleHash": "sha256:...",
    "ledgerEventHash": "sha256:..."
  }
}
```

### 6.3 Export Formats

- **JSON** - Paged API responses
- **NDJSON** - Streaming exports
- **Bundle** - Signed archive packages

---

## 7. Implementation Strategy

### 7.1 Phase 1: Core Ledger (Complete)

- [x] Append-only event store
- [x] Hash-linked chains
- [x] Basic projection engine
- [x] REST API surface

### 7.2 Phase 2: Merkle & Exports (In Progress)

- [x] Merkle tree construction
- [x] Batch anchoring
- [ ] External anchor integration (LEDGER-MERKLE-50-001)
- [ ] Deterministic NDJSON exports (LEDGER-EXPORT-51-001)

### 7.3 Phase 3: Advanced Features (Planned)

- [ ] Chain integrity verification CLI
- [ ] Projection replay tooling
- [ ] Cross-tenant federation
- [ ] Long-term archival

---

## 8. API Surface

### 8.1 Events

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/v1/ledger/events` | GET | `vuln:audit` | List ledger events |
| `/v1/ledger/events` | POST | `vuln:operate` | Append event |

### 8.2 Projections

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/v1/ledger/projections/findings` | GET | `vuln:view` | List projections |

### 8.3 Exports

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/v1/ledger/export/findings` | GET | `vuln:audit` | Export findings |
| `/v1/ledger/export/vex` | GET | `vuln:audit` | Export VEX |
| `/v1/ledger/export/advisories` | GET | `vuln:audit` | Export advisories |
| `/v1/ledger/export/sboms` | GET | `vuln:audit` | Export SBOMs |

### 8.4 Attestations

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/v1/ledger/attestations` | GET | `vuln:audit` | List verifications |

---

## 9. Storage Model

### 9.1 Collections

| Collection | Purpose | Key Indexes |
|------------|---------|-------------|
| `ledger_events` | Append-only events | `{tenant, chainId, sequence}` |
| `ledger_chains` | Chain metadata | `{tenant, chainId}` |
| `ledger_merkle_roots` | Anchor records | `{tenant, batchId, anchoredAt}` |
| `finding_projections` | Current state | `{tenant, findingId}` |

### 9.2 Integrity Constraints

- Events are append-only (no update/delete)
- Sequence numbers strictly monotonic
- Hash chain validated on write
- Merkle roots immutable

---

## 10. Observability

### 10.1 Metrics

- `ledger.events.appended_total{tenant,type}`
- `ledger.events.rejected_total{reason}`
- `ledger.merkle.batches_total`
- `ledger.merkle.anchor_latency_seconds`
- `ledger.projection.updates_total`
- `ledger.projection.staleness_seconds`
- `ledger.export.rows_total{type,shape}`

### 10.2 SLO Targets

| Metric | Target |
|--------|--------|
| Event append latency | < 50ms p95 |
| Projection freshness | < 5 seconds |
| Merkle anchor window | 15 minutes |
| Export throughput | 10k rows/sec |

---

## 11. Security Considerations

### 11.1 Immutability Guarantees

- No UPDATE/DELETE operations exposed
- Admin override requires audit event
- Merkle roots provide tamper evidence
- External anchoring for non-repudiation

### 11.2 Access Control

- `vuln:view` - Read projections
- `vuln:investigate` - Triage actions
- `vuln:operate` - State transitions
- `vuln:audit` - Export and verify

### 11.3 Data Protection

- Sensitive payloads redacted in exports
- Comment text hashed, not stored
- PII filtered at ingest
- Tenant isolation enforced

---

## 12. Air-Gap Support

### 12.1 Offline Bundles

- Signed NDJSON exports
- Merkle proofs included
- Time anchors from trusted source
- Bundle verification CLI

### 12.2 Staleness Tracking

```yaml
airgap:
  staleness:
    warningThresholdDays: 7
    blockThresholdDays: 30
    riskCriticalExportsBlocked: true
```

---

## 13. Related Documentation

| Resource | Location |
|----------|----------|
| Ledger schema | `docs/modules/findings-ledger/schema.md` |
| OpenAPI spec | `docs/modules/findings-ledger/openapi/` |
| Export guide | `docs/modules/findings-ledger/exports.md` |

---

## 14. Sprint Mapping

- **Primary Sprint:** SPRINT_0186_0001_0001_record_deterministic_execution.md
- **Related Sprints:**
  - SPRINT_0120_0000_0001_policy_reasoning.md
  - SPRINT_311_docs_tasks_md_xi.md

**Key Task IDs:**
- `LEDGER-CORE-40-001` - Event store (DONE)
- `LEDGER-PROJ-41-001` - Projections (DONE)
- `LEDGER-MERKLE-50-001` - Merkle anchoring (IN PROGRESS)
- `LEDGER-EXPORT-51-001` - Deterministic exports (IN PROGRESS)
- `LEDGER-AIRGAP-56-001` - Bundle provenance (TODO)

---

## 15. Success Metrics

| Metric | Target |
|--------|--------|
| Event durability | 100% (no data loss) |
| Chain integrity | 100% hash verification |
| Projection accuracy | 100% event replay match |
| Export determinism | 100% hash reproducibility |
| Audit compliance | SOC 2 Type II |

---

*Last updated: 2025-11-29*

@@ -0,0 +1,331 @@

# Graph Analytics and Dependency Insights

**Version:** 1.0
**Date:** 2025-11-29
**Status:** Canonical

This advisory defines the product rationale, graph model, and implementation strategy for the Graph module, covering dependency analysis, impact visualization, and offline exports.

---

## 1. Executive Summary

The Graph module provides **dependency analysis and impact visualization** across the vulnerability landscape. Key capabilities:

- **Unified Graph Model** - Artifacts, components, advisories, policies linked
- **Impact Analysis** - Blast radius, affected paths, transitive dependencies
- **Policy Overlays** - VEX and policy decisions visualized on graph
- **Analytics** - Clustering, centrality, community detection
- **Offline Export** - Deterministic graph snapshots for air-gap

---

## 2. Market Drivers

### 2.1 Target Segments

| Segment | Graph Requirements | Use Case |
|---------|-------------------|----------|
| **Security Teams** | Impact analysis | Vulnerability prioritization |
| **Developers** | Dependency visualization | Upgrade planning |
| **Compliance** | Audit trails | Relationship documentation |
| **Management** | Risk dashboards | Portfolio risk view |

### 2.2 Competitive Positioning

Most vulnerability tools show flat lists. Stella Ops differentiates with:

- **Graph-native architecture** linking all entities
- **Impact visualization** showing blast radius
- **Policy overlays** embedding decisions in graph
- **Offline-compatible** exports for air-gap analysis
- **Analytics** for community detection and centrality

---

## 3. Graph Model

### 3.1 Node Types

| Node | Description | Key Properties |
|------|-------------|----------------|
| **Artifact** | Image/application digest | tenant, environment, labels |
| **Component** | Package version | purl, ecosystem, version |
| **File** | Source/binary path | hash, mtime |
| **License** | License identifier | spdx-id, restrictions |
| **Advisory** | Vulnerability record | cve-id, severity, sources |
| **VEXStatement** | VEX decision | status, justification |
| **PolicyVersion** | Signed policy pack | version, digest |

### 3.2 Edge Types

| Edge | From | To | Properties |
|------|------|-----|------------|
| `DEPENDS_ON` | Component | Component | scope, optional |
| `BUILT_FROM` | Artifact | Component | layer, path |
| `DECLARED_IN` | Component | File | sbom-id |
| `AFFECTED_BY` | Component | Advisory | version-range |
| `VEX_EXEMPTS` | VEXStatement | Advisory | justification |
| `GOVERNS_WITH` | PolicyVersion | Artifact | run-id |
| `OBSERVED_RUNTIME` | Artifact | Component | zastava-event-id |

### 3.3 Provenance

Every edge carries:

- `createdAt` - UTC timestamp
- `sourceDigest` - SRM/SBOM hash
- `provenanceRef` - Link to source document

---

## 4. Overlay System

### 4.1 Overlay Types

| Overlay | Purpose | Content |
|---------|---------|---------|
| `policy.overlay.v1` | Policy decisions | verdict, severity, rules |
| `openvex.v1` | VEX status | status, justification |
| `reachability.v1` | Runtime reachability | state, confidence |
| `clustering.v1` | Community detection | cluster-id, modularity |
| `centrality.v1` | Node importance | degree, betweenness |

### 4.2 Overlay Structure

```json
{
  "overlayId": "sha256(tenant|nodeId|overlayKind)",
  "overlayKind": "policy.overlay.v1",
  "nodeId": "component:pkg:npm/lodash@4.17.21",
  "tenant": "acme-corp",
  "generatedAt": "2025-11-29T12:00:00Z",
  "content": {
    "verdict": "blocked",
    "severity": "critical",
    "rulesMatched": ["rule-001", "rule-002"],
    "explainTrace": "sampled trace data..."
  }
}
```

---

## 5. Query Capabilities

### 5.1 Search API

```bash
POST /graph/search
{
  "tenant": "acme-corp",
  "query": "severity:critical AND ecosystem:npm",
  "nodeTypes": ["Component", "Advisory"],
  "limit": 100
}
```

### 5.2 Path Query

```bash
POST /graph/paths
{
  "source": "artifact:sha256:abc123...",
  "target": "advisory:CVE-2025-12345",
  "maxDepth": 6,
  "includeOverlays": true
}
```

**Response:**
```json
{
  "paths": [
    {
      "nodes": ["artifact:sha256:...", "component:pkg:npm/...", "advisory:CVE-..."],
      "edges": [{"type": "BUILT_FROM"}, {"type": "AFFECTED_BY"}],
      "length": 2
    }
  ],
  "overlays": [
    {"nodeId": "component:...", "overlayKind": "policy.overlay.v1", "content": {...}}
  ]
}
```

### 5.3 Diff Query

```bash
POST /graph/diff
{
  "snapshotA": "snapshot-2025-11-28",
  "snapshotB": "snapshot-2025-11-29",
  "includeOverlays": true
}
```

---

## 6. Analytics Pipeline

### 6.1 Clustering

- **Algorithm:** Louvain community detection
- **Output:** Cluster IDs per node, modularity score
- **Use Case:** Identify tightly coupled component groups

### 6.2 Centrality

- **Degree centrality:** Most connected nodes
- **Betweenness centrality:** Critical path nodes
- **Use Case:** Identify high-impact components
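
Degree centrality is cheap enough to sketch directly over the edge list; betweenness requires all-pairs shortest paths (e.g. Brandes' algorithm) and is omitted here. The tuple shapes are illustrative, not the service's storage types:

```csharp
using System.Collections.Generic;
using System.Linq;

static class Centrality
{
    // Degree centrality over an undirected view of the edge list:
    // nodes that touch the most edges rank first.
    public static IEnumerable<(string NodeId, int Degree)> ByDegree(
        IEnumerable<(string From, string To)> edges) =>
        edges.SelectMany(e => new[] { e.From, e.To })
             .GroupBy(n => n)
             .Select(g => (NodeId: g.Key, Degree: g.Count()))
             .OrderByDescending(x => x.Degree)
             .ThenBy(x => x.NodeId);  // stable tie-break keeps output deterministic
}
```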

### 6.3 Background Processing

```yaml
analytics:
  enabled: true
  schedule: "0 */6 * * *"  # Every 6 hours
  algorithms:
    - clustering
    - centrality
  snapshotRetention: 30
```

---

## 7. Implementation Strategy

### 7.1 Phase 1: Core Model (Complete)

- [x] Node/edge schema
- [x] SBOM ingestion pipeline
- [x] Advisory/VEX linking
- [x] Basic search API

### 7.2 Phase 2: Overlays (In Progress)

- [x] Policy overlay generation
- [x] VEX overlay generation
- [ ] Reachability overlay (GRAPH-REACH-50-001)
- [ ] Inline overlay in query responses (GRAPH-QUERY-51-001)

### 7.3 Phase 3: Analytics (Planned)

- [ ] Clustering algorithm
- [ ] Centrality calculations
- [ ] Background worker
- [ ] Analytics overlays export

### 7.4 Phase 4: Visualization (Planned)

- [ ] Console graph viewer
- [ ] Impact tree visualization
- [ ] Diff visualization

---

## 8. API Surface

### 8.1 Core APIs

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/graph/search` | POST | `graph:read` | Search nodes |
| `/graph/query` | POST | `graph:read` | Complex queries |
| `/graph/paths` | POST | `graph:read` | Path finding |
| `/graph/diff` | POST | `graph:read` | Snapshot diff |
| `/graph/nodes/{id}` | GET | `graph:read` | Node detail |

### 8.2 Export APIs

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/graph/export` | POST | `graph:export` | Start export job |
| `/graph/export/{jobId}` | GET | `graph:read` | Job status |
| `/graph/export/{jobId}/download` | GET | `graph:export` | Download bundle |

---

## 9. Storage Model

### 9.1 Collections

| Collection | Purpose | Key Indexes |
|------------|---------|-------------|
| `graph_nodes` | Node records | `{tenant, nodeType, nodeId}` |
| `graph_edges` | Edge records | `{tenant, fromId, toId, edgeType}` |
| `graph_overlays` | Overlay data | `{tenant, nodeId, overlayKind}` |
| `graph_snapshots` | Point-in-time snapshots | `{tenant, snapshotId}` |

### 9.2 Export Format

```
graph-export/
├── nodes.jsonl        # Sorted by nodeId
├── edges.jsonl        # Sorted by (from, to, type)
├── overlays/
│   ├── policy.jsonl
│   ├── openvex.jsonl
│   └── manifest.json
└── manifest.json
```

---

## 10. Observability

### 10.1 Metrics

- `graph_ingest_lag_seconds`
- `graph_nodes_total{nodeType}`
- `graph_edges_total{edgeType}`
- `graph_query_latency_seconds{queryType}`
- `graph_analytics_runs_total`
- `graph_analytics_clusters_total`

### 10.2 Offline Support

- Graph snapshots packaged for Offline Kit
- Deterministic NDJSON exports
- Overlay manifests with digests

---

## 11. Related Documentation

| Resource | Location |
|----------|----------|
| Graph architecture | `docs/modules/graph/architecture.md` |
| Query language | `docs/modules/graph/query-language.md` |
| Overlay specification | `docs/modules/graph/overlays.md` |

---

## 12. Sprint Mapping

- **Primary Sprint:** SPRINT_0141_0001_0001_graph_indexer.md
- **Related Sprints:**
  - SPRINT_0401_0001_0001_reachability_evidence_chain.md
  - SPRINT_0140_0001_0001_runtime_signals.md

**Key Task IDs:**
- `GRAPH-CORE-40-001` - Core model (DONE)
- `GRAPH-INGEST-41-001` - SBOM ingestion (DONE)
- `GRAPH-REACH-50-001` - Reachability overlay (IN PROGRESS)
- `GRAPH-ANALYTICS-55-001` - Clustering (TODO)
- `GRAPH-VIZ-60-001` - Visualization (FUTURE)

---

## 13. Success Metrics

| Metric | Target |
|--------|--------|
| Query latency | < 500ms p95 |
| Ingestion lag | < 5 minutes |
| Path query depth | Up to 6 hops |
| Export reproducibility | 100% deterministic |
| Analytics freshness | < 6 hours |

---

*Last updated: 2025-11-29*

@@ -0,0 +1,469 @@

# Notification Rules and Alerting Engine

**Version:** 1.0
**Date:** 2025-11-29
**Status:** Canonical

This advisory defines the product rationale, rules engine semantics, and implementation strategy for the Notify module, covering channel connectors, throttling, digests, and delivery management.

---

## 1. Executive Summary

The Notify module provides **rules-driven, tenant-aware notification delivery** across security workflows. Key capabilities:

- **Rules Engine** - Declarative matchers for event routing
- **Multi-Channel Delivery** - Slack, Teams, Email, Webhooks
- **Noise Control** - Throttling, deduplication, digest windows
- **Approval Tokens** - DSSE-signed ack tokens for one-click workflows
- **Audit Trail** - Complete delivery history with redacted payloads

---

## 2. Market Drivers

### 2.1 Target Segments

| Segment | Notification Requirements | Use Case |
|---------|--------------------------|----------|
| **Security Teams** | Real-time critical alerts | Incident response |
| **DevSecOps** | CI/CD integration | Pipeline notifications |
| **Compliance** | Audit trails | Delivery verification |
| **Management** | Digest summaries | Executive reporting |

### 2.2 Competitive Positioning

Most vulnerability tools offer basic email alerts. Stella Ops differentiates with:

- **Rules-based routing** with fine-grained matchers
- **Native Slack/Teams integration** with rich formatting
- **Digest windows** to prevent alert fatigue
- **Cryptographic ack tokens** for approval workflows
- **Tenant isolation** with quota controls

---

## 3. Rules Engine

### 3.1 Rule Structure

```yaml
name: "critical-alerts-prod"
enabled: true
tenant: "acme-corp"

match:
  eventKinds:
    - "scanner.report.ready"
    - "scheduler.rescan.delta"
    - "zastava.admission"
  namespaces: ["prod-*"]
  repos: ["ghcr.io/acme/*"]
  minSeverity: "high"
  kev: true
  verdict: ["fail", "deny"]
  vex:
    includeRejectedJustifications: false

actions:
  - channel: "slack:sec-alerts"
    template: "concise"
    throttle: "5m"

  - channel: "email:soc"
    digest: "hourly"
    template: "detailed"
```

### 3.2 Matcher Types

| Matcher | Description | Example |
|---------|-------------|---------|
| `eventKinds` | Event type filter | `["scanner.report.ready"]` |
| `namespaces` | Namespace patterns | `["prod-*", "staging"]` |
| `repos` | Repository patterns | `["ghcr.io/acme/*"]` |
| `minSeverity` | Minimum severity | `"high"` |
| `kev` | KEV-tagged required | `true` |
| `verdict` | Report/admission verdict | `["fail", "deny"]` |
| `labels` | Kubernetes labels | `{"env": "production"}` |

### 3.3 Evaluation Order

1. **Tenant check** - Discard if rule tenant ≠ event tenant
2. **Kind filter** - Early discard for non-matching kinds
3. **Scope match** - Namespace/repo/label matching
4. **Delta gates** - Severity threshold evaluation
5. **VEX gate** - Filter based on VEX status
6. **Throttle/dedup** - Idempotency key check
7. **Actions** - Enqueue per-channel jobs
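
A condensed sketch of steps 1-5 as a single predicate (the record shapes are illustrative; the real engine evaluates compiled matchers and handles throttling and actions separately):

```csharp
using System;
using System.Collections.Generic;

// Illustrative shapes only; real rule/event models live in the Notify service.
record NotifyEvent(string Tenant, string Kind, string Namespace, string Severity, bool Kev);
record NotifyRule(string Tenant, HashSet<string> EventKinds, string NamespaceGlob,
                  string MinSeverity, bool RequireKev);

static class RuleEvaluator
{
    static readonly string[] SeverityOrder = { "low", "medium", "high", "critical" };

    // Trailing-star glob, the only pattern form shown in the matcher table.
    static bool GlobMatch(string glob, string value) =>
        glob.EndsWith('*') ? value.StartsWith(glob[..^1], StringComparison.Ordinal)
                           : glob == value;

    // Mirrors the documented order: cheap discards first, scope next,
    // then severity/KEV gates. Throttling happens only after a match.
    public static bool Matches(NotifyRule rule, NotifyEvent e) =>
        rule.Tenant == e.Tenant
        && rule.EventKinds.Contains(e.Kind)
        && GlobMatch(rule.NamespaceGlob, e.Namespace)
        && Array.IndexOf(SeverityOrder, e.Severity) >= Array.IndexOf(SeverityOrder, rule.MinSeverity)
        && (!rule.RequireKev || e.Kev);
}
```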

---

## 4. Channel Connectors

### 4.1 Built-in Channels

| Channel | Features | Rate Limits |
|---------|----------|-------------|
| **Slack** | Blocks, threads, reactions | 1 msg/sec per channel |
| **Teams** | Adaptive Cards, webhooks | 4 msgs/sec |
| **Email** | HTML+text, attachments | Relay-dependent |
| **Webhook** | JSON, HMAC signing | 10 req/sec |

### 4.2 Channel Configuration

```yaml
channels:
  - name: "slack:sec-alerts"
    type: slack
    config:
      channel: "#security-alerts"
      workspace: "acme-corp"
      secretRef: "ref://notify/slack-token"

  - name: "email:soc"
    type: email
    config:
      to: ["soc@acme.com"]
      from: "stellaops@acme.com"
      smtpHost: "smtp.acme.com"
      secretRef: "ref://notify/smtp-creds"

  - name: "webhook:siem"
    type: webhook
    config:
      url: "https://siem.acme.com/api/events"
      signMethod: "ed25519"
      signKeyRef: "ref://notify/webhook-key"
```

### 4.3 Connector Contract

```csharp
public interface INotifyConnector
{
    string Type { get; }
    Task<DeliveryResult> SendAsync(DeliveryContext ctx, CancellationToken ct);
    Task<HealthResult> HealthAsync(ChannelConfig cfg, CancellationToken ct);
}
```

---

## 5. Noise Control

### 5.1 Throttling

- **Per-action throttle** - Suppress duplicates within window
- **Idempotency key** - `hash(ruleId | actionId | event.kind | scope.digest | day)`
- **Configurable windows** - 5m, 15m, 1h, 1d
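
The idempotency key above maps directly onto a small helper; a sketch following the documented key shape (hash algorithm and hex encoding are assumptions):

```csharp
using System;
using System.Security.Cryptography;
using System.Text;

static class Throttle
{
    // Implements the documented key shape:
    // hash(ruleId | actionId | event.kind | scope.digest | day)
    public static string IdempotencyKey(
        string ruleId, string actionId, string eventKind, string scopeDigest, DateTimeOffset ts)
    {
        var day = ts.ToUniversalTime().ToString("yyyy-MM-dd");  // UTC day bucket
        var material = string.Join('|', ruleId, actionId, eventKind, scopeDigest, day);
        return Convert.ToHexString(SHA256.HashData(Encoding.UTF8.GetBytes(material)));
    }
}
```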

### 5.2 Digest Windows

```yaml
actions:
  - channel: "email:weekly-summary"
    digest: "weekly"
    digestOptions:
      maxItems: 100
      groupBy: ["severity", "namespace"]
      template: "digest-summary"
```

**Behavior:**
- Coalesce events within window
- Summarize top N items with counts
- Flush on window close or max items
- Safe truncation with "and X more" links

### 5.3 Quiet Hours

```yaml
notify:
  quietHours:
    enabled: true
    window: "22:00-06:00"
    timezone: "America/New_York"
    minSeverity: "critical"
```

Only critical alerts are delivered during quiet hours; everything else is deferred to digests.

---

## 6. Templates & Rendering

### 6.1 Template Engine

- Handlebars-style safe templates
- No arbitrary code execution
- Deterministic outputs (stable property order)
- Locale-aware formatting

### 6.2 Template Variables

| Variable | Description |
|----------|-------------|
| `event.kind` | Event type |
| `event.ts` | Timestamp |
| `scope.namespace` | Kubernetes namespace |
| `scope.repo` | Repository |
| `scope.digest` | Image digest |
| `payload.verdict` | Policy verdict |
| `payload.delta.newCritical` | New critical count |
| `payload.links.ui` | UI deep link |
| `topFindings[]` | Top N findings |

### 6.3 Channel-Specific Rendering

**Slack:**
```json
{
  "blocks": [
    {"type": "header", "text": {"type": "plain_text", "text": "Policy FAIL: nginx:latest"}},
    {"type": "section", "text": {"type": "mrkdwn", "text": "*2 critical*, 3 high vulnerabilities"}}
  ]
}
```

**Email:**
```html
<h2>Policy FAIL: nginx:latest</h2>
<table>
  <tr><td>Critical</td><td>2</td></tr>
  <tr><td>High</td><td>3</td></tr>
</table>
<a href="https://ui.internal/reports/...">View Details</a>
```

---

## 7. Ack Tokens

### 7.1 Token Structure

DSSE-signed tokens for one-click acknowledgements:

```json
{
  "payloadType": "application/vnd.stellaops.notify-ack-token+json",
  "payload": {
    "tenant": "acme-corp",
    "deliveryId": "delivery-123",
    "notificationId": "notif-456",
    "channel": "slack:sec-alerts",
    "webhookUrl": "https://notify.internal/ack",
    "nonce": "random-nonce",
    "actions": ["acknowledge", "escalate"],
    "expiresAt": "2025-11-29T13:00:00Z"
  },
  "signatures": [{"keyid": "notify-ack-key-01", "sig": "..."}]
}
```

### 7.2 Token Workflow

1. **Issue** - `POST /notify/ack-tokens/issue`
2. **Embed** - Token included in message action button
3. **Click** - User clicks button, token sent to webhook
4. **Verify** - `POST /notify/ack-tokens/verify`
5. **Audit** - Ack event recorded
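
Under DSSE, the signature covers the pre-authentication encoding (PAE) of the payload type and body, not the outer envelope. A sketch of that encoding plus a local ECDSA signature; key handling is simplified here, and production keys would come from the Authority-managed KMS:

```csharp
using System;
using System.Security.Cryptography;
using System.Text;

static class Dsse
{
    // DSSE PAE: "DSSEv1" SP LEN(type) SP type SP LEN(body) SP body,
    // with lengths as decimal byte counts.
    public static byte[] PreAuthEncoding(string payloadType, byte[] payload)
    {
        var header = $"DSSEv1 {Encoding.UTF8.GetByteCount(payloadType)} {payloadType} {payload.Length} ";
        var prefix = Encoding.UTF8.GetBytes(header);
        var pae = new byte[prefix.Length + payload.Length];
        prefix.CopyTo(pae, 0);
        payload.CopyTo(pae, prefix.Length);
        return pae;
    }

    // Illustrative local signing; the service's actual key type may differ.
    public static byte[] Sign(ECDsa key, string payloadType, byte[] payload) =>
        key.SignData(PreAuthEncoding(payloadType, payload), HashAlgorithmName.SHA256);
}
```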

### 7.3 Token Rotation

```bash
# Rotate ack token signing key
stella notify rotate-ack-key --key-source kms://notify/ack-key
```

---

## 8. Implementation Strategy

### 8.1 Phase 1: Core Engine (Complete)

- [x] Rules engine with matchers
- [x] Slack connector
- [x] Teams connector
- [x] Email connector
- [x] Webhook connector

### 8.2 Phase 2: Noise Control (Complete)

- [x] Throttling
- [x] Digest windows
- [x] Idempotency
- [x] Quiet hours

### 8.3 Phase 3: Ack Tokens (In Progress)

- [x] Token issuance
- [x] Token verification
- [ ] Token rotation API (NOTIFY-ACK-45-001)
- [ ] Escalation workflows (NOTIFY-ESC-46-001)

### 8.4 Phase 4: Advanced Features (Planned)

- [ ] PagerDuty connector
- [ ] Jira ticket creation
- [ ] In-app notifications
- [ ] Anomaly suppression

---

## 9. API Surface

### 9.1 Channels

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/v1/notify/channels` | GET/POST | `notify.read/admin` | List/create channels |
| `/api/v1/notify/channels/{id}` | GET/PATCH/DELETE | `notify.admin` | Manage channel |
| `/api/v1/notify/channels/{id}/test` | POST | `notify.admin` | Send test message |
| `/api/v1/notify/channels/{id}/health` | GET | `notify.read` | Health check |

### 9.2 Rules

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/v1/notify/rules` | GET/POST | `notify.read/admin` | List/create rules |
| `/api/v1/notify/rules/{id}` | GET/PATCH/DELETE | `notify.admin` | Manage rule |
| `/api/v1/notify/rules/{id}/test` | POST | `notify.admin` | Dry-run rule |

### 9.3 Deliveries

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/v1/notify/deliveries` | GET | `notify.read` | List deliveries |
| `/api/v1/notify/deliveries/{id}` | GET | `notify.read` | Delivery detail |
| `/api/v1/notify/deliveries/{id}/retry` | POST | `notify.admin` | Retry delivery |

---

## 10. Event Sources

### 10.1 Subscribed Events

| Event | Source | Typical Actions |
|-------|--------|-----------------|
| `scanner.scan.completed` | Scanner | Immediate/digest |
| `scanner.report.ready` | Scanner | Immediate |
| `scheduler.rescan.delta` | Scheduler | Immediate/digest |
| `attestor.logged` | Attestor | Immediate |
| `zastava.admission` | Zastava | Immediate |
| `concelier.export.completed` | Concelier | Digest |
| `excititor.export.completed` | Excititor | Digest |

### 10.2 Event Envelope

```json
{
  "eventId": "uuid",
  "kind": "scanner.report.ready",
  "tenant": "acme-corp",
  "ts": "2025-11-29T12:00:00Z",
  "actor": "scanner-webservice",
  "scope": {
    "namespace": "production",
    "repo": "ghcr.io/acme/api",
    "digest": "sha256:..."
  },
  "payload": {
    "reportId": "report-123",
    "verdict": "fail",
    "summary": {"total": 12, "blocked": 2},
    "delta": {"newCritical": 1, "kev": ["CVE-2025-..."]}
  }
}
```

---

## 11. Observability

### 11.1 Metrics

- `notify.events_consumed_total{kind}`
- `notify.rules_matched_total{ruleId}`
- `notify.throttled_total{reason}`
- `notify.digest_coalesced_total{window}`
- `notify.sent_total{channel}`
- `notify.failed_total{channel,code}`
- `notify.delivery_latency_seconds{channel}`

### 11.2 SLO Targets

| Metric | Target |
|--------|--------|
| Event-to-delivery p95 | < 60 seconds |
| Failure rate | < 0.5% per hour |
| Duplicate rate | ~0% |

---

## 12. Security Considerations

### 12.1 Secret Management

- Secrets stored as references only
- Just-in-time fetch at send time
- No plaintext in Mongo

### 12.2 Webhook Signing

```
X-StellaOps-Signature: t=1732881600,v1=abc123...
X-StellaOps-Timestamp: 2025-11-29T12:00:00Z
```

- HMAC-SHA256 or Ed25519
- Replay window protection
- Canonical body hash
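
A verification sketch for the HMAC-SHA256 variant; the exact signing string is an assumption here (timestamp and raw body joined by a dot, as in similar schemes):

```csharp
using System;
using System.Linq;
using System.Security.Cryptography;
using System.Text;

static class WebhookVerifier
{
    // Checks the HMAC-SHA256 form of X-StellaOps-Signature ("t=<unix>,v1=<hex>").
    public static bool Verify(string header, byte[] body, byte[] secret, TimeSpan replayWindow)
    {
        var parts = header.Split(',');
        var t = long.Parse(parts[0].Split('=')[1]);
        var provided = Convert.FromHexString(parts[1].Split('=')[1]);

        // Replay protection: refuse timestamps outside the allowed window.
        var age = DateTimeOffset.UtcNow - DateTimeOffset.FromUnixTimeSeconds(t);
        if (age > replayWindow || age < -replayWindow) return false;

        using var hmac = new HMACSHA256(secret);
        var expected = hmac.ComputeHash(Encoding.UTF8.GetBytes($"{t}.").Concat(body).ToArray());
        return CryptographicOperations.FixedTimeEquals(expected, provided);  // constant-time compare
    }
}
```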

### 12.3 Loop Prevention

- Webhook target allowlist
- Event origin tags
- Own webhooks rejected

---

## 13. Related Documentation

| Resource | Location |
|----------|----------|
| Notify architecture | `docs/modules/notify/architecture.md` |
| Channel schemas | `docs/modules/notify/resources/schemas/` |
| Sample payloads | `docs/modules/notify/resources/samples/` |
| Bootstrap pack | `docs/modules/notify/bootstrap-pack.md` |

---

## 14. Sprint Mapping

- **Primary Sprint:** SPRINT_0170_0001_0001_notify_engine.md (NEW)
- **Related Sprints:**
  - SPRINT_0171_0001_0002_notify_connectors.md
  - SPRINT_0172_0001_0003_notify_ack_tokens.md

**Key Task IDs:**
- `NOTIFY-ENGINE-40-001` - Rules engine (DONE)
- `NOTIFY-CONN-41-001` - Connectors (DONE)
- `NOTIFY-NOISE-42-001` - Throttling/digests (DONE)
- `NOTIFY-ACK-45-001` - Token rotation (IN PROGRESS)
- `NOTIFY-ESC-46-001` - Escalation workflows (TODO)

---

## 15. Success Metrics

| Metric | Target |
|--------|--------|
| Delivery latency | < 60s p95 |
| Delivery success rate | > 99.5% |
| Duplicate rate | < 0.01% |
| Rule evaluation time | < 10ms |
| Channel health | 99.9% uptime |

---

*Last updated: 2025-11-29*

@@ -0,0 +1,432 @@

# Orchestrator Event Model and Job Lifecycle

**Version:** 1.0
**Date:** 2025-11-29
**Status:** Canonical

This advisory defines the product rationale, job lifecycle semantics, and implementation strategy for the Orchestrator module, covering event models, quota governance, replay semantics, and the TaskRunner bridge.

---

## 1. Executive Summary

The Orchestrator is the **central job coordination layer** for all Stella Ops asynchronous operations. Key capabilities:

- **Unified Job Lifecycle** - Enqueue, schedule, lease, complete with audit trail
- **Quota Governance** - Per-tenant rate limits, burst controls, circuit breakers
- **Replay Semantics** - Deterministic job replay for audit and recovery
- **TaskRunner Bridge** - Pack-run integration with heartbeats and artifacts
- **Event Fan-Out** - SSE/GraphQL feeds for dashboards and notifications
- **Offline Export** - Audit bundles for compliance and investigations

---

## 2. Market Drivers

### 2.1 Target Segments

| Segment | Orchestration Requirements | Use Case |
|---------|---------------------------|----------|
| **Enterprise** | Rate limiting, quota management | Multi-team resource sharing |
| **MSP/MSSP** | Multi-tenant isolation | Managed security services |
| **Compliance Teams** | Audit trails, replay | SOC 2, FedRAMP evidence |
| **DevSecOps** | CI/CD integration, webhooks | Pipeline automation |

### 2.2 Competitive Positioning

Most vulnerability platforms lack sophisticated job orchestration. Stella Ops differentiates with:
- **Deterministic replay** for audit and debugging
- **Fine-grained quotas** per tenant/job-type
- **Circuit breakers** for automatic failure isolation
- **Native pack-run integration** for workflow automation
- **Offline-compatible** audit bundles

---

## 3. Job Lifecycle Model

### 3.1 State Machine

```
[Created] --> [Queued] --> [Leased] --> [Running] --> [Completed]
                 |                         |   |
                 v                         |   +--> [Canceled]
            [Throttled]                    v
            (re-queued)                [Failed] --> [Incident]
                                           |
                                           +--> [Queued] (retry)
```

### 3.2 Lifecycle Phases

A transition-validation sketch follows the table.

| Phase | Description | Transitions |
|-------|-------------|-------------|
| **Created** | Job request received | -> Queued |
| **Queued** | Awaiting scheduling | -> Leased, Throttled |
| **Throttled** | Rate limit applied | -> Queued (after delay) |
| **Leased** | Worker acquired job | -> Running, Queued (lease expired) |
| **Running** | Active execution | -> Completed, Failed, Canceled |
| **Completed** | Success, archived | Terminal |
| **Failed** | Error, may retry | -> Queued (retry), Incident |
| **Canceled** | Operator abort | Terminal |
| **Incident** | Escalated failure | Terminal (requires operator) |
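
A compact way to enforce the table is an explicit transition map; the sketch below mirrors the phases above (illustrative only, not the shipped engine):

```csharp
using System;
using System.Collections.Generic;

enum JobState { Created, Queued, Throttled, Leased, Running, Completed, Failed, Canceled, Incident }

static class JobTransitions
{
    // Allowed transitions, copied from the lifecycle table above.
    static readonly Dictionary<JobState, JobState[]> Allowed = new()
    {
        [JobState.Created]   = new[] { JobState.Queued },
        [JobState.Queued]    = new[] { JobState.Leased, JobState.Throttled },
        [JobState.Throttled] = new[] { JobState.Queued },
        [JobState.Leased]    = new[] { JobState.Running, JobState.Queued },  // lease expiry re-queues
        [JobState.Running]   = new[] { JobState.Completed, JobState.Failed, JobState.Canceled },
        [JobState.Failed]    = new[] { JobState.Queued, JobState.Incident }, // retry or escalate
    };

    public static void Assert(JobState from, JobState to)
    {
        if (!Allowed.TryGetValue(from, out var next) || Array.IndexOf(next, to) < 0)
            throw new InvalidOperationException($"Illegal transition {from} -> {to}");
    }
}
```

Completed, Canceled, and Incident have no entries, which makes them terminal by construction.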

### 3.3 Job Request Structure

```json
{
  "jobId": "uuid",
  "jobType": "scan|policy-run|export|pack-run|advisory-sync",
  "tenant": "tenant-id",
  "priority": "low|normal|high|emergency",
  "payloadDigest": "sha256:...",
  "payload": { "imageRef": "nginx:latest", "options": {} },
  "dependencies": ["job-id-1", "job-id-2"],
  "idempotencyKey": "unique-request-key",
  "correlationId": "trace-id",
  "requestedBy": "user-id|service-id",
  "requestedAt": "2025-11-29T12:00:00Z"
}
```

---

## 4. Quota Governance

### 4.1 Quota Model

```yaml
quotas:
  - tenant: "acme-corp"
    jobType: "*"
    maxActive: 50
    maxPerHour: 500
    burst: 10
    priority:
      emergency:
        maxActive: 5
        skipQueue: true

  - tenant: "acme-corp"
    jobType: "export"
    maxActive: 4
    maxPerHour: 100
```

### 4.2 Rate Limit Enforcement

1. **Quota Check** - Before leasing, verify the tenant hasn't exceeded limits
2. **Burst Control** - Allow short bursts within the configured window
3. **Staging** - Jobs exceeding limits are staged with a `nextEligibleAt` timestamp (sketched below)
4. **Priority Bypass** - Emergency jobs can skip the queue (with separate limits)
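
A sketch of steps 1-3 (types are hypothetical; the counters would come from the `jobs` collection in practice). The gate either admits the job now or returns the `nextEligibleAt` timestamp to stage it with:

```csharp
using System;

sealed record Quota(int MaxActive, int MaxPerHour, int Burst);

static class QuotaGate
{
    // Returns null when the job may be leased now, otherwise when to retry.
    public static DateTimeOffset? NextEligibleAt(
        Quota quota, int activeJobs, int startedThisHour, DateTimeOffset now)
    {
        // Step 1: concurrency ceiling.
        if (activeJobs >= quota.MaxActive)
            return now.AddSeconds(30);

        // Step 2: hourly budget plus a short burst allowance.
        if (startedThisHour >= quota.MaxPerHour + quota.Burst)
            return new DateTimeOffset(now.Year, now.Month, now.Day, now.Hour, 0, 0, now.Offset)
                .AddHours(1); // stage until the hourly window resets

        // Step 3: within limits, eligible immediately.
        return null;
    }
}
```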

### 4.3 Dynamic Controls

| Control | API | Purpose |
|---------|-----|---------|
| `pauseSource` | `POST /api/limits/pause` | Halt specific job sources |
| `resumeSource` | `POST /api/limits/resume` | Resume paused sources |
| `throttle` | `POST /api/limits/throttle` | Apply temporary throttle |
| `updateQuota` | `PATCH /api/quotas/{id}` | Modify quota limits |

### 4.4 Circuit Breakers

- Auto-pause job types when the failure rate exceeds a threshold (default 50%)
- Incident events generated via Notify
- Half-open testing after a cooldown period
- Manual reset via operator action

---

## 5. TaskRunner Bridge

### 5.1 Pack-Run Integration

The Orchestrator provides specialized support for TaskRunner pack executions:

```json
{
  "jobType": "pack-run",
  "payload": {
    "packId": "vuln-scan-and-report",
    "packVersion": "1.2.0",
    "planHash": "sha256:...",
    "inputs": { "imageRef": "nginx:latest" },
    "artifacts": [],
    "logChannel": "sse:/runs/{runId}/logs",
    "heartbeatCadence": 30
  }
}
```

### 5.2 Heartbeat Protocol

- Workers send heartbeats every `heartbeatCadence` seconds
- Missed heartbeats trigger lease expiration
- Leases can be extended for long-running tasks
- Dead workers detected within 2x the heartbeat interval (see the sketch below)
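
The dead-worker rule reduces to a single comparison; a sketch, assuming the orchestrator records the last heartbeat per lease:

```csharp
using System;

static class LeaseMonitor
{
    // A worker is presumed dead once two heartbeat intervals pass without contact.
    public static bool IsWorkerDead(
        DateTimeOffset lastHeartbeat, int heartbeatCadenceSeconds, DateTimeOffset now)
        => now - lastHeartbeat > TimeSpan.FromSeconds(2 * heartbeatCadenceSeconds);
}
```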

### 5.3 Artifact & Log Streaming

| Endpoint | Method | Purpose |
|----------|--------|---------|
| `/runs/{runId}/logs` | SSE | Stream execution logs |
| `/runs/{runId}/artifacts` | GET | List produced artifacts |
| `/runs/{runId}/artifacts/{name}` | GET | Download artifact |
| `/runs/{runId}/heartbeat` | POST | Extend lease |

---

## 6. Event Model

### 6.1 Event Envelope

```json
{
  "eventId": "uuid",
  "eventType": "job.queued|job.leased|job.completed|job.failed",
  "timestamp": "2025-11-29T12:00:00Z",
  "tenant": "tenant-id",
  "jobId": "job-id",
  "jobType": "scan",
  "correlationId": "trace-id",
  "idempotencyKey": "unique-key",
  "payload": {
    "status": "completed",
    "duration": 45.2,
    "result": { "verdict": "pass" }
  },
  "provenance": {
    "workerId": "worker-1",
    "leaseId": "lease-id",
    "taskRunnerId": "runner-1"
  }
}
```
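
Consumers can use the envelope's `idempotencyKey` to drop redelivered events; a minimal in-memory sketch (a production consumer would persist seen keys with a TTL rather than hold them forever):

```csharp
using System.Collections.Generic;

sealed class EventDeduper
{
    private readonly HashSet<string> _seen = new();

    // Returns true the first time a key is seen; false for redeliveries.
    public bool TryAccept(string idempotencyKey) => _seen.Add(idempotencyKey);
}
```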

### 6.2 Event Types

| Event | Trigger | Consumers |
|-------|---------|-----------|
| `job.queued` | Job enqueued | Dashboard, Notify |
| `job.leased` | Worker acquired job | Dashboard |
| `job.started` | Execution began | Dashboard, Notify |
| `job.progress` | Progress update | Dashboard (SSE) |
| `job.completed` | Success | Dashboard, Notify, Export |
| `job.failed` | Error occurred | Dashboard, Notify, Incident |
| `job.canceled` | Operator abort | Dashboard, Notify |
| `job.replayed` | Replay initiated | Dashboard, Audit |

### 6.3 Fan-Out Channels

- **SSE** - Real-time dashboard feeds
- **GraphQL Subscriptions** - Console UI
- **Notify** - Alert routing based on rules
- **Webhooks** - External integrations
- **Audit Log** - Compliance storage

---

## 7. Replay Semantics

### 7.1 Deterministic Replay

Jobs can be replayed for audit, debugging, or recovery:

```bash
# Replay a completed job
stella job replay --id job-12345

# Replay with sealed mode (offline verification)
stella job replay --id job-12345 --sealed --bundle output.tar.gz
```

### 7.2 Replay Guarantees

| Property | Guarantee |
|----------|-----------|
| **Input preservation** | Same payloadDigest, cursors |
| **Ordering** | Same processing order |
| **Determinism** | Same outputs for same inputs |
| **Provenance** | `replayOf` pointer to original |

### 7.3 Replay Record

```json
{
  "jobId": "replay-job-id",
  "replayOf": "original-job-id",
  "priority": "high",
  "reason": "audit-verification",
  "requestedBy": "auditor@example.com",
  "cursors": {
    "advisory": "cursor-abc",
    "vex": "cursor-def"
  }
}
```

---

## 8. Implementation Strategy

### 8.1 Phase 1: Core Lifecycle (Complete)

- [x] Job state machine
- [x] MongoDB queue with leasing
- [x] Basic quota enforcement
- [x] Dashboard SSE feeds

### 8.2 Phase 2: Pack-Run Bridge (In Progress)

- [x] Pack-run job type registration
- [x] Log/artifact streaming
- [ ] Heartbeat protocol (ORCH-PACK-37-001)
- [ ] Event envelope finalization (ORCH-SVC-37-101)

### 8.3 Phase 3: Advanced Controls (Planned)

- [ ] Circuit breaker automation
- [ ] Quota analytics dashboard
- [ ] Replay verification tooling
- [ ] Incident mode integration

---

## 9. API Surface

### 9.1 Job Management

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/jobs` | GET | `orch:read` | List jobs with filters |
| `/api/jobs/{id}` | GET | `orch:read` | Job detail |
| `/api/jobs/{id}/cancel` | POST | `orch:operate` | Cancel job |
| `/api/jobs/{id}/replay` | POST | `orch:operate` | Schedule replay |

### 9.2 Quota Management

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/quotas` | GET | `orch:read` | List quotas |
| `/api/quotas/{id}` | PATCH | `orch:quota` | Update quota |
| `/api/limits/throttle` | POST | `orch:quota` | Apply throttle |
| `/api/limits/pause` | POST | `orch:quota` | Pause source |
| `/api/limits/resume` | POST | `orch:quota` | Resume source |

### 9.3 Dashboard

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/dashboard/metrics` | GET | `orch:read` | Aggregated metrics |
| `/api/dashboard/events` | SSE | `orch:read` | Real-time events |

---

## 10. Storage Model

### 10.1 Collections

| Collection | Purpose | Key Fields |
|------------|---------|------------|
| `jobs` | Current job state | `_id`, `tenant`, `jobType`, `status`, `priority` |
| `job_history` | Append-only audit | `jobId`, `event`, `timestamp`, `actor` |
| `sources` | Job sources registry | `sourceId`, `tenant`, `status` |
| `quotas` | Quota definitions | `tenant`, `jobType`, `limits` |
| `throttles` | Active throttles | `tenant`, `source`, `until` |
| `incidents` | Escalated failures | `jobId`, `reason`, `status` |

### 10.2 Indexes

- `{tenant, jobType, status}` on `jobs`
- `{tenant, status, startedAt}` on `jobs`
- `{jobId, timestamp}` on `job_history`
- TTL index on transient lease records

---

## 11. Observability

### 11.1 Metrics

- `job_queue_depth{jobType,tenant}`
- `job_latency_seconds{jobType,phase}`
- `job_failures_total{jobType,reason}`
- `job_retry_total{jobType}`
- `lease_extensions_total{jobType}`
- `quota_exceeded_total{tenant}`
- `circuit_breaker_state{jobType}`

### 11.2 Pack-Run Metrics

- `pack_run_logs_stream_lag_seconds`
- `pack_run_heartbeats_total`
- `pack_run_artifacts_total`
- `pack_run_duration_seconds`

---

## 12. Offline Support

### 12.1 Audit Bundle Export

```bash
stella orch export --tenant acme-corp --since 2025-11-01 --output audit-bundle.tar.gz
```

Bundle contents:
- `jobs.jsonl` - Job records
- `history.jsonl` - State transitions
- `throttles.jsonl` - Throttle events
- `manifest.json` - Bundle metadata
- `signatures/` - DSSE signatures

### 12.2 Replay Verification

```bash
# Verify job determinism
stella job verify --bundle audit-bundle.tar.gz --job-id job-12345
```

---

## 13. Related Documentation

| Resource | Location |
|----------|----------|
| Orchestrator architecture | `docs/modules/orchestrator/architecture.md` |
| Event envelope spec | `docs/modules/orchestrator/event-envelope.md` |
| TaskRunner integration | `docs/modules/taskrunner/orchestrator-bridge.md` |

---

## 14. Sprint Mapping

- **Primary Sprint:** SPRINT_0151_0001_0001_orchestrator_i.md
- **Related Sprints:**
  - SPRINT_0152_0001_0002_orchestrator_ii.md
  - SPRINT_0153_0001_0003_orchestrator_iii.md
  - SPRINT_0157_0001_0001_taskrunner_i.md

**Key Task IDs:**
- `ORCH-CORE-30-001` - Job lifecycle (DONE)
- `ORCH-QUOTA-31-001` - Quota governance (DONE)
- `ORCH-PACK-37-001` - Pack-run bridge (IN PROGRESS)
- `ORCH-SVC-37-101` - Event envelope (IN PROGRESS)
- `ORCH-REPLAY-38-001` - Replay verification (TODO)

---

## 15. Success Metrics

| Metric | Target |
|--------|--------|
| Job scheduling latency | < 100ms p99 |
| Lease acquisition time | < 50ms p99 |
| Event fan-out delay | < 500ms |
| Quota enforcement accuracy | 100% |
| Replay determinism | 100% match |

---

*Last updated: 2025-11-29*

@@ -0,0 +1,394 @@

# Policy Simulation and Shadow Gates

**Version:** 1.0
**Date:** 2025-11-29
**Status:** Canonical

This advisory defines the product rationale, simulation semantics, and implementation strategy for Policy Engine simulation features, covering shadow runs, coverage fixtures, and promotion gates.

---

## 1. Executive Summary

Policy simulation enables **safe testing of policy changes** before production deployment. Key capabilities:

- **Shadow Runs** - Execute policies without enforcement
- **Diff Summaries** - Compare old vs new policy outcomes
- **Coverage Fixtures** - Validate expected findings
- **Promotion Gates** - Block promotion until tests pass
- **Deterministic Replay** - Reproduce simulation results

---

## 2. Market Drivers

### 2.1 Target Segments

| Segment | Simulation Requirements | Use Case |
|---------|------------------------|----------|
| **Policy Authors** | Preview changes | Development workflow |
| **Security Leads** | Approve promotions | Change management |
| **Compliance** | Audit trail | Policy change evidence |
| **DevSecOps** | CI integration | Automated testing |

### 2.2 Competitive Positioning

Most vulnerability tools lack policy simulation. Stella Ops differentiates with:
- **Shadow execution** without production impact
- **Diff visualization** of policy changes
- **Coverage testing** with fixture validation
- **Promotion gates** for governance
- **Deterministic replay** for audit

---

## 3. Simulation Modes

### 3.1 Shadow Run

Execute a policy against real data without enforcement:

```bash
stella policy simulate \
  --policy my-policy:v2 \
  --scope "tenant:acme-corp,namespace:production" \
  --shadow
```

**Behavior:**
- Evaluates all findings
- Records verdicts to shadow collections
- No enforcement actions
- No notifications triggered
- Metrics tagged with `shadow=true`

### 3.2 Diff Run

Compare two policy versions:

```bash
stella policy diff \
  --old my-policy:v1 \
  --new my-policy:v2 \
  --scope "tenant:acme-corp"
```

**Output:**
```json
{
  "summary": {
    "added": 12,
    "removed": 5,
    "changed": 8,
    "unchanged": 234
  },
  "changes": [
    {
      "findingId": "finding-123",
      "cve": "CVE-2025-12345",
      "oldVerdict": "warned",
      "newVerdict": "blocked",
      "reason": "rule 'critical-cves' now matches"
    }
  ]
}
```

### 3.3 Coverage Run

Validate a policy against fixture expectations:

```bash
stella policy coverage \
  --policy my-policy:v2 \
  --fixtures fixtures/policy-tests.yaml
```

---

## 4. Coverage Fixtures

### 4.1 Fixture Format

```yaml
apiVersion: stellaops.io/policy-test.v1
kind: PolicyFixture
metadata:
  name: critical-cve-blocking
  policy: my-policy

fixtures:
  - name: "Block critical CVE in production"
    input:
      finding:
        cve: "CVE-2025-12345"
        severity: critical
        ecosystem: npm
        component: "lodash@4.17.20"
      context:
        namespace: production
        labels:
          tier: frontend
    expected:
      verdict: blocked
      rulesMatched: ["critical-cves", "production-strict"]

  - name: "Warn on high CVE in staging"
    input:
      finding:
        cve: "CVE-2025-12346"
        severity: high
        ecosystem: npm
    expected:
      verdict: warned

  - name: "Ignore low CVE with VEX"
    input:
      finding:
        cve: "CVE-2025-12347"
        severity: low
        vexStatus: not_affected
        vexJustification: "component_not_present"
    expected:
      verdict: ignored
```

### 4.2 Fixture Results

```json
{
  "total": 25,
  "passed": 23,
  "failed": 2,
  "failures": [
    {
      "fixture": "Block critical CVE in production",
      "expected": {"verdict": "blocked"},
      "actual": {"verdict": "warned"},
      "diff": "rule 'critical-cves' did not match due to missing label"
    }
  ]
}
```
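
A coverage run is essentially expected-vs-actual verdict matching plus aggregation; a sketch of how the summary above could be produced (the types here are illustrative, not the shipped runner):

```csharp
using System.Collections.Generic;
using System.Linq;

sealed record FixtureOutcome(string Name, string ExpectedVerdict, string ActualVerdict)
{
    public bool Passed => ExpectedVerdict == ActualVerdict;
}

static class CoverageReport
{
    // Aggregates per-fixture outcomes into the totals shown above.
    public static (int Total, int Passed, IReadOnlyList<FixtureOutcome> Failures) Summarize(
        IEnumerable<FixtureOutcome> outcomes)
    {
        var all = outcomes.ToList();
        return (all.Count, all.Count(o => o.Passed), all.Where(o => !o.Passed).ToList());
    }
}
```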

---

## 5. Promotion Gates

### 5.1 Gate Requirements

Before a policy can be promoted from draft to active:

| Gate | Requirement | Enforcement |
|------|-------------|-------------|
| Shadow Run | Complete without errors | Required |
| Coverage | 100% fixtures pass | Required |
| Diff Review | Changes reviewed | Optional |
| Approval | Human sign-off | Configurable |

### 5.2 Promotion Workflow

```mermaid
stateDiagram-v2
    [*] --> Draft
    Draft --> Shadow: Start shadow run
    Shadow --> Coverage: Run coverage tests
    Coverage --> Review: Pass fixtures
    Review --> Approval: Review diff
    Approval --> Active: Approve
    Coverage --> Draft: Fix failures
    Approval --> Draft: Reject
```

### 5.3 CLI Commands

```bash
# Start shadow run
stella policy promote start --policy my-policy:v2

# Check promotion status
stella policy promote status --policy my-policy:v2

# Complete promotion (requires approval)
stella policy promote complete --policy my-policy:v2 --comment "Reviewed and approved"
```

---

## 6. Determinism Requirements

### 6.1 Simulation Guarantees

| Property | Guarantee |
|----------|-----------|
| Input ordering | Stable sort by (tenant, policyId, findingKey) |
| Rule evaluation | First-match semantics |
| Timestamp handling | Injected TimeProvider |
| Random values | Injected IRandom |

### 6.2 Replay Hash

Each simulation computes:
```
determinismHash = SHA256(policyVersion + inputsHash + rulesHash)
```

Replays with the same hash must produce identical results.
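
A sketch of the hash computation, assuming `+` means concatenation with an unambiguous separator (the exact canonical form is not pinned down above):

```csharp
using System;
using System.Security.Cryptography;
using System.Text;

static class DeterminismHash
{
    // SHA256 over policyVersion, inputsHash, and rulesHash with explicit separators,
    // so field boundaries cannot be confused during replay comparison.
    public static string Compute(string policyVersion, string inputsHash, string rulesHash)
    {
        byte[] digest = SHA256.HashData(
            Encoding.UTF8.GetBytes($"{policyVersion}\n{inputsHash}\n{rulesHash}"));
        return Convert.ToHexString(digest).ToLowerInvariant();
    }
}
```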

---

## 7. Implementation Strategy

### 7.1 Phase 1: Shadow Runs (Complete)

- [x] Shadow collection isolation
- [x] Shadow metrics tagging
- [x] Shadow run API
- [x] CLI integration

### 7.2 Phase 2: Diff & Coverage (In Progress)

- [x] Policy diff algorithm
- [x] Diff visualization
- [ ] Coverage fixture parser (POLICY-COV-50-001)
- [ ] Coverage runner (POLICY-COV-50-002)

### 7.3 Phase 3: Promotion Gates (Planned)

- [ ] Gate configuration schema
- [ ] Promotion state machine
- [ ] Approval workflow integration
- [ ] Console UI for review

---

## 8. API Surface

### 8.1 Simulation APIs

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/policy/simulate` | POST | `policy:simulate` | Start simulation |
| `/api/policy/simulate/{id}` | GET | `policy:read` | Get simulation status |
| `/api/policy/simulate/{id}/results` | GET | `policy:read` | Get results |

### 8.2 Diff APIs

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/policy/diff` | POST | `policy:read` | Compare versions |

### 8.3 Coverage APIs

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/policy/coverage` | POST | `policy:simulate` | Run coverage |
| `/api/policy/coverage/{id}` | GET | `policy:read` | Get results |

### 8.4 Promotion APIs

| Endpoint | Method | Scope | Description |
|----------|--------|-------|-------------|
| `/api/policy/promote` | POST | `policy:promote` | Start promotion |
| `/api/policy/promote/{id}` | GET | `policy:read` | Get status |
| `/api/policy/promote/{id}/approve` | POST | `policy:approve` | Approve promotion |
| `/api/policy/promote/{id}/reject` | POST | `policy:approve` | Reject promotion |

---

## 9. Storage Model

### 9.1 Collections

| Collection | Purpose |
|------------|---------|
| `policy_simulations` | Simulation records |
| `policy_simulation_results` | Per-finding results |
| `policy_coverage_runs` | Coverage executions |
| `policy_promotions` | Promotion state |

### 9.2 Shadow Isolation

Shadow results stored in separate collections:
- `effective_finding_{policyId}_shadow`
- Never mixed with production data
- TTL-based cleanup (default 7 days)

---

## 10. Observability

### 10.1 Metrics

- `policy_simulation_duration_seconds{mode}`
- `policy_coverage_pass_rate{policy}`
- `policy_promotion_gate_status{gate,status}`
- `policy_diff_changes_total{changeType}`

### 10.2 Audit Events

- `policy.simulation.started`
- `policy.simulation.completed`
- `policy.coverage.passed`
- `policy.coverage.failed`
- `policy.promotion.approved`
- `policy.promotion.rejected`

---

## 11. Console Integration

### 11.1 Policy Editor

- Inline simulation button
- Real-time diff preview
- Coverage status badge

### 11.2 Promotion Dashboard

- Pending promotions list
- Gate status visualization
- Approve/reject actions

---

## 12. Related Documentation

| Resource | Location |
|----------|----------|
| Policy architecture | `docs/modules/policy/architecture.md` |
| DSL reference | `docs/policy/dsl.md` |
| Lifecycle guide | `docs/policy/lifecycle.md` |
| Runtime guide | `docs/policy/runtime.md` |

---

## 13. Sprint Mapping

- **Primary Sprint:** SPRINT_0185_0001_0001_policy_simulation.md (NEW)
- **Related Sprints:**
  - SPRINT_0120_0000_0001_policy_reasoning.md
  - SPRINT_0121_0001_0001_policy_reasoning.md

**Key Task IDs:**
- `POLICY-SIM-40-001` - Shadow runs (DONE)
- `POLICY-DIFF-41-001` - Diff algorithm (DONE)
- `POLICY-COV-50-001` - Coverage fixtures (IN PROGRESS)
- `POLICY-COV-50-002` - Coverage runner (IN PROGRESS)
- `POLICY-PROM-55-001` - Promotion gates (TODO)

---

## 14. Success Metrics

| Metric | Target |
|--------|--------|
| Simulation latency | < 2 min (10k findings) |
| Coverage accuracy | 100% fixture matching |
| Promotion gate enforcement | 100% adherence |
| Shadow isolation | Zero production leakage |
| Replay determinism | 100% hash match |

---

*Last updated: 2025-11-29*

@@ -0,0 +1,444 @@

# Runtime Posture and Observation with Zastava

**Version:** 1.0
**Date:** 2025-11-29
**Status:** Canonical

This advisory defines the product rationale, observation model, and implementation strategy for the Zastava module, covering runtime inspection, admission control, drift detection, and posture verification.

---

## 1. Executive Summary

Zastava is the **runtime inspector and enforcer** that provides ground truth from running environments. Key capabilities:

- **Runtime Observation** - Inventory containers, track entrypoints, monitor loaded DSOs
- **Admission Control** - Kubernetes ValidatingAdmissionWebhook for pre-flight gates
- **Drift Detection** - Identify unexpected processes, libraries, and file changes
- **Posture Verification** - Validate signatures, SBOM referrers, attestations
- **Build-ID Tracking** - Correlate binaries to debug symbols and source

---

## 2. Market Drivers

### 2.1 Target Segments

| Segment | Runtime Requirements | Use Case |
|---------|---------------------|----------|
| **Enterprise Security** | Runtime visibility | Post-deploy monitoring |
| **Platform Engineering** | Admission gates | Policy enforcement |
| **Compliance Teams** | Continuous verification | Runtime attestation |
| **DevSecOps** | Drift detection | Configuration management |

### 2.2 Competitive Positioning

Most vulnerability scanners focus on build-time analysis. Stella Ops differentiates with:
- **Runtime ground truth** from actual container execution
- **DSO tracking** - which libraries are actually loaded
- **Entrypoint tracing** - what programs actually run
- **Native Kubernetes admission** with policy integration
- **Build-ID correlation** for symbol resolution

---

## 3. Architecture Overview

### 3.1 Component Topology

**Kubernetes Deployment:**
```
stellaops/zastava-observer   # DaemonSet on every node (read-only host mounts)
stellaops/zastava-webhook    # ValidatingAdmissionWebhook (Deployment, 2+ replicas)
```

**Docker/VM Deployment:**
```
stellaops/zastava-agent      # System service; watches Docker events; observer only
```

### 3.2 Dependencies

| Dependency | Purpose |
|------------|---------|
| Authority | OpToks (DPoP/mTLS) for API calls |
| Scanner.WebService | Event ingestion, policy decisions |
| OCI Registry | Referrer/signature checks |
| Container Runtime | containerd/CRI-O/Docker interfaces |
| Kubernetes API | Pod watching, admission webhook |

---

## 4. Runtime Event Model

### 4.1 Event Types

| Kind | Trigger | Payload |
|------|---------|---------|
| `CONTAINER_START` | Container lifecycle | Image, entrypoint, namespace |
| `CONTAINER_STOP` | Container termination | Exit code, duration |
| `DRIFT` | Unexpected change | Changed files, new binaries |
| `POLICY_VIOLATION` | Rule breach | Reason, severity |
| `ATTESTATION_STATUS` | Verification result | Signed, SBOM present |

### 4.2 Event Envelope

```json
{
  "eventId": "uuid",
  "when": "2025-11-29T12:00:00Z",
  "kind": "CONTAINER_START",
  "tenant": "acme-corp",
  "node": "worker-node-01",
  "runtime": {
    "engine": "containerd",
    "version": "1.7.19"
  },
  "workload": {
    "platform": "kubernetes",
    "namespace": "production",
    "pod": "api-7c9fbbd8b7-ktd84",
    "container": "api",
    "containerId": "containerd://abc123...",
    "imageRef": "ghcr.io/acme/api@sha256:def456...",
    "owner": {
      "kind": "Deployment",
      "name": "api"
    }
  },
  "process": {
    "pid": 12345,
    "entrypoint": ["/entrypoint.sh", "--serve"],
    "entryTrace": [
      {"file": "/entrypoint.sh", "line": 3, "op": "exec", "target": "/usr/bin/python3"},
      {"file": "<argv>", "op": "python", "target": "/opt/app/server.py"}
    ],
    "buildId": "9f3a1cd4c0b7adfe91c0e3b51d2f45fb0f76a4c1"
  },
  "loadedLibs": [
    {"path": "/lib/x86_64-linux-gnu/libssl.so.3", "inode": 123456, "sha256": "..."},
    {"path": "/usr/lib/x86_64-linux-gnu/libcrypto.so.3", "inode": 123457, "sha256": "..."}
  ],
  "posture": {
    "imageSigned": true,
    "sbomReferrer": "present",
    "attestation": {
      "uuid": "rekor-uuid",
      "verified": true
    }
  }
}
```

---

## 5. Observer Capabilities

### 5.1 Container Lifecycle Tracking

- Watch container start/stop via the CRI socket
- Resolve container to image digest
- Map mount points and rootfs paths
- Track container metadata (labels, annotations)

### 5.2 Entrypoint Tracing

- Attach a short-lived nsenter probe to container PID 1
- Parse shell scripts for the exec chain
- Record the terminal program (actual binary)
- Bounded depth prevents infinite loops

### 5.3 Loaded Library Sampling

- Read `/proc/<pid>/maps` for loaded DSOs
- Compute SHA-256 for each mapped file
- Track GNU build-IDs for symbol correlation
- Rate limits prevent resource exhaustion (a sampling sketch follows the list)
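
A sampling sketch (targets .NET 7+; error handling elided): parse the file-backed mappings out of `/proc/<pid>/maps`, de-duplicate, and hash each path up to the configured cap:

```csharp
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Security.Cryptography;

static class LibSampler
{
    // Yields (path, sha256) for distinct file-backed mappings of a process.
    public static IEnumerable<(string Path, string Sha256)> Sample(int pid, int maxLibs = 256)
    {
        var paths = File.ReadLines($"/proc/{pid}/maps")
            .Select(line => line.Split(' ', StringSplitOptions.RemoveEmptyEntries))
            .Where(cols => cols.Length >= 6 && cols[5].StartsWith('/'))  // file-backed only
            .Select(cols => cols[5])
            .Distinct()
            .Take(maxLibs);

        foreach (var path in paths)
        {
            using var stream = File.OpenRead(path);
            yield return (path, Convert.ToHexString(SHA256.HashData(stream)).ToLowerInvariant());
        }
    }
}
```

The `maxLibs` cap mirrors the `collect.maxLibs` setting shown in the configuration section below.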

### 5.4 Posture Verification

- Image signature presence (cosign policies)
- SBOM referrers check (registry HEAD)
- Rekor attestation lookup via Scanner.WebService
- Policy verdict from backend

---

## 6. Admission Control

### 6.1 Gate Criteria

| Criterion | Description | Configurable |
|-----------|-------------|--------------|
| Image Signature | Cosign-verifiable to configured keys | Yes |
| SBOM Availability | CycloneDX referrer or catalog entry | Yes |
| Policy Verdict | Backend PASS required | Yes |
| Registry Allowlist | Permitted registries | Yes |
| Tag Bans | Reject `:latest`, etc. | Yes |
| Base Image Allowlist | Permitted base digests | Yes |

### 6.2 Decision Flow

```mermaid
sequenceDiagram
    participant K8s as API Server
    participant WH as Zastava Webhook
    participant SW as Scanner.WebService

    K8s->>WH: AdmissionReview(Pod)
    WH->>WH: Resolve images to digests
    WH->>SW: POST /policy/runtime
    SW-->>WH: {signed, hasSbom, verdict, reasons}
    alt All pass
        WH-->>K8s: Allow
    else Any fail
        WH-->>K8s: Deny (with reasons)
    end
```

### 6.3 Response Caching

- Per-digest results cached for a TTL (default 300s); a cache sketch follows the list
- Fail-open or fail-closed per namespace
- Cache invalidation on policy updates
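
A per-digest TTL cache sketch matching that behaviour (thread-safe; the verdict type and method names are illustrative):

```csharp
using System;
using System.Collections.Concurrent;

sealed record AdmissionVerdict(bool Allowed, string[] Reasons);

sealed class AdmissionCache
{
    private readonly ConcurrentDictionary<string, (AdmissionVerdict Verdict, DateTimeOffset StoredAt)> _entries = new();
    private readonly TimeSpan _ttl;

    public AdmissionCache(int ttlSeconds = 300) => _ttl = TimeSpan.FromSeconds(ttlSeconds);

    public bool TryGet(string imageDigest, DateTimeOffset now, out AdmissionVerdict verdict)
    {
        verdict = null!;
        if (!_entries.TryGetValue(imageDigest, out var entry)) return false;
        if (now - entry.StoredAt > _ttl)          // expired: evict and report a miss
        {
            _entries.TryRemove(imageDigest, out _);
            return false;
        }
        verdict = entry.Verdict;
        return true;
    }

    public void Put(string imageDigest, AdmissionVerdict verdict, DateTimeOffset now)
        => _entries[imageDigest] = (verdict, now);

    // Called when a policy update arrives; coarse-grained invalidation.
    public void Clear() => _entries.Clear();
}
```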

---

## 7. Drift Detection

### 7.1 Signal Types

| Signal | Detection Method | Action |
|--------|-----------------|--------|
| Process Drift | Terminal program differs from EntryTrace baseline | Alert |
| Library Drift | Loaded DSOs not in Usage SBOM | Alert, delta scan |
| Filesystem Drift | New executables with mtime after image creation | Alert |
| Network Drift | Unexpected listening ports | Alert (optional) |

### 7.2 Drift Event

```json
{
  "kind": "DRIFT",
  "delta": {
    "baselineImageDigest": "sha256:abc...",
    "changedFiles": ["/opt/app/server.py"],
    "newBinaries": [
      {"path": "/usr/local/bin/helper", "sha256": "..."}
    ]
  },
  "evidence": [
    {"signal": "procfs.maps", "value": "/lib/.../libssl.so.3@0x7f..."},
    {"signal": "cri.task.inspect", "value": "pid=12345"}
  ]
}
```

---

## 8. Build-ID Workflow

### 8.1 Capture

1. Observer extracts `NT_GNU_BUILD_ID` from `/proc/<pid>/exe`
2. Normalize to lower-case hex
3. Include in the runtime event as `process.buildId`

### 8.2 Correlation

1. Scanner.WebService persists the observation
2. Policy responses include a `buildIds` list
3. Debug files matched via `.build-id/<aa>/<rest>.debug` (path layout sketched below)
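
The debug-file path derives mechanically from the build-ID; a small sketch of the layout in step 3:

```csharp
static class BuildId
{
    // Maps "9f3a1c..." to ".build-id/9f/3a1c....debug" per the layout above.
    public static string DebugFilePath(string buildId)
    {
        string id = buildId.ToLowerInvariant();   // the capture step normalizes to lower-case hex
        return $".build-id/{id[..2]}/{id[2..]}.debug";
    }
}
```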

### 8.3 Symbol Resolution

```bash
# Via CLI
stella runtime policy test --image sha256:abc123... | jq '.buildIds'

# Via debuginfod
debuginfod-find debuginfo 9f3a1cd4c0b7adfe91c0e3b51d2f45fb0f76a4c1
```

---

## 9. Implementation Strategy

### 9.1 Phase 1: Observer Core (Complete)

- [x] CRI socket integration
- [x] Container lifecycle tracking
- [x] Entrypoint tracing
- [x] Loaded library sampling
- [x] Event batching and compression

### 9.2 Phase 2: Admission Webhook (Complete)

- [x] ValidatingAdmissionWebhook
- [x] Image digest resolution
- [x] Policy integration
- [x] Response caching
- [x] Fail-open/closed modes

### 9.3 Phase 3: Drift Detection (In Progress)

- [x] Process drift detection
- [x] Library drift detection
- [ ] Filesystem drift monitoring (ZASTAVA-DRIFT-50-001)
- [ ] Network posture checks (ZASTAVA-NET-51-001)

### 9.4 Phase 4: Advanced Features (Planned)

- [ ] eBPF syscall tracing (optional)
- [ ] Windows container support
- [ ] Live used-by-entrypoint synthesis
- [ ] Admission dry-run dashboards

---

## 10. Configuration

```yaml
zastava:
  mode:
    observer: true
    webhook: true

  backend:
    baseAddress: "https://scanner-web.internal"
    policyPath: "/api/v1/scanner/policy/runtime"
    requestTimeoutSeconds: 5

  runtime:
    authority:
      issuer: "https://authority.internal"
      clientId: "zastava-observer"
      audience: ["scanner", "zastava"]
      scopes: ["api:scanner.runtime.write"]
      requireDpop: true
      requireMutualTls: true

    tenant: "acme-corp"
    engine: "auto"          # containerd|cri-o|docker|auto
    procfs: "/host/proc"

    collect:
      entryTrace: true
      loadedLibs: true
      maxLibs: 256
      maxHashBytesPerContainer: 64000000

  admission:
    enforce: true
    failOpenNamespaces: ["dev", "test"]
    verify:
      imageSignature: true
      sbomReferrer: true
      scannerPolicyPass: true
    cacheTtlSeconds: 300

  limits:
    eventsPerSecond: 50
    burst: 200
    perNodeQueue: 10000
```

---

## 11. Security Posture

### 11.1 Privileges

| Capability | Purpose | Mode |
|------------|---------|------|
| `CAP_SYS_PTRACE` | nsenter trace | Optional |
| `CAP_DAC_READ_SEARCH` | Read /proc | Required |
| Host PID namespace | Container PIDs | Required |
| Read-only mounts | /proc, sockets | Required |

### 11.2 Least Privilege

- No write mounts
- No host networking
- No privilege escalation
- Read-only rootfs

### 11.3 Data Minimization

- No env var exfiltration
- No command argument logging (unless in diagnostic mode)
- Rate limits prevent abuse

---

## 12. Observability

### 12.1 Observer Metrics

- `zastava.runtime.events.total{kind}`
- `zastava.runtime.backend.latency.ms{endpoint}`
- `zastava.proc_maps.samples.total{result}`
- `zastava.entrytrace.depth{p99}`
- `zastava.hash.bytes.total`
- `zastava.buffer.drops.total`

### 12.2 Webhook Metrics

- `zastava.admission.decisions.total{decision}`
- `zastava.admission.cache.hits.total`
- `zastava.backend.failures.total`

---

## 13. Performance Targets

| Operation | Target |
|-----------|--------|
| `/proc/<pid>/maps` sampling | < 30ms (64 files) |
| Full library hash set | < 200ms (256 libs) |
| Admission with warm cache | < 8ms p95 |
| Admission with backend call | < 50ms p95 |
| Event throughput | 5k events/min/node |

---

## 14. Related Documentation

| Resource | Location |
|----------|----------|
| Zastava architecture | `docs/modules/zastava/architecture.md` |
| Runtime event schema | `docs/modules/zastava/event-schema.md` |
| Admission configuration | `docs/modules/zastava/admission-config.md` |
| Deployment guide | `docs/modules/zastava/deployment.md` |

---

## 15. Sprint Mapping

- **Primary Sprint:** SPRINT_0144_0001_0001_zastava_runtime_signals.md
- **Related Sprints:**
  - SPRINT_0140_0001_0001_runtime_signals.md
  - SPRINT_0143_0000_0001_signals.md

**Key Task IDs:**
- `ZASTAVA-OBS-40-001` - Observer core (DONE)
- `ZASTAVA-ADM-41-001` - Admission webhook (DONE)
- `ZASTAVA-DRIFT-50-001` - Filesystem drift (IN PROGRESS)
- `ZASTAVA-NET-51-001` - Network posture (TODO)
- `ZASTAVA-EBPF-60-001` - eBPF integration (FUTURE)

---

## 16. Success Metrics

| Metric | Target |
|--------|--------|
| Event capture rate | 99.9% of container starts |
| Admission latency | < 50ms p95 |
| Drift detection rate | 100% of runtime changes |
| False positive rate | < 1% of drift alerts |
| Node resource usage | < 2% CPU, < 100MB RAM |

---

*Last updated: 2025-11-29*

@@ -0,0 +1,373 @@

# Telemetry and Observability Patterns

**Version:** 1.0
**Date:** 2025-11-29
**Status:** Canonical

This advisory defines the product rationale, collector topology, and implementation strategy for the Telemetry module, covering metrics, traces, logs, forensic pipelines, and offline packaging.

---

## 1. Executive Summary

The Telemetry module provides **unified observability infrastructure** across all Stella Ops components. Key capabilities:

- **OpenTelemetry Native** - OTLP collection for metrics, traces, logs
- **Forensic Mode** - Extended retention and 100% sampling during incidents
- **Profile-Based Configuration** - Default, forensic, and air-gap profiles
- **Sealed-Mode Guards** - Automatic exporter restrictions in air-gap
- **Offline Bundles** - Signed OTLP archives for compliance

---

## 2. Market Drivers

### 2.1 Target Segments

| Segment | Observability Requirements | Use Case |
|---------|---------------------------|----------|
| **Platform Ops** | Real-time monitoring | Operational health |
| **Security Teams** | Forensic investigation | Incident response |
| **Compliance** | Audit trails | SOC 2, FedRAMP |
| **DevSecOps** | Pipeline visibility | CI/CD debugging |

### 2.2 Competitive Positioning

Most vulnerability tools provide minimal observability. Stella Ops differentiates with:
- **Built-in OpenTelemetry** across all services
- **Forensic mode** with automatic retention extension
- **Sealed-mode compatibility** for air-gap
- **Signed OTLP bundles** for compliance archives
- **Incident-triggered sampling** escalation

---

## 3. Collector Topology

### 3.1 Architecture

```
┌─────────────────────────────────────────────────────┐
│                      Services                       │
│  Scanner │ Policy │ Authority │ Orchestrator │ ...  │
└─────────────────────┬───────────────────────────────┘
                      │ OTLP/gRPC
                      ▼
┌─────────────────────────────────────────────────────┐
│               OpenTelemetry Collector               │
│  ┌─────────┐  ┌──────────┐  ┌─────────────────────┐ │
│  │ Traces  │  │ Metrics  │  │        Logs         │ │
│  └────┬────┘  └────┬─────┘  └──────────┬──────────┘ │
│       │ Tail       │ Batch             │ Redaction  │
│       │ Sampling   │                   │            │
└───────┼────────────┼───────────────────┼────────────┘
        │            │                   │
        ▼            ▼                   ▼
   ┌────────┐   ┌──────────┐        ┌────────┐
   │ Tempo  │   │Prometheus│        │  Loki  │
   └────────┘   └──────────┘        └────────┘
```

### 3.2 Collector Profiles

| Profile | Use Case | Configuration |
|---------|----------|---------------|
| **default** | Normal operation | 10% trace sampling, 30-day retention |
| **forensic** | Investigation mode | 100% sampling, 180-day retention |
| **airgap** | Offline deployment | File exporters, no external network |

---

## 4. Metrics

### 4.1 Standard Metrics

| Metric | Type | Labels | Description |
|--------|------|--------|-------------|
| `stellaops_request_duration_seconds` | Histogram | service, endpoint | Request latency |
| `stellaops_request_total` | Counter | service, status | Request count |
| `stellaops_active_jobs` | Gauge | tenant, jobType | Active job count |
| `stellaops_queue_depth` | Gauge | queue | Queue depth |
| `stellaops_scan_duration_seconds` | Histogram | tenant | Scan duration |

### 4.2 Module-Specific Metrics

**Policy Engine:**
- `policy_run_seconds{mode,tenant,policy}`
- `policy_rules_fired_total{policy,rule}`
- `policy_vex_overrides_total{policy,vendor}`

**Scanner:**
- `scanner_sbom_components_total{ecosystem}`
- `scanner_vulnerabilities_found_total{severity}`
- `scanner_attestations_logged_total`

**Authority:**
- `authority_token_issued_total{grant_type,audience}`
- `authority_token_rejected_total{reason}`
- `authority_dpop_nonce_miss_total`

---

## 5. Traces

### 5.1 Trace Context

All services propagate W3C Trace Context:
- `traceparent` header
- `tracestate` for vendor-specific data
- `baggage` for cross-service attributes

### 5.2 Span Conventions

| Span | Attributes | Description |
|------|------------|-------------|
| `http.request` | url, method, status | HTTP handler |
| `db.query` | collection, operation | MongoDB ops |
| `policy.evaluate` | policyId, version | Policy run |
| `scan.image` | imageRef, digest | Image scan |
| `sign.dsse` | predicateType | DSSE signing |

### 5.3 Sampling Strategy

**Default (Tail Sampling):**
- Error traces: 100%
- Slow traces (>2s): 100%
- Normal traces: 10%

**Forensic Mode:**
- All traces: 100%
- Extended attributes enabled

---

## 6. Logs

### 6.1 Structured Format

```json
{
  "timestamp": "2025-11-29T12:00:00.123Z",
  "level": "info",
  "message": "Scan completed",
  "service": "scanner",
  "traceId": "abc123...",
  "spanId": "def456...",
  "tenant": "acme-corp",
  "imageDigest": "sha256:...",
  "componentCount": 245,
  "vulnerabilityCount": 12
}
```

### 6.2 Redaction

Attribute processors strip sensitive data (a sketch follows the list):
- `authorization` headers
- `secretRef` values
- PII based on an allowed-key policy
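
A sketch of an allowed-key attribute processor combining both rules (deny known-sensitive keys, then keep only explicitly allowed ones); the class and key names are illustrative:

```csharp
using System;
using System.Collections.Generic;
using System.Linq;

static class LogRedactor
{
    // Keys that are always stripped, regardless of the allow list.
    static readonly HashSet<string> Denied =
        new(StringComparer.OrdinalIgnoreCase) { "authorization", "secretRef" };

    public static Dictionary<string, object> Redact(
        IReadOnlyDictionary<string, object> attributes, ISet<string> allowedKeys)
        => attributes
            .Where(kv => !Denied.Contains(kv.Key) && allowedKeys.Contains(kv.Key))
            .ToDictionary(kv => kv.Key, kv => kv.Value);
}
```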
|
||||
|
||||
### 6.3 Log Levels
|
||||
|
||||
| Level | Purpose | Retention |
|
||||
|-------|---------|-----------|
|
||||
| `error` | Failures | 180 days |
|
||||
| `warn` | Anomalies | 90 days |
|
||||
| `info` | Operations | 30 days |
|
||||
| `debug` | Development | 7 days |
|
||||
|
||||
---
|
||||
|
||||
## 7. Forensic Mode
|
||||
|
||||
### 7.1 Activation
|
||||
|
||||
```bash
|
||||
# Activate forensic mode for tenant
|
||||
stella telemetry incident start --tenant acme-corp --reason "CVE-2025-12345 investigation"
|
||||
|
||||
# Check status
|
||||
stella telemetry incident status
|
||||
|
||||
# Deactivate
|
||||
stella telemetry incident stop --tenant acme-corp
|
||||
```
|
||||
|
||||
### 7.2 Behavior Changes
|
||||
|
||||
| Aspect | Default | Forensic |
|
||||
|--------|---------|----------|
|
||||
| Trace sampling | 10% | 100% |
|
||||
| Log level | info | debug |
|
||||
| Retention | 30 days | 180 days |
|
||||
| Attributes | Standard | Extended |
|
||||
| Export frequency | 1 minute | 10 seconds |
|
||||
|
||||
### 7.3 Automatic Triggers
|
||||
|
||||
- Orchestrator incident escalation
|
||||
- Policy violation threshold exceeded
|
||||
- Circuit breaker activation
|
||||
- Manual operator trigger
|
||||
|
||||
---
|
||||
|
||||
## 8. Implementation Strategy
|
||||
|
||||
### 8.1 Phase 1: Core Telemetry (Complete)
|
||||
|
||||
- [x] OpenTelemetry SDK integration
|
||||
- [x] Metrics exporter (Prometheus)
|
||||
- [x] Trace exporter (Tempo/Jaeger)
|
||||
- [x] Log exporter (Loki)
|
||||
|
||||
### 8.2 Phase 2: Advanced Features (Complete)
|
||||
|
||||
- [x] Tail sampling configuration
|
||||
- [x] Attribute redaction
|
||||
- [x] Profile-based configuration
|
||||
- [x] Dashboard provisioning
|
||||
|
||||
### 8.3 Phase 3: Forensic & Offline (In Progress)
|
||||
|
||||
- [x] Forensic mode toggle
|
||||
- [ ] Forensic bundle export (TELEM-FOR-50-001)
|
||||
- [ ] Sealed-mode guards (TELEM-SEAL-51-001)
|
||||
- [ ] Offline bundle signing (TELEM-SIGN-52-001)
|
||||
|
||||
---
|
||||
|
||||
## 9. API Surface
|
||||
|
||||
### 9.1 Configuration
|
||||
|
||||
| Endpoint | Method | Scope | Description |
|
||||
|----------|--------|-------|-------------|
|
||||
| `/telemetry/config/profile/{name}` | GET | `telemetry:read` | Download collector config |
|
||||
| `/telemetry/config/profiles` | GET | `telemetry:read` | List profiles |
|
||||
|
||||
### 9.2 Incident Mode
|
||||
|
||||
| Endpoint | Method | Scope | Description |
|
||||
|----------|--------|-------|-------------|
|
||||
| `/telemetry/incidents/mode` | POST | `telemetry:admin` | Toggle forensic mode |
|
||||
| `/telemetry/incidents/status` | GET | `telemetry:read` | Current mode status |
|
||||
|
||||
### 9.3 Exports
|
||||
|
||||
| Endpoint | Method | Scope | Description |
|
||||
|----------|--------|-------|-------------|
|
||||
| `/telemetry/exports/forensic/{window}` | GET | `telemetry:export` | Stream OTLP bundle |
|
||||
|
||||
---
|
||||
|
||||
## 10. Offline Support
|
||||
|
||||
### 10.1 Bundle Structure
|
||||
|
||||
```
|
||||
telemetry-bundle/
|
||||
├── otlp/
|
||||
│ ├── metrics.pb
|
||||
│ ├── traces.pb
|
||||
│ └── logs.pb
|
||||
├── config/
|
||||
│ ├── collector.yaml
|
||||
│ └── dashboards/
|
||||
├── manifest.json
|
||||
└── signatures/
|
||||
└── manifest.sig
|
||||
```
|
||||
|
||||
### 10.2 Sealed-Mode Guards
|
||||
|
||||
```csharp
|
||||
// StellaOps.Telemetry.Core enforces IEgressPolicy
|
||||
if (sealedMode.IsActive)
|
||||
{
|
||||
// Disable non-loopback exporters
|
||||
// Emit structured warning with remediation
|
||||
// Fall back to file-based export
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 11. Dashboards & Alerts
|
||||
|
||||
### 11.1 Standard Dashboards
|
||||
|
||||
| Dashboard | Purpose | Panels |
|
||||
|-----------|---------|--------|
|
||||
| Platform Health | Overall status | Request rate, error rate, latency |
|
||||
| Scan Operations | Scanner metrics | Scan rate, duration, findings |
|
||||
| Policy Engine | Policy metrics | Evaluation rate, rule hits, verdicts |
|
||||
| Job Orchestration | Queue metrics | Queue depth, job latency, failures |
|
||||
|
||||
### 11.2 Alert Rules
|
||||
|
||||
| Alert | Condition | Severity |
|
||||
|-------|-----------|----------|
|
||||
| High Error Rate | error_rate > 5% | critical |
|
||||
| Slow Scans | p95 > 5m | warning |
|
||||
| Queue Backlog | depth > 1000 | warning |
|
||||
| Circuit Open | breaker_open = 1 | critical |
|
||||
|
||||
---
|
||||
|
||||
## 12. Security Considerations
|
||||
|
||||
### 12.1 Data Protection
|
||||
|
||||
- Sensitive attributes redacted at collection
|
||||
- Encrypted in transit (TLS)
|
||||
- Encrypted at rest (storage layer)
|
||||
- Retention policies enforced
|
||||
|
||||
### 12.2 Access Control
|
||||
|
||||
- Authority scopes for API access
|
||||
- Tenant isolation in queries
|
||||
- Audit logging for forensic access
|
||||
|
||||
---
|
||||
|
||||
## 13. Related Documentation
|
||||
|
||||
| Resource | Location |
|
||||
|----------|----------|
|
||||
| Telemetry architecture | `docs/modules/telemetry/architecture.md` |
|
||||
| Collector configuration | `docs/modules/telemetry/collector-config.md` |
|
||||
| Dashboard provisioning | `docs/modules/telemetry/dashboards.md` |
|
||||
|
||||
---
|
||||
|
||||
## 14. Sprint Mapping
|
||||
|
||||
- **Primary Sprint:** SPRINT_0180_0001_0001_telemetry_core.md (NEW)
|
||||
- **Related Sprints:**
|
||||
- SPRINT_0181_0001_0002_telemetry_forensic.md
|
||||
- SPRINT_0182_0001_0003_telemetry_offline.md
|
||||
|
||||
**Key Task IDs:**
|
||||
- `TELEM-CORE-40-001` - SDK integration (DONE)
|
||||
- `TELEM-DASH-41-001` - Dashboard provisioning (DONE)
|
||||
- `TELEM-FOR-50-001` - Forensic bundles (IN PROGRESS)
|
||||
- `TELEM-SEAL-51-001` - Sealed-mode guards (TODO)
|
||||
- `TELEM-SIGN-52-001` - Bundle signing (TODO)
|
||||
|
||||
---
|
||||
|
||||
## 15. Success Metrics
|
||||
|
||||
| Metric | Target |
|
||||
|--------|--------|
|
||||
| Collection overhead | < 2% CPU |
|
||||
| Trace sampling accuracy | 100% for errors |
|
||||
| Log ingestion latency | < 5 seconds |
|
||||
| Forensic activation time | < 30 seconds |
|
||||
| Bundle export time | < 5 minutes (24h data) |
|
||||
|
||||
---
|
||||
|
||||
*Last updated: 2025-11-29*
|
||||
@@ -157,6 +157,107 @@ These are the authoritative advisories to reference for implementation:
|
||||
- `docs/security/dpop-mtls-rollout.md` - Sender constraints
|
||||
- **Status:** Fills HIGH-priority gap - consolidates token model, scopes, multi-tenant isolation
|
||||
|
||||
### CLI Developer Experience & Command UX
|
||||
- **Canonical:** `29-Nov-2025 - CLI Developer Experience and Command UX.md`
|
||||
- **Sprint:** SPRINT_0201_0001_0001_cli_i.md (PRIMARY)
|
||||
- **Related Sprints:**
|
||||
- SPRINT_203_cli_iii.md
|
||||
- SPRINT_205_cli_v.md
|
||||
- **Related Docs:**
|
||||
- `docs/modules/cli/architecture.md` - Module architecture
|
||||
- `docs/09_API_CLI_REFERENCE.md` - Command reference
|
||||
- **Status:** Fills HIGH-priority gap - covers command surface, auth model, Buildx integration
|
||||
|
||||
### Orchestrator Event Model & Job Lifecycle
|
||||
- **Canonical:** `29-Nov-2025 - Orchestrator Event Model and Job Lifecycle.md`
|
||||
- **Sprint:** SPRINT_0151_0001_0001_orchestrator_i.md (PRIMARY)
|
||||
- **Related Sprints:**
|
||||
- SPRINT_152_orchestrator_ii.md
|
||||
- SPRINT_0152_0001_0002_orchestrator_ii.md
|
||||
- **Related Docs:**
|
||||
- `docs/modules/orchestrator/architecture.md` - Module architecture
|
||||
- **Status:** Fills HIGH-priority gap - covers job lifecycle, quota governance, replay semantics
|
||||
|
||||
### Export Center & Reporting Strategy
|
||||
- **Canonical:** `29-Nov-2025 - Export Center and Reporting Strategy.md`
|
||||
- **Sprint:** SPRINT_0160_0001_0001_export_evidence.md (PRIMARY)
|
||||
- **Related Sprints:**
|
||||
- SPRINT_0161_0001_0001_evidencelocker.md
|
||||
- **Related Docs:**
|
||||
- `docs/modules/export-center/architecture.md` - Module architecture
|
||||
- **Status:** Fills MEDIUM-priority gap - covers profile system, adapters, distribution channels
|
||||
|
||||
### Runtime Posture & Observation (Zastava)
|
||||
- **Canonical:** `29-Nov-2025 - Runtime Posture and Observation with Zastava.md`
|
||||
- **Sprint:** SPRINT_0144_0001_0001_zastava_runtime_signals.md (PRIMARY)
|
||||
- **Related Sprints:**
|
||||
- SPRINT_0140_0001_0001_runtime_signals.md
|
||||
- SPRINT_0143_0000_0001_signals.md
|
||||
- **Related Docs:**
|
||||
- `docs/modules/zastava/architecture.md` - Module architecture
|
||||
- **Status:** Fills MEDIUM-priority gap - covers runtime events, admission control, drift detection
|
||||
|
||||
### Notification Rules & Alerting Engine
|
||||
- **Canonical:** `29-Nov-2025 - Notification Rules and Alerting Engine.md`
|
||||
- **Sprint:** SPRINT_0170_0001_0001_notify_engine.md (NEW)
|
||||
- **Related Sprints:**
|
||||
- SPRINT_0171_0001_0002_notify_connectors.md
|
||||
- SPRINT_0172_0001_0003_notify_ack_tokens.md
|
||||
- **Related Docs:**
|
||||
- `docs/modules/notify/architecture.md` - Module architecture
|
||||
- **Status:** Fills MEDIUM-priority gap - covers rules engine, channels, noise control, ack tokens
|
||||
|
||||
### Graph Analytics & Dependency Insights
|
||||
- **Canonical:** `29-Nov-2025 - Graph Analytics and Dependency Insights.md`
|
||||
- **Sprint:** SPRINT_0141_0001_0001_graph_indexer.md (PRIMARY)
|
||||
- **Related Sprints:**
|
||||
- SPRINT_0401_0001_0001_reachability_evidence_chain.md
|
||||
- SPRINT_0140_0001_0001_runtime_signals.md
|
||||
- **Related Docs:**
|
||||
- `docs/modules/graph/architecture.md` - Module architecture
|
||||
- **Status:** Fills MEDIUM-priority gap - covers graph model, overlays, analytics, visualization
|
||||
|
||||
### Telemetry & Observability Patterns
- **Canonical:** `29-Nov-2025 - Telemetry and Observability Patterns.md`
- **Sprint:** SPRINT_0180_0001_0001_telemetry_core.md (NEW)
- **Related Sprints:**
  - SPRINT_0181_0001_0002_telemetry_forensic.md
  - SPRINT_0182_0001_0003_telemetry_offline.md
- **Related Docs:**
  - `docs/modules/telemetry/architecture.md` - Module architecture
- **Status:** Fills MEDIUM-priority gap - covers collector topology, forensic mode, offline bundles

### Policy Simulation & Shadow Gates
- **Canonical:** `29-Nov-2025 - Policy Simulation and Shadow Gates.md`
- **Sprint:** SPRINT_0185_0001_0001_policy_simulation.md (NEW)
- **Related Sprints:**
  - SPRINT_0120_0000_0001_policy_reasoning.md
  - SPRINT_0121_0001_0001_policy_reasoning.md
- **Related Docs:**
  - `docs/modules/policy/architecture.md` - Module architecture
- **Status:** Fills MEDIUM-priority gap - covers shadow runs, coverage fixtures, promotion gates

### Findings Ledger & Immutable Audit Trail
- **Canonical:** `29-Nov-2025 - Findings Ledger and Immutable Audit Trail.md`
- **Sprint:** SPRINT_0186_0001_0001_record_deterministic_execution.md (PRIMARY)
- **Related Sprints:**
  - SPRINT_0120_0000_0001_policy_reasoning.md
  - SPRINT_311_docs_tasks_md_xi.md
- **Related Docs:**
  - `docs/modules/findings-ledger/openapi/findings-ledger.v1.yaml` - OpenAPI spec
- **Status:** Fills MEDIUM-priority gap - covers append-only events, Merkle anchoring, projections

### Concelier Advisory Ingestion Model
- **Canonical:** `29-Nov-2025 - Concelier Advisory Ingestion Model.md`
- **Sprint:** SPRINT_0115_0001_0004_concelier_iv.md (PRIMARY)
- **Related Sprints:**
  - SPRINT_0113_0001_0002_concelier_ii.md
  - SPRINT_0114_0001_0003_concelier_iii.md
- **Related Docs:**
  - `docs/modules/concelier/architecture.md` - Module architecture
  - `docs/modules/concelier/link-not-merge-schema.md` - LNM schema
- **Status:** Fills MEDIUM-priority gap - covers AOC, Link-Not-Merge, connectors, deterministic exports

## Files Archived

The following files have been moved to `archived/27-Nov-2025-superseded/`:
@@ -198,6 +299,16 @@ The following issues were fixed:
| Mirror & Offline Kit | SPRINT_0125_0001_0001 | EXISTING |
| Task Pack Orchestration | SPRINT_0157_0001_0001 | EXISTING |
| Auth/AuthZ Architecture | Multiple (100, 314, 0514) | EXISTING |
| CLI Developer Experience | SPRINT_0201_0001_0001 | NEW |
| Orchestrator Event Model | SPRINT_0151_0001_0001 | NEW |
| Export Center Strategy | SPRINT_0160_0001_0001 | NEW |
| Zastava Runtime Posture | SPRINT_0144_0001_0001 | NEW |
| Notification Rules Engine | SPRINT_0170_0001_0001 | NEW |
| Graph Analytics | SPRINT_0141_0001_0001 | NEW |
| Telemetry & Observability | SPRINT_0180_0001_0001 | NEW |
| Policy Simulation | SPRINT_0185_0001_0001 | NEW |
| Findings Ledger | SPRINT_0186_0001_0001 | NEW |
| Concelier Ingestion | SPRINT_0115_0001_0004 | NEW |

## Implementation Priority

@@ -210,11 +321,21 @@ Based on gap analysis:
5. **P1 - Sovereign Crypto** (Sprint 0514) - Regional compliance enablement
6. **P1 - Evidence Bundle & Replay** (Sprint 0161, 0187) - Audit/compliance critical
7. **P1 - Mirror & Offline Kit** (Sprint 0125, 0150) - Air-gap deployment critical
8. **P2 - Task Pack Orchestration** (Sprint 0157, 0158) - Automation foundation
9. **P2 - Explainability** (Sprint 0401) - UX enhancement, existing tasks
10. **P2 - Plugin Architecture** (Multiple) - Foundational extensibility patterns
11. **P2 - Auth/AuthZ Architecture** (Multiple) - Security consolidation
12. **P3 - Already Implemented** - Unknowns, Graph IDs, DSSE batching
8. **P1 - CLI Developer Experience** (Sprint 0201) - Developer UX critical
9. **P1 - Orchestrator Event Model** (Sprint 0151) - Job lifecycle foundation
10. **P2 - Task Pack Orchestration** (Sprint 0157, 0158) - Automation foundation
11. **P2 - Explainability** (Sprint 0401) - UX enhancement, existing tasks
12. **P2 - Plugin Architecture** (Multiple) - Foundational extensibility patterns
13. **P2 - Auth/AuthZ Architecture** (Multiple) - Security consolidation
14. **P2 - Export Center** (Sprint 0160) - Reporting flexibility
15. **P2 - Zastava Runtime** (Sprint 0144) - Runtime observability
16. **P2 - Notification Rules** (Sprint 0170) - Alert management
17. **P2 - Graph Analytics** (Sprint 0141) - Dependency insights
18. **P2 - Telemetry** (Sprint 0180) - Observability infrastructure
19. **P2 - Policy Simulation** (Sprint 0185) - Safe policy testing
20. **P2 - Findings Ledger** (Sprint 0186) - Audit immutability
21. **P2 - Concelier Ingestion** (Sprint 0115) - Advisory pipeline
22. **P3 - Already Implemented** - Unknowns, Graph IDs, DSSE batching

## Implementer Quick Reference

@@ -241,6 +362,15 @@ For each topic, the implementer should read:
| Evidence Locker | `docs/modules/evidence-locker/*.md` | `src/EvidenceLocker/*/AGENTS.md` |
| Mirror | `docs/modules/mirror/*.md` | `src/Mirror/*/AGENTS.md` |
| TaskRunner | `docs/modules/taskrunner/*.md` | `src/TaskRunner/*/AGENTS.md` |
| CLI | `docs/modules/cli/architecture.md` | `src/Cli/*/AGENTS.md` |
| Orchestrator | `docs/modules/orchestrator/architecture.md` | `src/Orchestrator/*/AGENTS.md` |
| Export Center | `docs/modules/export-center/architecture.md` | `src/ExportCenter/*/AGENTS.md` |
| Zastava | `docs/modules/zastava/architecture.md` | `src/Zastava/*/AGENTS.md` |
| Notify | `docs/modules/notify/architecture.md` | `src/Notify/*/AGENTS.md` |
| Graph | `docs/modules/graph/architecture.md` | `src/Graph/*/AGENTS.md` |
| Telemetry | `docs/modules/telemetry/architecture.md` | `src/Telemetry/*/AGENTS.md` |
| Findings Ledger | `docs/modules/findings-ledger/openapi/` | `src/Findings/*/AGENTS.md` |
| Concelier | `docs/modules/concelier/architecture.md` | `src/Concelier/*/AGENTS.md` |

## Topical Gaps (Advisory Needed)

@@ -254,12 +384,17 @@ The following topics are mentioned in CLAUDE.md or module docs but lack dedicate
| ~~Mirror/Offline Kit Strategy~~ | HIGH | **FILLED** | `29-Nov-2025 - Mirror and Offline Kit Strategy.md` |
| ~~Task Pack Orchestration~~ | HIGH | **FILLED** | `29-Nov-2025 - Task Pack Orchestration and Automation.md` |
| ~~Auth/AuthZ Architecture~~ | HIGH | **FILLED** | `29-Nov-2025 - Authentication and Authorization Architecture.md` |
| ~~CLI Developer Experience~~ | HIGH | **FILLED** | `29-Nov-2025 - CLI Developer Experience and Command UX.md` |
| ~~Orchestrator Event Model~~ | HIGH | **FILLED** | `29-Nov-2025 - Orchestrator Event Model and Job Lifecycle.md` |
| ~~Export Center Strategy~~ | MEDIUM | **FILLED** | `29-Nov-2025 - Export Center and Reporting Strategy.md` |
| ~~Runtime Posture & Observation~~ | MEDIUM | **FILLED** | `29-Nov-2025 - Runtime Posture and Observation with Zastava.md` |
| ~~Notification Rules Engine~~ | MEDIUM | **FILLED** | `29-Nov-2025 - Notification Rules and Alerting Engine.md` |
| ~~Graph Analytics & Clustering~~ | MEDIUM | **FILLED** | `29-Nov-2025 - Graph Analytics and Dependency Insights.md` |
| ~~Telemetry & Observability~~ | MEDIUM | **FILLED** | `29-Nov-2025 - Telemetry and Observability Patterns.md` |
| ~~Policy Simulation & Shadow Gates~~ | MEDIUM | **FILLED** | `29-Nov-2025 - Policy Simulation and Shadow Gates.md` |
| ~~Findings Ledger & Audit Trail~~ | MEDIUM | **FILLED** | `29-Nov-2025 - Findings Ledger and Immutable Audit Trail.md` |
| ~~Concelier Advisory Ingestion~~ | MEDIUM | **FILLED** | `29-Nov-2025 - Concelier Advisory Ingestion Model.md` |
| **CycloneDX 1.6 .NET Integration** | LOW | Open | Deep Architecture covers generically; expand with .NET-specific guidance |
| **Findings Ledger & Audit Trail** | MEDIUM | Open | Immutable verdict tracking; module exists but no advisory |
| **Runtime Posture & Observation** | MEDIUM | Open | Zastava runtime signals; sprints exist but no advisory |
| **Graph Analytics & Clustering** | MEDIUM | Open | Community detection, blast-radius; implementation underway |
| **Policy Simulation & Shadow Gates** | MEDIUM | Open | Impact modeling; extensive sprints but no contract advisory |
| **Notification Rules Engine** | MEDIUM | Open | Throttling, digests, templating; sprints active |

## Known Issues (Non-Blocking)

@@ -274,4 +409,4 @@ Several filenames use en-dash (U+2011) instead of regular hyphen (-). This may c

---
*Index created: 2025-11-27*
*Last updated: 2025-11-29*
*Last updated: 2025-11-29 (added 10 new advisories filling all identified gaps)*

@@ -1,11 +1,17 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Repositories;

namespace StellaOps.Authority.Storage.Postgres.Repositories;

/// <summary>
/// PostgreSQL repository for API key operations.
/// </summary>
public sealed class ApiKeyRepository : RepositoryBase<AuthorityDataSource>, IApiKeyRepository
{
    public ApiKeyRepository(AuthorityDataSource dataSource) : base(dataSource) { }
    public ApiKeyRepository(AuthorityDataSource dataSource, ILogger<ApiKeyRepository> logger)
        : base(dataSource, logger) { }

    public async Task<ApiKeyEntity?> GetByIdAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
@@ -14,9 +20,9 @@ public sealed class ApiKeyRepository : RepositoryBase<AuthorityDataSource>, IApi
            FROM authority.api_keys
            WHERE tenant_id = @tenant_id AND id = @id
            """;
        return await QuerySingleOrDefaultAsync(tenantId, sql, MapApiKey,
            cmd => { cmd.Parameters.AddWithValue("id", id); },
            cancellationToken).ConfigureAwait(false);
        return await QuerySingleOrDefaultAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            MapApiKey, cancellationToken).ConfigureAwait(false);
    }

    public async Task<ApiKeyEntity?> GetByPrefixAsync(string keyPrefix, CancellationToken cancellationToken = default)
@@ -27,9 +33,8 @@ public sealed class ApiKeyRepository : RepositoryBase<AuthorityDataSource>, IApi
            WHERE key_prefix = @key_prefix AND status = 'active'
            """;
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = connection.CreateCommand();
        command.CommandText = sql;
        command.Parameters.AddWithValue("key_prefix", keyPrefix);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "key_prefix", keyPrefix);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapApiKey(reader) : null;
    }
@@ -42,7 +47,9 @@ public sealed class ApiKeyRepository : RepositoryBase<AuthorityDataSource>, IApi
            WHERE tenant_id = @tenant_id
            ORDER BY created_at DESC
            """;
        return await QueryAsync(tenantId, sql, MapApiKey, cancellationToken: cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => AddParameter(cmd, "tenant_id", tenantId),
            MapApiKey, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<ApiKeyEntity>> GetByUserIdAsync(string tenantId, Guid userId, CancellationToken cancellationToken = default)
@@ -53,9 +60,9 @@ public sealed class ApiKeyRepository : RepositoryBase<AuthorityDataSource>, IApi
            WHERE tenant_id = @tenant_id AND user_id = @user_id
            ORDER BY created_at DESC
            """;
        return await QueryAsync(tenantId, sql, MapApiKey,
            cmd => { cmd.Parameters.AddWithValue("user_id", userId); },
            cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "user_id", userId); },
            MapApiKey, cancellationToken).ConfigureAwait(false);
    }

    public async Task<Guid> CreateAsync(string tenantId, ApiKeyEntity apiKey, CancellationToken cancellationToken = default)
@@ -66,25 +73,28 @@ public sealed class ApiKeyRepository : RepositoryBase<AuthorityDataSource>, IApi
            RETURNING id
            """;
        var id = apiKey.Id == Guid.Empty ? Guid.NewGuid() : apiKey.Id;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            AddNullableParameter(cmd, "user_id", apiKey.UserId);
            cmd.Parameters.AddWithValue("name", apiKey.Name);
            cmd.Parameters.AddWithValue("key_hash", apiKey.KeyHash);
            cmd.Parameters.AddWithValue("key_prefix", apiKey.KeyPrefix);
            AddArrayParameter(cmd, "scopes", apiKey.Scopes);
            cmd.Parameters.AddWithValue("status", apiKey.Status);
            AddNullableParameter(cmd, "expires_at", apiKey.ExpiresAt);
            AddJsonbParameter(cmd, "metadata", apiKey.Metadata);
        }, cancellationToken).ConfigureAwait(false);
        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "id", id);
        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "user_id", apiKey.UserId);
        AddParameter(command, "name", apiKey.Name);
        AddParameter(command, "key_hash", apiKey.KeyHash);
        AddParameter(command, "key_prefix", apiKey.KeyPrefix);
        AddTextArrayParameter(command, "scopes", apiKey.Scopes);
        AddParameter(command, "status", apiKey.Status);
        AddParameter(command, "expires_at", apiKey.ExpiresAt);
        AddJsonbParameter(command, "metadata", apiKey.Metadata);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return id;
    }

    public async Task UpdateLastUsedAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
        const string sql = "UPDATE authority.api_keys SET last_used_at = NOW() WHERE tenant_id = @tenant_id AND id = @id";
        await ExecuteAsync(tenantId, sql, cmd => { cmd.Parameters.AddWithValue("id", id); }, cancellationToken).ConfigureAwait(false);
        await ExecuteAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            cancellationToken).ConfigureAwait(false);
    }

    public async Task RevokeAsync(string tenantId, Guid id, string revokedBy, CancellationToken cancellationToken = default)
@@ -95,32 +105,35 @@ public sealed class ApiKeyRepository : RepositoryBase<AuthorityDataSource>, IApi
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            cmd.Parameters.AddWithValue("revoked_by", revokedBy);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "id", id);
            AddParameter(cmd, "revoked_by", revokedBy);
        }, cancellationToken).ConfigureAwait(false);
    }

    public async Task DeleteAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
        const string sql = "DELETE FROM authority.api_keys WHERE tenant_id = @tenant_id AND id = @id";
        await ExecuteAsync(tenantId, sql, cmd => { cmd.Parameters.AddWithValue("id", id); }, cancellationToken).ConfigureAwait(false);
        await ExecuteAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            cancellationToken).ConfigureAwait(false);
    }

    private static ApiKeyEntity MapApiKey(System.Data.Common.DbDataReader reader) => new()
    private static ApiKeyEntity MapApiKey(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        TenantId = reader.GetString(1),
        UserId = reader.IsDBNull(2) ? null : reader.GetGuid(2),
        UserId = GetNullableGuid(reader, 2),
        Name = reader.GetString(3),
        KeyHash = reader.GetString(4),
        KeyPrefix = reader.GetString(5),
        Scopes = reader.IsDBNull(6) ? [] : reader.GetFieldValue<string[]>(6),
        Status = reader.GetString(7),
        LastUsedAt = reader.IsDBNull(8) ? null : reader.GetFieldValue<DateTimeOffset>(8),
        ExpiresAt = reader.IsDBNull(9) ? null : reader.GetFieldValue<DateTimeOffset>(9),
        LastUsedAt = GetNullableDateTimeOffset(reader, 8),
        ExpiresAt = GetNullableDateTimeOffset(reader, 9),
        Metadata = reader.GetString(10),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(11),
        RevokedAt = reader.IsDBNull(12) ? null : reader.GetFieldValue<DateTimeOffset>(12),
        RevokedBy = reader.IsDBNull(13) ? null : reader.GetString(13)
        RevokedAt = GetNullableDateTimeOffset(reader, 12),
        RevokedBy = GetNullableString(reader, 13)
    };
}

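The new call sites above lean on `RepositoryBase` helpers that this diff does not include: `AddParameter` now folds null handling (which is why the separate `AddNullableParameter` calls disappear), `AddJsonbParameter` and `AddTextArrayParameter` bind typed Postgres values, and the `GetNullable*` readers replace the inline `IsDBNull` ternaries. A minimal sketch of their presumed shape, inferred only from these call sites; every name and exact signature here is an assumption:

```csharp
using Npgsql;
using NpgsqlTypes;

// Sketch only: the RepositoryBase helpers referenced above, as implied by usage.
public abstract class RepositoryBaseSketch
{
    // Folds null into DBNull, so callers no longer need AddNullableParameter.
    protected static void AddParameter(NpgsqlCommand cmd, string name, object? value)
        => cmd.Parameters.AddWithValue(name, value ?? DBNull.Value);

    // jsonb needs an explicit NpgsqlDbType; a plain AddWithValue would bind text.
    protected static void AddJsonbParameter(NpgsqlCommand cmd, string name, string? json)
        => cmd.Parameters.Add(new NpgsqlParameter(name, NpgsqlDbType.Jsonb)
        {
            Value = (object?)json ?? DBNull.Value
        });

    // text[] columns such as scopes.
    protected static void AddTextArrayParameter(NpgsqlCommand cmd, string name, string[]? values)
        => cmd.Parameters.Add(new NpgsqlParameter(name, NpgsqlDbType.Array | NpgsqlDbType.Text)
        {
            Value = (object?)values ?? DBNull.Value
        });

    protected static NpgsqlCommand CreateCommand(string sql, NpgsqlConnection connection)
        => new(sql, connection);

    // Nullable read helpers that replace the inline reader.IsDBNull(...) ternaries.
    protected static string? GetNullableString(NpgsqlDataReader reader, int ordinal)
        => reader.IsDBNull(ordinal) ? null : reader.GetString(ordinal);

    protected static Guid? GetNullableGuid(NpgsqlDataReader reader, int ordinal)
        => reader.IsDBNull(ordinal) ? null : reader.GetGuid(ordinal);

    protected static DateTimeOffset? GetNullableDateTimeOffset(NpgsqlDataReader reader, int ordinal)
        => reader.IsDBNull(ordinal) ? null : reader.GetFieldValue<DateTimeOffset>(ordinal);
}
```
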
@@ -1,11 +1,17 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Repositories;

namespace StellaOps.Authority.Storage.Postgres.Repositories;

/// <summary>
/// PostgreSQL repository for audit log operations.
/// </summary>
public sealed class AuditRepository : RepositoryBase<AuthorityDataSource>, IAuditRepository
{
    public AuditRepository(AuthorityDataSource dataSource) : base(dataSource) { }
    public AuditRepository(AuthorityDataSource dataSource, ILogger<AuditRepository> logger)
        : base(dataSource, logger) { }

    public async Task<long> CreateAsync(string tenantId, AuditEntity audit, CancellationToken cancellationToken = default)
    {
@@ -14,19 +20,18 @@ public sealed class AuditRepository : RepositoryBase<AuthorityDataSource>, IAudi
            VALUES (@tenant_id, @user_id, @action, @resource_type, @resource_id, @old_value::jsonb, @new_value::jsonb, @ip_address, @user_agent, @correlation_id)
            RETURNING id
            """;
        await using var connection = await DataSource.OpenConnectionAsync(tenantId, DataSourceRole.Writer, cancellationToken).ConfigureAwait(false);
        await using var command = connection.CreateCommand();
        command.CommandText = sql;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        AddNullableParameter(command, "user_id", audit.UserId);
        command.Parameters.AddWithValue("action", audit.Action);
        command.Parameters.AddWithValue("resource_type", audit.ResourceType);
        AddNullableParameter(command, "resource_id", audit.ResourceId);
        AddNullableJsonbParameter(command, "old_value", audit.OldValue);
        AddNullableJsonbParameter(command, "new_value", audit.NewValue);
        AddNullableParameter(command, "ip_address", audit.IpAddress);
        AddNullableParameter(command, "user_agent", audit.UserAgent);
        AddNullableParameter(command, "correlation_id", audit.CorrelationId);
        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "user_id", audit.UserId);
        AddParameter(command, "action", audit.Action);
        AddParameter(command, "resource_type", audit.ResourceType);
        AddParameter(command, "resource_id", audit.ResourceId);
        AddJsonbParameter(command, "old_value", audit.OldValue);
        AddJsonbParameter(command, "new_value", audit.NewValue);
        AddParameter(command, "ip_address", audit.IpAddress);
        AddParameter(command, "user_agent", audit.UserAgent);
        AddParameter(command, "correlation_id", audit.CorrelationId);
        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return (long)result!;
    }
@@ -40,11 +45,12 @@ public sealed class AuditRepository : RepositoryBase<AuthorityDataSource>, IAudi
            ORDER BY created_at DESC
            LIMIT @limit OFFSET @offset
            """;
        return await QueryAsync(tenantId, sql, MapAudit, cmd =>
        return await QueryAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("limit", limit);
            cmd.Parameters.AddWithValue("offset", offset);
        }, cancellationToken).ConfigureAwait(false);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "limit", limit);
            AddParameter(cmd, "offset", offset);
        }, MapAudit, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<AuditEntity>> GetByUserIdAsync(string tenantId, Guid userId, int limit = 100, CancellationToken cancellationToken = default)
@@ -56,29 +62,31 @@ public sealed class AuditRepository : RepositoryBase<AuthorityDataSource>, IAudi
            ORDER BY created_at DESC
            LIMIT @limit
            """;
        return await QueryAsync(tenantId, sql, MapAudit, cmd =>
        return await QueryAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("user_id", userId);
            cmd.Parameters.AddWithValue("limit", limit);
        }, cancellationToken).ConfigureAwait(false);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "user_id", userId);
            AddParameter(cmd, "limit", limit);
        }, MapAudit, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<AuditEntity>> GetByResourceAsync(string tenantId, string resourceType, string? resourceId, int limit = 100, CancellationToken cancellationToken = default)
    {
        var sql = $"""
        var sql = """
            SELECT id, tenant_id, user_id, action, resource_type, resource_id, old_value, new_value, ip_address, user_agent, correlation_id, created_at
            FROM authority.audit
            WHERE tenant_id = @tenant_id AND resource_type = @resource_type
            {(resourceId != null ? "AND resource_id = @resource_id" : "")}
            ORDER BY created_at DESC
            LIMIT @limit
            """;
        return await QueryAsync(tenantId, sql, MapAudit, cmd =>
        if (resourceId != null) sql += " AND resource_id = @resource_id";
        sql += " ORDER BY created_at DESC LIMIT @limit";

        return await QueryAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("resource_type", resourceType);
            if (resourceId != null) cmd.Parameters.AddWithValue("resource_id", resourceId);
            cmd.Parameters.AddWithValue("limit", limit);
        }, cancellationToken).ConfigureAwait(false);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "resource_type", resourceType);
            if (resourceId != null) AddParameter(cmd, "resource_id", resourceId);
            AddParameter(cmd, "limit", limit);
        }, MapAudit, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<AuditEntity>> GetByCorrelationIdAsync(string tenantId, string correlationId, CancellationToken cancellationToken = default)
@@ -89,9 +97,11 @@ public sealed class AuditRepository : RepositoryBase<AuthorityDataSource>, IAudi
            WHERE tenant_id = @tenant_id AND correlation_id = @correlation_id
            ORDER BY created_at
            """;
        return await QueryAsync(tenantId, sql, MapAudit,
            cmd => { cmd.Parameters.AddWithValue("correlation_id", correlationId); },
            cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql, cmd =>
        {
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "correlation_id", correlationId);
        }, MapAudit, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<AuditEntity>> GetByActionAsync(string tenantId, string action, int limit = 100, CancellationToken cancellationToken = default)
@@ -103,34 +113,27 @@ public sealed class AuditRepository : RepositoryBase<AuthorityDataSource>, IAudi
            ORDER BY created_at DESC
            LIMIT @limit
            """;
        return await QueryAsync(tenantId, sql, MapAudit, cmd =>
        return await QueryAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("action", action);
            cmd.Parameters.AddWithValue("limit", limit);
        }, cancellationToken).ConfigureAwait(false);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "action", action);
            AddParameter(cmd, "limit", limit);
        }, MapAudit, cancellationToken).ConfigureAwait(false);
    }

    private void AddNullableJsonbParameter(Npgsql.NpgsqlCommand cmd, string name, string? value)
    {
        if (value == null)
            cmd.Parameters.AddWithValue(name, DBNull.Value);
        else
            AddJsonbParameter(cmd, name, value);
    }

    private static AuditEntity MapAudit(System.Data.Common.DbDataReader reader) => new()
    private static AuditEntity MapAudit(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetInt64(0),
        TenantId = reader.GetString(1),
        UserId = reader.IsDBNull(2) ? null : reader.GetGuid(2),
        UserId = GetNullableGuid(reader, 2),
        Action = reader.GetString(3),
        ResourceType = reader.GetString(4),
        ResourceId = reader.IsDBNull(5) ? null : reader.GetString(5),
        OldValue = reader.IsDBNull(6) ? null : reader.GetString(6),
        NewValue = reader.IsDBNull(7) ? null : reader.GetString(7),
        IpAddress = reader.IsDBNull(8) ? null : reader.GetString(8),
        UserAgent = reader.IsDBNull(9) ? null : reader.GetString(9),
        CorrelationId = reader.IsDBNull(10) ? null : reader.GetString(10),
        ResourceId = GetNullableString(reader, 5),
        OldValue = GetNullableString(reader, 6),
        NewValue = GetNullableString(reader, 7),
        IpAddress = GetNullableString(reader, 8),
        UserAgent = GetNullableString(reader, 9),
        CorrelationId = GetNullableString(reader, 10),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(11)
    };
}

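The query helpers changed shape here as well: the row mapper now comes after the parameter callback, and the caller binds `tenant_id` explicitly. A sketch of the presumed core of the reworked `QueryAsync`, with the tenant connection passed in rather than opened internally; the signature is inferred from the call sites, not taken from this diff:

```csharp
using Npgsql;

// Sketch only: presumed shape of the reworked QueryAsync helper.
static class QueryHelperSketch
{
    public static async Task<IReadOnlyList<T>> QueryAsync<T>(
        NpgsqlConnection connection,   // assumed already opened for the tenant
        string sql,
        Action<NpgsqlCommand> configureParameters,
        Func<NpgsqlDataReader, T> map,
        CancellationToken cancellationToken = default)
    {
        await using var command = new NpgsqlCommand(sql, connection);
        configureParameters(command);   // parameters bound before execution

        var results = new List<T>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            results.Add(map(reader));   // one mapper call per row
        }
        return results;
    }
}
```

Note also the `GetByResourceAsync` change above: the optional predicate moves out of an interpolated raw string and into plain concatenation of constant SQL fragments, so the appended text is always a fixed literal and the value itself stays parameterized.
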
@@ -1,11 +1,17 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Repositories;

namespace StellaOps.Authority.Storage.Postgres.Repositories;

/// <summary>
/// PostgreSQL repository for permission operations.
/// </summary>
public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>, IPermissionRepository
{
    public PermissionRepository(AuthorityDataSource dataSource) : base(dataSource) { }
    public PermissionRepository(AuthorityDataSource dataSource, ILogger<PermissionRepository> logger)
        : base(dataSource, logger) { }

    public async Task<PermissionEntity?> GetByIdAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
@@ -14,9 +20,9 @@ public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>,
            FROM authority.permissions
            WHERE tenant_id = @tenant_id AND id = @id
            """;
        return await QuerySingleOrDefaultAsync(tenantId, sql, MapPermission,
            cmd => { cmd.Parameters.AddWithValue("id", id); },
            cancellationToken).ConfigureAwait(false);
        return await QuerySingleOrDefaultAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            MapPermission, cancellationToken).ConfigureAwait(false);
    }

    public async Task<PermissionEntity?> GetByNameAsync(string tenantId, string name, CancellationToken cancellationToken = default)
@@ -26,9 +32,9 @@ public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>,
            FROM authority.permissions
            WHERE tenant_id = @tenant_id AND name = @name
            """;
        return await QuerySingleOrDefaultAsync(tenantId, sql, MapPermission,
            cmd => { cmd.Parameters.AddWithValue("name", name); },
            cancellationToken).ConfigureAwait(false);
        return await QuerySingleOrDefaultAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "name", name); },
            MapPermission, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<PermissionEntity>> ListAsync(string tenantId, CancellationToken cancellationToken = default)
@@ -39,7 +45,9 @@ public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>,
            WHERE tenant_id = @tenant_id
            ORDER BY resource, action
            """;
        return await QueryAsync(tenantId, sql, MapPermission, cancellationToken: cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => AddParameter(cmd, "tenant_id", tenantId),
            MapPermission, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<PermissionEntity>> GetByResourceAsync(string tenantId, string resource, CancellationToken cancellationToken = default)
@@ -50,9 +58,9 @@ public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>,
            WHERE tenant_id = @tenant_id AND resource = @resource
            ORDER BY action
            """;
        return await QueryAsync(tenantId, sql, MapPermission,
            cmd => { cmd.Parameters.AddWithValue("resource", resource); },
            cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "resource", resource); },
            MapPermission, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<PermissionEntity>> GetRolePermissionsAsync(string tenantId, Guid roleId, CancellationToken cancellationToken = default)
@@ -64,9 +72,9 @@ public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>,
            WHERE p.tenant_id = @tenant_id AND rp.role_id = @role_id
            ORDER BY p.resource, p.action
            """;
        return await QueryAsync(tenantId, sql, MapPermission,
            cmd => { cmd.Parameters.AddWithValue("role_id", roleId); },
            cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "role_id", roleId); },
            MapPermission, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<PermissionEntity>> GetUserPermissionsAsync(string tenantId, Guid userId, CancellationToken cancellationToken = default)
@@ -80,9 +88,9 @@ public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>,
            AND (ur.expires_at IS NULL OR ur.expires_at > NOW())
            ORDER BY p.resource, p.action
            """;
        return await QueryAsync(tenantId, sql, MapPermission,
            cmd => { cmd.Parameters.AddWithValue("user_id", userId); },
            cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "user_id", userId); },
            MapPermission, cancellationToken).ConfigureAwait(false);
    }

    public async Task<Guid> CreateAsync(string tenantId, PermissionEntity permission, CancellationToken cancellationToken = default)
@@ -93,21 +101,24 @@ public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>,
            RETURNING id
            """;
        var id = permission.Id == Guid.Empty ? Guid.NewGuid() : permission.Id;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            cmd.Parameters.AddWithValue("name", permission.Name);
            cmd.Parameters.AddWithValue("resource", permission.Resource);
            cmd.Parameters.AddWithValue("action", permission.Action);
            AddNullableParameter(cmd, "description", permission.Description);
        }, cancellationToken).ConfigureAwait(false);
        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "id", id);
        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "name", permission.Name);
        AddParameter(command, "resource", permission.Resource);
        AddParameter(command, "action", permission.Action);
        AddParameter(command, "description", permission.Description);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return id;
    }

    public async Task DeleteAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
        const string sql = "DELETE FROM authority.permissions WHERE tenant_id = @tenant_id AND id = @id";
        await ExecuteAsync(tenantId, sql, cmd => { cmd.Parameters.AddWithValue("id", id); }, cancellationToken).ConfigureAwait(false);
        await ExecuteAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            cancellationToken).ConfigureAwait(false);
    }

    public async Task AssignToRoleAsync(string tenantId, Guid roleId, Guid permissionId, CancellationToken cancellationToken = default)
@@ -119,8 +130,8 @@ public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>,
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("role_id", roleId);
            cmd.Parameters.AddWithValue("permission_id", permissionId);
            AddParameter(cmd, "role_id", roleId);
            AddParameter(cmd, "permission_id", permissionId);
        }, cancellationToken).ConfigureAwait(false);
    }

@@ -129,19 +140,19 @@ public sealed class PermissionRepository : RepositoryBase<AuthorityDataSource>,
        const string sql = "DELETE FROM authority.role_permissions WHERE role_id = @role_id AND permission_id = @permission_id";
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("role_id", roleId);
            cmd.Parameters.AddWithValue("permission_id", permissionId);
            AddParameter(cmd, "role_id", roleId);
            AddParameter(cmd, "permission_id", permissionId);
        }, cancellationToken).ConfigureAwait(false);
    }

    private static PermissionEntity MapPermission(System.Data.Common.DbDataReader reader) => new()
    private static PermissionEntity MapPermission(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        TenantId = reader.GetString(1),
        Name = reader.GetString(2),
        Resource = reader.GetString(3),
        Action = reader.GetString(4),
        Description = reader.IsDBNull(5) ? null : reader.GetString(5),
        Description = GetNullableString(reader, 5),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(6)
    };
}

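For orientation, an illustrative call sequence against the `PermissionRepository` signatures shown above; the repository instance, ids, tenant value, and permission fields are placeholders:

```csharp
// Illustrative only: wires the calls shown above into one flow.
static async Task SeedFindingsReadPermissionAsync(
    IPermissionRepository permissions, Guid roleId, Guid userId, CancellationToken ct)
{
    var permissionId = await permissions.CreateAsync("tenant-a", new PermissionEntity
    {
        Name = "findings.read",
        Resource = "findings",
        Action = "read",
        Description = "Read access to findings"
    }, ct);

    // Grants flow through role membership.
    await permissions.AssignToRoleAsync("tenant-a", roleId, permissionId, ct);

    // Effective permissions honour role expiry (see GetUserPermissionsAsync above).
    var effective = await permissions.GetUserPermissionsAsync("tenant-a", userId, ct);
}
```
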
@@ -1,11 +1,17 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Repositories;

namespace StellaOps.Authority.Storage.Postgres.Repositories;

/// <summary>
/// PostgreSQL repository for role operations.
/// </summary>
public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleRepository
{
    public RoleRepository(AuthorityDataSource dataSource) : base(dataSource) { }
    public RoleRepository(AuthorityDataSource dataSource, ILogger<RoleRepository> logger)
        : base(dataSource, logger) { }

    public async Task<RoleEntity?> GetByIdAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
@@ -14,9 +20,9 @@ public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleR
            FROM authority.roles
            WHERE tenant_id = @tenant_id AND id = @id
            """;
        return await QuerySingleOrDefaultAsync(tenantId, sql, MapRole,
            cmd => { cmd.Parameters.AddWithValue("id", id); },
            cancellationToken).ConfigureAwait(false);
        return await QuerySingleOrDefaultAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            MapRole, cancellationToken).ConfigureAwait(false);
    }

    public async Task<RoleEntity?> GetByNameAsync(string tenantId, string name, CancellationToken cancellationToken = default)
@@ -26,9 +32,9 @@ public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleR
            FROM authority.roles
            WHERE tenant_id = @tenant_id AND name = @name
            """;
        return await QuerySingleOrDefaultAsync(tenantId, sql, MapRole,
            cmd => { cmd.Parameters.AddWithValue("name", name); },
            cancellationToken).ConfigureAwait(false);
        return await QuerySingleOrDefaultAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "name", name); },
            MapRole, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<RoleEntity>> ListAsync(string tenantId, CancellationToken cancellationToken = default)
@@ -39,7 +45,9 @@ public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleR
            WHERE tenant_id = @tenant_id
            ORDER BY name
            """;
        return await QueryAsync(tenantId, sql, MapRole, cancellationToken: cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => AddParameter(cmd, "tenant_id", tenantId),
            MapRole, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<RoleEntity>> GetUserRolesAsync(string tenantId, Guid userId, CancellationToken cancellationToken = default)
@@ -52,9 +60,9 @@ public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleR
            AND (ur.expires_at IS NULL OR ur.expires_at > NOW())
            ORDER BY r.name
            """;
        return await QueryAsync(tenantId, sql, MapRole,
            cmd => { cmd.Parameters.AddWithValue("user_id", userId); },
            cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "user_id", userId); },
            MapRole, cancellationToken).ConfigureAwait(false);
    }

    public async Task<Guid> CreateAsync(string tenantId, RoleEntity role, CancellationToken cancellationToken = default)
@@ -65,15 +73,16 @@ public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleR
            RETURNING id
            """;
        var id = role.Id == Guid.Empty ? Guid.NewGuid() : role.Id;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            cmd.Parameters.AddWithValue("name", role.Name);
            AddNullableParameter(cmd, "display_name", role.DisplayName);
            AddNullableParameter(cmd, "description", role.Description);
            cmd.Parameters.AddWithValue("is_system", role.IsSystem);
            AddJsonbParameter(cmd, "metadata", role.Metadata);
        }, cancellationToken).ConfigureAwait(false);
        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "id", id);
        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "name", role.Name);
        AddParameter(command, "display_name", role.DisplayName);
        AddParameter(command, "description", role.Description);
        AddParameter(command, "is_system", role.IsSystem);
        AddJsonbParameter(command, "metadata", role.Metadata);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return id;
    }

@@ -87,11 +96,12 @@ public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleR
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", role.Id);
            cmd.Parameters.AddWithValue("name", role.Name);
            AddNullableParameter(cmd, "display_name", role.DisplayName);
            AddNullableParameter(cmd, "description", role.Description);
            cmd.Parameters.AddWithValue("is_system", role.IsSystem);
            AddParameter(cmd, "id", role.Id);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "name", role.Name);
            AddParameter(cmd, "display_name", role.DisplayName);
            AddParameter(cmd, "description", role.Description);
            AddParameter(cmd, "is_system", role.IsSystem);
            AddJsonbParameter(cmd, "metadata", role.Metadata);
        }, cancellationToken).ConfigureAwait(false);
    }
@@ -99,7 +109,9 @@ public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleR
    public async Task DeleteAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
        const string sql = "DELETE FROM authority.roles WHERE tenant_id = @tenant_id AND id = @id";
        await ExecuteAsync(tenantId, sql, cmd => { cmd.Parameters.AddWithValue("id", id); }, cancellationToken).ConfigureAwait(false);
        await ExecuteAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            cancellationToken).ConfigureAwait(false);
    }

    public async Task AssignToUserAsync(string tenantId, Guid userId, Guid roleId, string? grantedBy, DateTimeOffset? expiresAt, CancellationToken cancellationToken = default)
@@ -112,10 +124,10 @@ public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleR
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("user_id", userId);
            cmd.Parameters.AddWithValue("role_id", roleId);
            AddNullableParameter(cmd, "granted_by", grantedBy);
            AddNullableParameter(cmd, "expires_at", expiresAt);
            AddParameter(cmd, "user_id", userId);
            AddParameter(cmd, "role_id", roleId);
            AddParameter(cmd, "granted_by", grantedBy);
            AddParameter(cmd, "expires_at", expiresAt);
        }, cancellationToken).ConfigureAwait(false);
    }

@@ -124,18 +136,18 @@ public sealed class RoleRepository : RepositoryBase<AuthorityDataSource>, IRoleR
        const string sql = "DELETE FROM authority.user_roles WHERE user_id = @user_id AND role_id = @role_id";
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("user_id", userId);
            cmd.Parameters.AddWithValue("role_id", roleId);
            AddParameter(cmd, "user_id", userId);
            AddParameter(cmd, "role_id", roleId);
        }, cancellationToken).ConfigureAwait(false);
    }

    private static RoleEntity MapRole(System.Data.Common.DbDataReader reader) => new()
    private static RoleEntity MapRole(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        TenantId = reader.GetString(1),
        Name = reader.GetString(2),
        DisplayName = reader.IsDBNull(3) ? null : reader.GetString(3),
        Description = reader.IsDBNull(4) ? null : reader.GetString(4),
        DisplayName = GetNullableString(reader, 3),
        Description = GetNullableString(reader, 4),
        IsSystem = reader.GetBoolean(5),
        Metadata = reader.GetString(6),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(7),

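The connection plumbing implied above also shifts from the `DataSourceRole.Writer` enum to a plain role string. A sketch of the presumed `AuthorityDataSource` surface; the interface name and exact method shapes are assumptions inferred from the calls in these diffs:

```csharp
using Npgsql;

// Sketch only: the connection-opening surface implied by the call sites above.
public interface IAuthorityConnectionSource
{
    // Tenant-scoped connection for a named role ("writer" is the only role
    // visible in this diff; a "reader" counterpart is presumed).
    ValueTask<NpgsqlConnection> OpenConnectionAsync(
        string tenantId, string role, CancellationToken cancellationToken);

    // Cross-tenant connection for lookups and maintenance where no tenant is
    // known yet (token-hash lookups, expiry cleanup).
    ValueTask<NpgsqlConnection> OpenSystemConnectionAsync(CancellationToken cancellationToken);
}
```
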
@@ -1,11 +1,17 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Repositories;

namespace StellaOps.Authority.Storage.Postgres.Repositories;

/// <summary>
/// PostgreSQL repository for session operations.
/// </summary>
public sealed class SessionRepository : RepositoryBase<AuthorityDataSource>, ISessionRepository
{
    public SessionRepository(AuthorityDataSource dataSource) : base(dataSource) { }
    public SessionRepository(AuthorityDataSource dataSource, ILogger<SessionRepository> logger)
        : base(dataSource, logger) { }

    public async Task<SessionEntity?> GetByIdAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
@@ -14,9 +20,9 @@ public sealed class SessionRepository : RepositoryBase<AuthorityDataSource>, ISe
            FROM authority.sessions
            WHERE tenant_id = @tenant_id AND id = @id
            """;
        return await QuerySingleOrDefaultAsync(tenantId, sql, MapSession,
            cmd => { cmd.Parameters.AddWithValue("id", id); },
            cancellationToken).ConfigureAwait(false);
        return await QuerySingleOrDefaultAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            MapSession, cancellationToken).ConfigureAwait(false);
    }

    public async Task<SessionEntity?> GetByTokenHashAsync(string sessionTokenHash, CancellationToken cancellationToken = default)
@@ -27,25 +33,25 @@ public sealed class SessionRepository : RepositoryBase<AuthorityDataSource>, ISe
            WHERE session_token_hash = @session_token_hash AND ended_at IS NULL AND expires_at > NOW()
            """;
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = connection.CreateCommand();
        command.CommandText = sql;
        command.Parameters.AddWithValue("session_token_hash", sessionTokenHash);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "session_token_hash", sessionTokenHash);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapSession(reader) : null;
    }

    public async Task<IReadOnlyList<SessionEntity>> GetByUserIdAsync(string tenantId, Guid userId, bool activeOnly = true, CancellationToken cancellationToken = default)
    {
        var sql = $"""
        var sql = """
            SELECT id, tenant_id, user_id, session_token_hash, ip_address, user_agent, started_at, last_activity_at, expires_at, ended_at, end_reason, metadata
            FROM authority.sessions
            WHERE tenant_id = @tenant_id AND user_id = @user_id
            {(activeOnly ? "AND ended_at IS NULL AND expires_at > NOW()" : "")}
            ORDER BY started_at DESC
            """;
        return await QueryAsync(tenantId, sql, MapSession,
            cmd => { cmd.Parameters.AddWithValue("user_id", userId); },
            cancellationToken).ConfigureAwait(false);
        if (activeOnly) sql += " AND ended_at IS NULL AND expires_at > NOW()";
        sql += " ORDER BY started_at DESC";

        return await QueryAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "user_id", userId); },
            MapSession, cancellationToken).ConfigureAwait(false);
    }

    public async Task<Guid> CreateAsync(string tenantId, SessionEntity session, CancellationToken cancellationToken = default)
@@ -56,23 +62,26 @@ public sealed class SessionRepository : RepositoryBase<AuthorityDataSource>, ISe
            RETURNING id
            """;
        var id = session.Id == Guid.Empty ? Guid.NewGuid() : session.Id;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            cmd.Parameters.AddWithValue("user_id", session.UserId);
            cmd.Parameters.AddWithValue("session_token_hash", session.SessionTokenHash);
            AddNullableParameter(cmd, "ip_address", session.IpAddress);
            AddNullableParameter(cmd, "user_agent", session.UserAgent);
            cmd.Parameters.AddWithValue("expires_at", session.ExpiresAt);
            AddJsonbParameter(cmd, "metadata", session.Metadata);
        }, cancellationToken).ConfigureAwait(false);
        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "id", id);
        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "user_id", session.UserId);
        AddParameter(command, "session_token_hash", session.SessionTokenHash);
        AddParameter(command, "ip_address", session.IpAddress);
        AddParameter(command, "user_agent", session.UserAgent);
        AddParameter(command, "expires_at", session.ExpiresAt);
        AddJsonbParameter(command, "metadata", session.Metadata);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return id;
    }

    public async Task UpdateLastActivityAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
        const string sql = "UPDATE authority.sessions SET last_activity_at = NOW() WHERE tenant_id = @tenant_id AND id = @id AND ended_at IS NULL";
        await ExecuteAsync(tenantId, sql, cmd => { cmd.Parameters.AddWithValue("id", id); }, cancellationToken).ConfigureAwait(false);
        await ExecuteAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            cancellationToken).ConfigureAwait(false);
    }

    public async Task EndAsync(string tenantId, Guid id, string reason, CancellationToken cancellationToken = default)
@@ -83,8 +92,9 @@ public sealed class SessionRepository : RepositoryBase<AuthorityDataSource>, ISe
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            cmd.Parameters.AddWithValue("end_reason", reason);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "id", id);
            AddParameter(cmd, "end_reason", reason);
        }, cancellationToken).ConfigureAwait(false);
    }

@@ -96,8 +106,9 @@ public sealed class SessionRepository : RepositoryBase<AuthorityDataSource>, ISe
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("user_id", userId);
            cmd.Parameters.AddWithValue("end_reason", reason);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "user_id", userId);
            AddParameter(cmd, "end_reason", reason);
        }, cancellationToken).ConfigureAwait(false);
    }

@@ -105,24 +116,23 @@ public sealed class SessionRepository : RepositoryBase<AuthorityDataSource>, ISe
    {
        const string sql = "DELETE FROM authority.sessions WHERE expires_at < NOW() - INTERVAL '30 days'";
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = connection.CreateCommand();
        command.CommandText = sql;
        await using var command = CreateCommand(sql, connection);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    private static SessionEntity MapSession(System.Data.Common.DbDataReader reader) => new()
    private static SessionEntity MapSession(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        TenantId = reader.GetString(1),
        UserId = reader.GetGuid(2),
        SessionTokenHash = reader.GetString(3),
        IpAddress = reader.IsDBNull(4) ? null : reader.GetString(4),
        UserAgent = reader.IsDBNull(5) ? null : reader.GetString(5),
        IpAddress = GetNullableString(reader, 4),
        UserAgent = GetNullableString(reader, 5),
        StartedAt = reader.GetFieldValue<DateTimeOffset>(6),
        LastActivityAt = reader.GetFieldValue<DateTimeOffset>(7),
        ExpiresAt = reader.GetFieldValue<DateTimeOffset>(8),
        EndedAt = reader.IsDBNull(9) ? null : reader.GetFieldValue<DateTimeOffset>(9),
        EndReason = reader.IsDBNull(10) ? null : reader.GetString(10),
        EndedAt = GetNullableDateTimeOffset(reader, 9),
        EndReason = GetNullableString(reader, 10),
        Metadata = reader.GetString(11)
    };
}

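`GetByTokenHashAsync` runs on the system connection, presumably because the owning tenant is unknown until the session row is found; only a hash ever reaches the database, and the SQL itself filters ended and expired sessions. An illustrative per-request touch flow built from the methods above; the SHA-256 hashing scheme is an assumption, not shown in this diff:

```csharp
using System.Security.Cryptography;
using System.Text;

// Illustrative only: session touch on each authenticated request.
static async Task TouchSessionAsync(
    ISessionRepository sessions, string rawSessionToken, CancellationToken ct)
{
    // Hash client-side so the raw token never reaches the database (assumed scheme).
    var tokenHash = Convert.ToHexString(
        SHA256.HashData(Encoding.UTF8.GetBytes(rawSessionToken)));

    // Cross-tenant lookup; returns null for unknown, ended, or expired sessions.
    var session = await sessions.GetByTokenHashAsync(tokenHash, ct);
    if (session is null)
    {
        return;
    }

    // Tenant-scoped update once the owning tenant is known.
    await sessions.UpdateLastActivityAsync(session.TenantId, session.Id, ct);
}
```
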
@@ -1,11 +1,17 @@
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Npgsql;
|
||||
using StellaOps.Authority.Storage.Postgres.Models;
|
||||
using StellaOps.Infrastructure.Postgres;
|
||||
using StellaOps.Infrastructure.Postgres.Repositories;
|
||||
|
||||
namespace StellaOps.Authority.Storage.Postgres.Repositories;
|
||||
|
||||
/// <summary>
|
||||
/// PostgreSQL repository for access token operations.
|
||||
/// </summary>
|
||||
public sealed class TokenRepository : RepositoryBase<AuthorityDataSource>, ITokenRepository
|
||||
{
|
||||
public TokenRepository(AuthorityDataSource dataSource) : base(dataSource) { }
|
||||
public TokenRepository(AuthorityDataSource dataSource, ILogger<TokenRepository> logger)
|
||||
: base(dataSource, logger) { }
|
||||
|
||||
public async Task<TokenEntity?> GetByIdAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
|
||||
{
|
||||
@@ -14,9 +20,9 @@ public sealed class TokenRepository : RepositoryBase<AuthorityDataSource>, IToke
|
||||
FROM authority.tokens
|
||||
WHERE tenant_id = @tenant_id AND id = @id
|
||||
""";
|
||||
return await QuerySingleOrDefaultAsync(tenantId, sql, MapToken,
|
||||
cmd => { cmd.Parameters.AddWithValue("id", id); },
|
||||
cancellationToken).ConfigureAwait(false);
|
||||
return await QuerySingleOrDefaultAsync(tenantId, sql,
|
||||
cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
|
||||
MapToken, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public async Task<TokenEntity?> GetByHashAsync(string tokenHash, CancellationToken cancellationToken = default)
|
||||
@@ -27,9 +33,8 @@ public sealed class TokenRepository : RepositoryBase<AuthorityDataSource>, IToke
|
||||
WHERE token_hash = @token_hash AND revoked_at IS NULL AND expires_at > NOW()
|
||||
""";
|
||||
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = connection.CreateCommand();
        command.CommandText = sql;
        command.Parameters.AddWithValue("token_hash", tokenHash);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "token_hash", tokenHash);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapToken(reader) : null;
    }
@@ -42,9 +47,9 @@ public sealed class TokenRepository : RepositoryBase<AuthorityDataSource>, IToke
            WHERE tenant_id = @tenant_id AND user_id = @user_id AND revoked_at IS NULL
            ORDER BY issued_at DESC
            """;
        return await QueryAsync(tenantId, sql, MapToken,
            cmd => { cmd.Parameters.AddWithValue("user_id", userId); },
            cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "user_id", userId); },
            MapToken, cancellationToken).ConfigureAwait(false);
    }

    public async Task<Guid> CreateAsync(string tenantId, TokenEntity token, CancellationToken cancellationToken = default)
@@ -55,17 +60,18 @@ public sealed class TokenRepository : RepositoryBase<AuthorityDataSource>, IToke
            RETURNING id
            """;
        var id = token.Id == Guid.Empty ? Guid.NewGuid() : token.Id;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            AddNullableParameter(cmd, "user_id", token.UserId);
            cmd.Parameters.AddWithValue("token_hash", token.TokenHash);
            cmd.Parameters.AddWithValue("token_type", token.TokenType);
            AddArrayParameter(cmd, "scopes", token.Scopes);
            AddNullableParameter(cmd, "client_id", token.ClientId);
            cmd.Parameters.AddWithValue("expires_at", token.ExpiresAt);
            AddJsonbParameter(cmd, "metadata", token.Metadata);
        }, cancellationToken).ConfigureAwait(false);
        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "id", id);
        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "user_id", token.UserId);
        AddParameter(command, "token_hash", token.TokenHash);
        AddParameter(command, "token_type", token.TokenType);
        AddTextArrayParameter(command, "scopes", token.Scopes);
        AddParameter(command, "client_id", token.ClientId);
        AddParameter(command, "expires_at", token.ExpiresAt);
        AddJsonbParameter(command, "metadata", token.Metadata);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return id;
    }

@@ -77,8 +83,9 @@ public sealed class TokenRepository : RepositoryBase<AuthorityDataSource>, IToke
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            cmd.Parameters.AddWithValue("revoked_by", revokedBy);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "id", id);
            AddParameter(cmd, "revoked_by", revokedBy);
        }, cancellationToken).ConfigureAwait(false);
    }

@@ -90,8 +97,9 @@ public sealed class TokenRepository : RepositoryBase<AuthorityDataSource>, IToke
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("user_id", userId);
            cmd.Parameters.AddWithValue("revoked_by", revokedBy);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "user_id", userId);
            AddParameter(cmd, "revoked_by", revokedBy);
        }, cancellationToken).ConfigureAwait(false);
    }

@@ -99,31 +107,34 @@ public sealed class TokenRepository : RepositoryBase<AuthorityDataSource>, IToke
    {
        const string sql = "DELETE FROM authority.tokens WHERE expires_at < NOW() - INTERVAL '7 days'";
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = connection.CreateCommand();
        command.CommandText = sql;
        await using var command = CreateCommand(sql, connection);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    private static TokenEntity MapToken(System.Data.Common.DbDataReader reader) => new()
    private static TokenEntity MapToken(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        TenantId = reader.GetString(1),
        UserId = reader.IsDBNull(2) ? null : reader.GetGuid(2),
        UserId = GetNullableGuid(reader, 2),
        TokenHash = reader.GetString(3),
        TokenType = reader.GetString(4),
        Scopes = reader.IsDBNull(5) ? [] : reader.GetFieldValue<string[]>(5),
        ClientId = reader.IsDBNull(6) ? null : reader.GetString(6),
        ClientId = GetNullableString(reader, 6),
        IssuedAt = reader.GetFieldValue<DateTimeOffset>(7),
        ExpiresAt = reader.GetFieldValue<DateTimeOffset>(8),
        RevokedAt = reader.IsDBNull(9) ? null : reader.GetFieldValue<DateTimeOffset>(9),
        RevokedBy = reader.IsDBNull(10) ? null : reader.GetString(10),
        RevokedAt = GetNullableDateTimeOffset(reader, 9),
        RevokedBy = GetNullableString(reader, 10),
        Metadata = reader.GetString(11)
    };
}

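// The GetNullableGuid/GetNullableString/GetNullableDateTimeOffset helpers used by MapToken
// are not shown in this diff. A minimal sketch, assuming they are static members of
// RepositoryBase (names and placement are hypothetical, inferred from the call sites):
//
//     private static Guid? GetNullableGuid(NpgsqlDataReader reader, int ordinal)
//         => reader.IsDBNull(ordinal) ? null : reader.GetGuid(ordinal);
//
//     private static string? GetNullableString(NpgsqlDataReader reader, int ordinal)
//         => reader.IsDBNull(ordinal) ? null : reader.GetString(ordinal);
//
//     private static DateTimeOffset? GetNullableDateTimeOffset(NpgsqlDataReader reader, int ordinal)
//         => reader.IsDBNull(ordinal) ? null : reader.GetFieldValue<DateTimeOffset>(ordinal);
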
/// <summary>
/// PostgreSQL repository for refresh token operations.
/// </summary>
public sealed class RefreshTokenRepository : RepositoryBase<AuthorityDataSource>, IRefreshTokenRepository
{
    public RefreshTokenRepository(AuthorityDataSource dataSource) : base(dataSource) { }
    public RefreshTokenRepository(AuthorityDataSource dataSource, ILogger<RefreshTokenRepository> logger)
        : base(dataSource, logger) { }

    public async Task<RefreshTokenEntity?> GetByIdAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
    {
@@ -132,9 +143,9 @@ public sealed class RefreshTokenRepository : RepositoryBase<AuthorityDataSource>
            FROM authority.refresh_tokens
            WHERE tenant_id = @tenant_id AND id = @id
            """;
        return await QuerySingleOrDefaultAsync(tenantId, sql, MapRefreshToken,
            cmd => { cmd.Parameters.AddWithValue("id", id); },
            cancellationToken).ConfigureAwait(false);
        return await QuerySingleOrDefaultAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "id", id); },
            MapRefreshToken, cancellationToken).ConfigureAwait(false);
    }

    public async Task<RefreshTokenEntity?> GetByHashAsync(string tokenHash, CancellationToken cancellationToken = default)
@@ -145,9 +156,8 @@ public sealed class RefreshTokenRepository : RepositoryBase<AuthorityDataSource>
            WHERE token_hash = @token_hash AND revoked_at IS NULL AND expires_at > NOW()
            """;
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = connection.CreateCommand();
        command.CommandText = sql;
        command.Parameters.AddWithValue("token_hash", tokenHash);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "token_hash", tokenHash);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapRefreshToken(reader) : null;
    }
@@ -160,9 +170,9 @@ public sealed class RefreshTokenRepository : RepositoryBase<AuthorityDataSource>
            WHERE tenant_id = @tenant_id AND user_id = @user_id AND revoked_at IS NULL
            ORDER BY issued_at DESC
            """;
        return await QueryAsync(tenantId, sql, MapRefreshToken,
            cmd => { cmd.Parameters.AddWithValue("user_id", userId); },
            cancellationToken).ConfigureAwait(false);
        return await QueryAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "user_id", userId); },
            MapRefreshToken, cancellationToken).ConfigureAwait(false);
    }

    public async Task<Guid> CreateAsync(string tenantId, RefreshTokenEntity token, CancellationToken cancellationToken = default)
@@ -173,16 +183,17 @@ public sealed class RefreshTokenRepository : RepositoryBase<AuthorityDataSource>
            RETURNING id
            """;
        var id = token.Id == Guid.Empty ? Guid.NewGuid() : token.Id;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            cmd.Parameters.AddWithValue("user_id", token.UserId);
            cmd.Parameters.AddWithValue("token_hash", token.TokenHash);
            AddNullableParameter(cmd, "access_token_id", token.AccessTokenId);
            AddNullableParameter(cmd, "client_id", token.ClientId);
            cmd.Parameters.AddWithValue("expires_at", token.ExpiresAt);
            AddJsonbParameter(cmd, "metadata", token.Metadata);
        }, cancellationToken).ConfigureAwait(false);
        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "id", id);
        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "user_id", token.UserId);
        AddParameter(command, "token_hash", token.TokenHash);
        AddParameter(command, "access_token_id", token.AccessTokenId);
        AddParameter(command, "client_id", token.ClientId);
        AddParameter(command, "expires_at", token.ExpiresAt);
        AddJsonbParameter(command, "metadata", token.Metadata);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return id;
    }

@@ -194,9 +205,10 @@ public sealed class RefreshTokenRepository : RepositoryBase<AuthorityDataSource>
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("id", id);
            cmd.Parameters.AddWithValue("revoked_by", revokedBy);
            AddNullableParameter(cmd, "replaced_by", replacedBy);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "id", id);
            AddParameter(cmd, "revoked_by", revokedBy);
            AddParameter(cmd, "replaced_by", replacedBy);
        }, cancellationToken).ConfigureAwait(false);
    }

@@ -208,8 +220,9 @@ public sealed class RefreshTokenRepository : RepositoryBase<AuthorityDataSource>
            """;
        await ExecuteAsync(tenantId, sql, cmd =>
        {
            cmd.Parameters.AddWithValue("user_id", userId);
            cmd.Parameters.AddWithValue("revoked_by", revokedBy);
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "user_id", userId);
            AddParameter(cmd, "revoked_by", revokedBy);
        }, cancellationToken).ConfigureAwait(false);
    }

@@ -217,24 +230,23 @@ public sealed class RefreshTokenRepository : RepositoryBase<AuthorityDataSource>
    {
        const string sql = "DELETE FROM authority.refresh_tokens WHERE expires_at < NOW() - INTERVAL '30 days'";
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = connection.CreateCommand();
        command.CommandText = sql;
        await using var command = CreateCommand(sql, connection);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    private static RefreshTokenEntity MapRefreshToken(System.Data.Common.DbDataReader reader) => new()
    private static RefreshTokenEntity MapRefreshToken(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        TenantId = reader.GetString(1),
        UserId = reader.GetGuid(2),
        TokenHash = reader.GetString(3),
        AccessTokenId = reader.IsDBNull(4) ? null : reader.GetGuid(4),
        ClientId = reader.IsDBNull(5) ? null : reader.GetString(5),
        AccessTokenId = GetNullableGuid(reader, 4),
        ClientId = GetNullableString(reader, 5),
        IssuedAt = reader.GetFieldValue<DateTimeOffset>(6),
        ExpiresAt = reader.GetFieldValue<DateTimeOffset>(7),
        RevokedAt = reader.IsDBNull(8) ? null : reader.GetFieldValue<DateTimeOffset>(8),
        RevokedBy = reader.IsDBNull(9) ? null : reader.GetString(9),
        ReplacedBy = reader.IsDBNull(10) ? null : reader.GetGuid(10),
        RevokedAt = GetNullableDateTimeOffset(reader, 8),
        RevokedBy = GetNullableString(reader, 9),
        ReplacedBy = GetNullableGuid(reader, 10),
        Metadata = reader.GetString(11)
    };
}

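// CreateCommand, AddParameter, AddTextArrayParameter and AddJsonbParameter come from
// RepositoryBase and are not shown in this diff. A minimal sketch, assuming Npgsql
// (using Npgsql; using NpgsqlTypes;) — signatures are hypothetical, inferred from the
// call sites above:
//
//     protected static NpgsqlCommand CreateCommand(string sql, NpgsqlConnection connection)
//     {
//         var command = connection.CreateCommand();
//         command.CommandText = sql;
//         return command;
//     }
//
//     protected static void AddParameter(NpgsqlCommand command, string name, object? value)
//         => command.Parameters.AddWithValue(name, value ?? DBNull.Value);
//
//     protected static void AddTextArrayParameter(NpgsqlCommand command, string name, string[] values)
//         => command.Parameters.Add(new NpgsqlParameter(name, NpgsqlDbType.Array | NpgsqlDbType.Text) { Value = values });
//
//     protected static void AddJsonbParameter(NpgsqlCommand command, string name, string? json)
//         => command.Parameters.Add(new NpgsqlParameter(name, NpgsqlDbType.Jsonb) { Value = (object?)json ?? DBNull.Value });
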
@@ -29,6 +29,13 @@ public static class ServiceCollectionExtensions
        // Register repositories
        services.AddScoped<ITenantRepository, TenantRepository>();
        services.AddScoped<IUserRepository, UserRepository>();
        services.AddScoped<IRoleRepository, RoleRepository>();
        services.AddScoped<IPermissionRepository, PermissionRepository>();
        services.AddScoped<ITokenRepository, TokenRepository>();
        services.AddScoped<IRefreshTokenRepository, RefreshTokenRepository>();
        services.AddScoped<IApiKeyRepository, ApiKeyRepository>();
        services.AddScoped<ISessionRepository, SessionRepository>();
        services.AddScoped<IAuditRepository, AuditRepository>();

        return services;
    }
@@ -49,6 +56,13 @@ public static class ServiceCollectionExtensions
        // Register repositories
        services.AddScoped<ITenantRepository, TenantRepository>();
        services.AddScoped<IUserRepository, UserRepository>();
        services.AddScoped<IRoleRepository, RoleRepository>();
        services.AddScoped<IPermissionRepository, PermissionRepository>();
        services.AddScoped<ITokenRepository, TokenRepository>();
        services.AddScoped<IRefreshTokenRepository, RefreshTokenRepository>();
        services.AddScoped<IApiKeyRepository, ApiKeyRepository>();
        services.AddScoped<ISessionRepository, SessionRepository>();
        services.AddScoped<IAuditRepository, AuditRepository>();

        return services;
    }

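// With these registrations in place, consumers resolve the repositories through normal
// constructor injection. A usage sketch (the service type below is hypothetical; the
// repository method signatures are taken from the diff above):
//
//     public sealed class TokenRevocationService
//     {
//         private readonly ITokenRepository _tokens;
//         private readonly IRefreshTokenRepository _refreshTokens;
//
//         public TokenRevocationService(ITokenRepository tokens, IRefreshTokenRepository refreshTokens)
//         {
//             _tokens = tokens;
//             _refreshTokens = refreshTokens;
//         }
//
//         public async Task RevokeUserAsync(string tenantId, Guid userId, string revokedBy, CancellationToken ct)
//         {
//             await _tokens.RevokeByUserIdAsync(tenantId, userId, revokedBy, ct);
//             await _refreshTokens.RevokeByUserIdAsync(tenantId, userId, revokedBy, ct);
//         }
//     }
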
@@ -0,0 +1,167 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Authority.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Authority.Storage.Postgres.Tests;

[Collection(AuthorityPostgresCollection.Name)]
public sealed class ApiKeyRepositoryTests : IAsyncLifetime
{
    private readonly AuthorityPostgresFixture _fixture;
    private readonly ApiKeyRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public ApiKeyRepositoryTests(AuthorityPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new AuthorityDataSource(Options.Create(options), NullLogger<AuthorityDataSource>.Instance);
        _repository = new ApiKeyRepository(dataSource, NullLogger<ApiKeyRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetByPrefix_RoundTripsApiKey()
    {
        // Arrange
        var keyPrefix = "sk_live_" + Guid.NewGuid().ToString("N")[..8];
        var apiKey = new ApiKeyEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            UserId = Guid.NewGuid(),
            Name = "CI/CD Key",
            KeyHash = "sha256_key_" + Guid.NewGuid().ToString("N"),
            KeyPrefix = keyPrefix,
            Scopes = ["scan:read", "scan:write"],
            Status = ApiKeyStatus.Active,
            ExpiresAt = DateTimeOffset.UtcNow.AddYears(1)
        };

        // Act
        await _repository.CreateAsync(_tenantId, apiKey);
        var fetched = await _repository.GetByPrefixAsync(keyPrefix);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(apiKey.Id);
        fetched.Name.Should().Be("CI/CD Key");
        fetched.Scopes.Should().BeEquivalentTo(["scan:read", "scan:write"]);
    }

    [Fact]
    public async Task GetById_ReturnsApiKey()
    {
        // Arrange
        var apiKey = CreateApiKey(Guid.NewGuid(), "Test Key");
        await _repository.CreateAsync(_tenantId, apiKey);

        // Act
        var fetched = await _repository.GetByIdAsync(_tenantId, apiKey.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Name.Should().Be("Test Key");
    }

    [Fact]
    public async Task GetByUserId_ReturnsUserApiKeys()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var key1 = CreateApiKey(userId, "Key 1");
        var key2 = CreateApiKey(userId, "Key 2");
        await _repository.CreateAsync(_tenantId, key1);
        await _repository.CreateAsync(_tenantId, key2);

        // Act
        var keys = await _repository.GetByUserIdAsync(_tenantId, userId);

        // Assert
        keys.Should().HaveCount(2);
    }

    [Fact]
    public async Task List_ReturnsAllKeysForTenant()
    {
        // Arrange
        var key1 = CreateApiKey(Guid.NewGuid(), "Key A");
        var key2 = CreateApiKey(Guid.NewGuid(), "Key B");
        await _repository.CreateAsync(_tenantId, key1);
        await _repository.CreateAsync(_tenantId, key2);

        // Act
        var keys = await _repository.ListAsync(_tenantId);

        // Assert
        keys.Should().HaveCount(2);
    }

    [Fact]
    public async Task Revoke_UpdatesStatusAndRevokedFields()
    {
        // Arrange
        var apiKey = CreateApiKey(Guid.NewGuid(), "ToRevoke");
        await _repository.CreateAsync(_tenantId, apiKey);

        // Act
        await _repository.RevokeAsync(_tenantId, apiKey.Id, "security@test.com");
        var fetched = await _repository.GetByIdAsync(_tenantId, apiKey.Id);

        // Assert
        fetched!.Status.Should().Be(ApiKeyStatus.Revoked);
        fetched.RevokedAt.Should().NotBeNull();
        fetched.RevokedBy.Should().Be("security@test.com");
    }

    [Fact]
    public async Task UpdateLastUsed_SetsLastUsedAt()
    {
        // Arrange
        var apiKey = CreateApiKey(Guid.NewGuid(), "Usage Test");
        await _repository.CreateAsync(_tenantId, apiKey);

        // Act
        await _repository.UpdateLastUsedAsync(_tenantId, apiKey.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, apiKey.Id);

        // Assert
        fetched!.LastUsedAt.Should().NotBeNull();
        fetched.LastUsedAt.Should().BeCloseTo(DateTimeOffset.UtcNow, TimeSpan.FromSeconds(5));
    }

    [Fact]
    public async Task Delete_RemovesApiKey()
    {
        // Arrange
        var apiKey = CreateApiKey(Guid.NewGuid(), "ToDelete");
        await _repository.CreateAsync(_tenantId, apiKey);

        // Act
        await _repository.DeleteAsync(_tenantId, apiKey.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, apiKey.Id);

        // Assert
        fetched.Should().BeNull();
    }

    private ApiKeyEntity CreateApiKey(Guid userId, string name) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        UserId = userId,
        Name = name,
        KeyHash = $"sha256_{Guid.NewGuid():N}",
        KeyPrefix = $"sk_test_{Guid.NewGuid():N}"[..16],
        Scopes = ["read"],
        Status = ApiKeyStatus.Active,
        ExpiresAt = DateTimeOffset.UtcNow.AddYears(1)
    };
}
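// The AuthorityPostgresCollection/AuthorityPostgresFixture pair used by these tests is
// not included in this diff. A minimal shape consistent with the call sites (names and
// internals are hypothetical):
//
//     [CollectionDefinition(Name)]
//     public sealed class AuthorityPostgresCollection : ICollectionFixture<AuthorityPostgresFixture>
//     {
//         public const string Name = "authority-postgres";
//     }
//
// The fixture is presumed to start one PostgreSQL instance per collection, expose
// CreateOptions() plus a per-run SchemaName, and offer TruncateAllTablesAsync() so each
// test starts from empty tables without re-provisioning the database.
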
@@ -0,0 +1,192 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Authority.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Authority.Storage.Postgres.Tests;

[Collection(AuthorityPostgresCollection.Name)]
public sealed class AuditRepositoryTests : IAsyncLifetime
{
    private readonly AuthorityPostgresFixture _fixture;
    private readonly AuditRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public AuditRepositoryTests(AuthorityPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new AuthorityDataSource(Options.Create(options), NullLogger<AuthorityDataSource>.Instance);
        _repository = new AuditRepository(dataSource, NullLogger<AuditRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task Create_ReturnsGeneratedId()
    {
        // Arrange
        var audit = new AuditEntity
        {
            TenantId = _tenantId,
            UserId = Guid.NewGuid(),
            Action = "user.login",
            ResourceType = "user",
            ResourceId = Guid.NewGuid().ToString(),
            IpAddress = "192.168.1.1",
            UserAgent = "Mozilla/5.0",
            CorrelationId = Guid.NewGuid().ToString()
        };

        // Act
        var id = await _repository.CreateAsync(_tenantId, audit);

        // Assert
        id.Should().BeGreaterThan(0);
    }

    [Fact]
    public async Task List_ReturnsAuditEntriesOrderedByCreatedAtDesc()
    {
        // Arrange
        var audit1 = CreateAudit("action1");
        var audit2 = CreateAudit("action2");
        await _repository.CreateAsync(_tenantId, audit1);
        await Task.Delay(10); // Ensure different timestamps
        await _repository.CreateAsync(_tenantId, audit2);

        // Act
        var audits = await _repository.ListAsync(_tenantId, limit: 10);

        // Assert
        audits.Should().HaveCount(2);
        audits[0].Action.Should().Be("action2"); // Most recent first
    }

    [Fact]
    public async Task GetByUserId_ReturnsUserAudits()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var audit = new AuditEntity
        {
            TenantId = _tenantId,
            UserId = userId,
            Action = "user.action",
            ResourceType = "test"
        };
        await _repository.CreateAsync(_tenantId, audit);

        // Act
        var audits = await _repository.GetByUserIdAsync(_tenantId, userId);

        // Assert
        audits.Should().HaveCount(1);
        audits[0].UserId.Should().Be(userId);
    }

    [Fact]
    public async Task GetByResource_ReturnsResourceAudits()
    {
        // Arrange
        var resourceId = Guid.NewGuid().ToString();
        var audit = new AuditEntity
        {
            TenantId = _tenantId,
            Action = "resource.update",
            ResourceType = "role",
            ResourceId = resourceId
        };
        await _repository.CreateAsync(_tenantId, audit);

        // Act
        var audits = await _repository.GetByResourceAsync(_tenantId, "role", resourceId);

        // Assert
        audits.Should().HaveCount(1);
        audits[0].ResourceId.Should().Be(resourceId);
    }

    [Fact]
    public async Task GetByCorrelationId_ReturnsCorrelatedAudits()
    {
        // Arrange
        var correlationId = Guid.NewGuid().ToString();
        var audit1 = new AuditEntity
        {
            TenantId = _tenantId,
            Action = "step1",
            ResourceType = "test",
            CorrelationId = correlationId
        };
        var audit2 = new AuditEntity
        {
            TenantId = _tenantId,
            Action = "step2",
            ResourceType = "test",
            CorrelationId = correlationId
        };
        await _repository.CreateAsync(_tenantId, audit1);
        await _repository.CreateAsync(_tenantId, audit2);

        // Act
        var audits = await _repository.GetByCorrelationIdAsync(_tenantId, correlationId);

        // Assert
        audits.Should().HaveCount(2);
        audits.Should().AllSatisfy(a => a.CorrelationId.Should().Be(correlationId));
    }

    [Fact]
    public async Task GetByAction_ReturnsMatchingAudits()
    {
        // Arrange
        await _repository.CreateAsync(_tenantId, CreateAudit("user.login"));
        await _repository.CreateAsync(_tenantId, CreateAudit("user.logout"));
        await _repository.CreateAsync(_tenantId, CreateAudit("user.login"));

        // Act
        var audits = await _repository.GetByActionAsync(_tenantId, "user.login");

        // Assert
        audits.Should().HaveCount(2);
        audits.Should().AllSatisfy(a => a.Action.Should().Be("user.login"));
    }

    [Fact]
    public async Task Create_StoresJsonbValues()
    {
        // Arrange
        var audit = new AuditEntity
        {
            TenantId = _tenantId,
            Action = "config.update",
            ResourceType = "config",
            OldValue = "{\"setting\": \"old\"}",
            NewValue = "{\"setting\": \"new\"}"
        };

        // Act
        await _repository.CreateAsync(_tenantId, audit);
        var audits = await _repository.GetByActionAsync(_tenantId, "config.update");

        // Assert
        audits.Should().HaveCount(1);
        audits[0].OldValue.Should().Contain("old");
        audits[0].NewValue.Should().Contain("new");
    }

    private AuditEntity CreateAudit(string action) => new()
    {
        TenantId = _tenantId,
        UserId = Guid.NewGuid(),
        Action = action,
        ResourceType = "test",
        ResourceId = Guid.NewGuid().ToString()
    };
}
@@ -0,0 +1,133 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Authority.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Authority.Storage.Postgres.Tests;

[Collection(AuthorityPostgresCollection.Name)]
public sealed class PermissionRepositoryTests : IAsyncLifetime
{
    private readonly AuthorityPostgresFixture _fixture;
    private readonly PermissionRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public PermissionRepositoryTests(AuthorityPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new AuthorityDataSource(Options.Create(options), NullLogger<AuthorityDataSource>.Instance);
        _repository = new PermissionRepository(dataSource, NullLogger<PermissionRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGet_RoundTripsPermission()
    {
        // Arrange
        var permission = new PermissionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "users:read",
            Resource = "users",
            Action = "read",
            Description = "Read user data"
        };

        // Act
        await _repository.CreateAsync(_tenantId, permission);
        var fetched = await _repository.GetByIdAsync(_tenantId, permission.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Name.Should().Be("users:read");
        fetched.Resource.Should().Be("users");
        fetched.Action.Should().Be("read");
    }

    [Fact]
    public async Task GetByName_ReturnsCorrectPermission()
    {
        // Arrange
        var permission = new PermissionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "roles:write",
            Resource = "roles",
            Action = "write"
        };
        await _repository.CreateAsync(_tenantId, permission);

        // Act
        var fetched = await _repository.GetByNameAsync(_tenantId, "roles:write");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(permission.Id);
    }

    [Fact]
    public async Task List_ReturnsAllPermissionsForTenant()
    {
        // Arrange
        var perm1 = new PermissionEntity { Id = Guid.NewGuid(), TenantId = _tenantId, Name = "p1", Resource = "r1", Action = "a1" };
        var perm2 = new PermissionEntity { Id = Guid.NewGuid(), TenantId = _tenantId, Name = "p2", Resource = "r2", Action = "a2" };
        await _repository.CreateAsync(_tenantId, perm1);
        await _repository.CreateAsync(_tenantId, perm2);

        // Act
        var permissions = await _repository.ListAsync(_tenantId);

        // Assert
        permissions.Should().HaveCount(2);
    }

    [Fact]
    public async Task GetByResource_ReturnsResourcePermissions()
    {
        // Arrange
        var perm1 = new PermissionEntity { Id = Guid.NewGuid(), TenantId = _tenantId, Name = "scans:read", Resource = "scans", Action = "read" };
        var perm2 = new PermissionEntity { Id = Guid.NewGuid(), TenantId = _tenantId, Name = "scans:write", Resource = "scans", Action = "write" };
        var perm3 = new PermissionEntity { Id = Guid.NewGuid(), TenantId = _tenantId, Name = "users:read", Resource = "users", Action = "read" };
        await _repository.CreateAsync(_tenantId, perm1);
        await _repository.CreateAsync(_tenantId, perm2);
        await _repository.CreateAsync(_tenantId, perm3);

        // Act
        var permissions = await _repository.GetByResourceAsync(_tenantId, "scans");

        // Assert
        permissions.Should().HaveCount(2);
        permissions.Should().AllSatisfy(p => p.Resource.Should().Be("scans"));
    }

    [Fact]
    public async Task Delete_RemovesPermission()
    {
        // Arrange
        var permission = new PermissionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "temp:delete",
            Resource = "temp",
            Action = "delete"
        };
        await _repository.CreateAsync(_tenantId, permission);

        // Act
        await _repository.DeleteAsync(_tenantId, permission.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, permission.Id);

        // Assert
        fetched.Should().BeNull();
    }
}
@@ -0,0 +1,148 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Authority.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Authority.Storage.Postgres.Tests;

[Collection(AuthorityPostgresCollection.Name)]
public sealed class RefreshTokenRepositoryTests : IAsyncLifetime
{
    private readonly AuthorityPostgresFixture _fixture;
    private readonly RefreshTokenRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public RefreshTokenRepositoryTests(AuthorityPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new AuthorityDataSource(Options.Create(options), NullLogger<AuthorityDataSource>.Instance);
        _repository = new RefreshTokenRepository(dataSource, NullLogger<RefreshTokenRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetByHash_RoundTripsRefreshToken()
    {
        // Arrange
        var token = new RefreshTokenEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            UserId = Guid.NewGuid(),
            TokenHash = "refresh_hash_" + Guid.NewGuid().ToString("N"),
            AccessTokenId = Guid.NewGuid(),
            ClientId = "web-app",
            IssuedAt = DateTimeOffset.UtcNow,
            ExpiresAt = DateTimeOffset.UtcNow.AddDays(30)
        };

        // Act
        await _repository.CreateAsync(_tenantId, token);
        var fetched = await _repository.GetByHashAsync(token.TokenHash);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(token.Id);
        fetched.ClientId.Should().Be("web-app");
    }

    [Fact]
    public async Task GetById_ReturnsToken()
    {
        // Arrange
        var token = CreateRefreshToken(Guid.NewGuid());
        await _repository.CreateAsync(_tenantId, token);

        // Act
        var fetched = await _repository.GetByIdAsync(_tenantId, token.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(token.Id);
    }

    [Fact]
    public async Task GetByUserId_ReturnsUserTokens()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var token1 = CreateRefreshToken(userId);
        var token2 = CreateRefreshToken(userId);
        await _repository.CreateAsync(_tenantId, token1);
        await _repository.CreateAsync(_tenantId, token2);

        // Act
        var tokens = await _repository.GetByUserIdAsync(_tenantId, userId);

        // Assert
        tokens.Should().HaveCount(2);
    }

    [Fact]
    public async Task Revoke_SetsRevokedFields()
    {
        // Arrange
        var token = CreateRefreshToken(Guid.NewGuid());
        await _repository.CreateAsync(_tenantId, token);

        // Act
        await _repository.RevokeAsync(_tenantId, token.Id, "admin@test.com", null);
        var fetched = await _repository.GetByHashAsync(token.TokenHash);

        // Assert
        fetched!.RevokedAt.Should().NotBeNull();
        fetched.RevokedBy.Should().Be("admin@test.com");
    }

    [Fact]
    public async Task Revoke_WithReplacedBy_SetsReplacedByField()
    {
        // Arrange
        var token = CreateRefreshToken(Guid.NewGuid());
        await _repository.CreateAsync(_tenantId, token);
        var newTokenId = Guid.NewGuid();

        // Act
        await _repository.RevokeAsync(_tenantId, token.Id, "rotation", newTokenId);
        var fetched = await _repository.GetByHashAsync(token.TokenHash);

        // Assert
        fetched!.RevokedAt.Should().NotBeNull();
        fetched.ReplacedBy.Should().Be(newTokenId);
    }

    [Fact]
    public async Task RevokeByUserId_RevokesAllUserTokens()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var token1 = CreateRefreshToken(userId);
        var token2 = CreateRefreshToken(userId);
        await _repository.CreateAsync(_tenantId, token1);
        await _repository.CreateAsync(_tenantId, token2);

        // Act
        await _repository.RevokeByUserIdAsync(_tenantId, userId, "security_action");
        var tokens = await _repository.GetByUserIdAsync(_tenantId, userId);

        // Assert
        tokens.Should().AllSatisfy(t => t.RevokedAt.Should().NotBeNull());
    }

    private RefreshTokenEntity CreateRefreshToken(Guid userId) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        UserId = userId,
        TokenHash = $"refresh_{Guid.NewGuid():N}",
        IssuedAt = DateTimeOffset.UtcNow,
        ExpiresAt = DateTimeOffset.UtcNow.AddDays(30)
    };
}
@@ -0,0 +1,140 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Authority.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Authority.Storage.Postgres.Tests;

[Collection(AuthorityPostgresCollection.Name)]
public sealed class RoleRepositoryTests : IAsyncLifetime
{
    private readonly AuthorityPostgresFixture _fixture;
    private readonly RoleRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public RoleRepositoryTests(AuthorityPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new AuthorityDataSource(Options.Create(options), NullLogger<AuthorityDataSource>.Instance);
        _repository = new RoleRepository(dataSource, NullLogger<RoleRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGet_RoundTripsRole()
    {
        // Arrange
        var role = new RoleEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "admin",
            DisplayName = "Administrator",
            Description = "Full system access",
            IsSystem = true,
            Metadata = "{\"level\": 1}"
        };

        // Act
        await _repository.CreateAsync(_tenantId, role);
        var fetched = await _repository.GetByIdAsync(_tenantId, role.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(role.Id);
        fetched.Name.Should().Be("admin");
        fetched.DisplayName.Should().Be("Administrator");
        fetched.IsSystem.Should().BeTrue();
    }

    [Fact]
    public async Task GetByName_ReturnsCorrectRole()
    {
        // Arrange
        var role = new RoleEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "viewer",
            DisplayName = "Viewer",
            Description = "Read-only access"
        };
        await _repository.CreateAsync(_tenantId, role);

        // Act
        var fetched = await _repository.GetByNameAsync(_tenantId, "viewer");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(role.Id);
    }

    [Fact]
    public async Task List_ReturnsAllRolesForTenant()
    {
        // Arrange
        var role1 = new RoleEntity { Id = Guid.NewGuid(), TenantId = _tenantId, Name = "role1" };
        var role2 = new RoleEntity { Id = Guid.NewGuid(), TenantId = _tenantId, Name = "role2" };
        await _repository.CreateAsync(_tenantId, role1);
        await _repository.CreateAsync(_tenantId, role2);

        // Act
        var roles = await _repository.ListAsync(_tenantId);

        // Assert
        roles.Should().HaveCount(2);
        roles.Select(r => r.Name).Should().Contain(["role1", "role2"]);
    }

    [Fact]
    public async Task Update_ModifiesRole()
    {
        // Arrange
        var role = new RoleEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "editor",
            DisplayName = "Editor"
        };
        await _repository.CreateAsync(_tenantId, role);

        // Act
        var updated = new RoleEntity
        {
            Id = role.Id,
            TenantId = _tenantId,
            Name = "editor",
            DisplayName = "Content Editor",
            Description = "Updated description"
        };
        await _repository.UpdateAsync(_tenantId, updated);
        var fetched = await _repository.GetByIdAsync(_tenantId, role.Id);

        // Assert
        fetched!.DisplayName.Should().Be("Content Editor");
        fetched.Description.Should().Be("Updated description");
    }

    [Fact]
    public async Task Delete_RemovesRole()
    {
        // Arrange
        var role = new RoleEntity { Id = Guid.NewGuid(), TenantId = _tenantId, Name = "temp" };
        await _repository.CreateAsync(_tenantId, role);

        // Act
        await _repository.DeleteAsync(_tenantId, role.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, role.Id);

        // Assert
        fetched.Should().BeNull();
    }
}
@@ -0,0 +1,179 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Authority.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Authority.Storage.Postgres.Tests;

[Collection(AuthorityPostgresCollection.Name)]
public sealed class SessionRepositoryTests : IAsyncLifetime
{
    private readonly AuthorityPostgresFixture _fixture;
    private readonly SessionRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public SessionRepositoryTests(AuthorityPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new AuthorityDataSource(Options.Create(options), NullLogger<AuthorityDataSource>.Instance);
        _repository = new SessionRepository(dataSource, NullLogger<SessionRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGet_RoundTripsSession()
    {
        // Arrange
        var session = new SessionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            UserId = Guid.NewGuid(),
            SessionTokenHash = "session_hash_" + Guid.NewGuid().ToString("N"),
            IpAddress = "192.168.1.1",
            UserAgent = "Mozilla/5.0",
            StartedAt = DateTimeOffset.UtcNow,
            LastActivityAt = DateTimeOffset.UtcNow,
            ExpiresAt = DateTimeOffset.UtcNow.AddDays(7)
        };

        // Act
        await _repository.CreateAsync(_tenantId, session);
        var fetched = await _repository.GetByIdAsync(_tenantId, session.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(session.Id);
        fetched.IpAddress.Should().Be("192.168.1.1");
        fetched.UserAgent.Should().Be("Mozilla/5.0");
    }

    [Fact]
    public async Task GetByTokenHash_ReturnsSession()
    {
        // Arrange
        var tokenHash = "lookup_hash_" + Guid.NewGuid().ToString("N");
        var session = new SessionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            UserId = Guid.NewGuid(),
            SessionTokenHash = tokenHash,
            StartedAt = DateTimeOffset.UtcNow,
            LastActivityAt = DateTimeOffset.UtcNow,
            ExpiresAt = DateTimeOffset.UtcNow.AddDays(7)
        };
        await _repository.CreateAsync(_tenantId, session);

        // Act
        var fetched = await _repository.GetByTokenHashAsync(tokenHash);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(session.Id);
    }

    [Fact]
    public async Task GetByUserId_WithActiveOnly_ReturnsOnlyActiveSessions()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var activeSession = CreateSession(userId);
        var endedSession = new SessionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            UserId = userId,
            SessionTokenHash = "ended_" + Guid.NewGuid().ToString("N"),
            StartedAt = DateTimeOffset.UtcNow.AddHours(-2),
            LastActivityAt = DateTimeOffset.UtcNow.AddHours(-1),
            ExpiresAt = DateTimeOffset.UtcNow.AddDays(7),
            EndedAt = DateTimeOffset.UtcNow,
            EndReason = "logout"
        };

        await _repository.CreateAsync(_tenantId, activeSession);
        await _repository.CreateAsync(_tenantId, endedSession);

        // Act
        var activeSessions = await _repository.GetByUserIdAsync(_tenantId, userId, activeOnly: true);
        var allSessions = await _repository.GetByUserIdAsync(_tenantId, userId, activeOnly: false);

        // Assert
        activeSessions.Should().HaveCount(1);
        allSessions.Should().HaveCount(2);
    }

    [Fact]
    public async Task UpdateLastActivity_UpdatesTimestamp()
    {
        // Arrange
        var session = CreateSession(Guid.NewGuid());
        await _repository.CreateAsync(_tenantId, session);

        // Act
        await Task.Delay(100); // Ensure time difference
        await _repository.UpdateLastActivityAsync(_tenantId, session.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, session.Id);

        // Assert
        fetched!.LastActivityAt.Should().BeCloseTo(DateTimeOffset.UtcNow, TimeSpan.FromSeconds(5));
    }

    [Fact]
    public async Task End_SetsEndFieldsCorrectly()
    {
        // Arrange
        var session = CreateSession(Guid.NewGuid());
        await _repository.CreateAsync(_tenantId, session);

        // Act
        await _repository.EndAsync(_tenantId, session.Id, "session_timeout");
        var fetched = await _repository.GetByIdAsync(_tenantId, session.Id);

        // Assert
        fetched!.EndedAt.Should().NotBeNull();
        fetched.EndReason.Should().Be("session_timeout");
    }

    [Fact]
    public async Task EndByUserId_EndsAllUserSessions()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var session1 = CreateSession(userId);
        var session2 = CreateSession(userId);
        await _repository.CreateAsync(_tenantId, session1);
        await _repository.CreateAsync(_tenantId, session2);

        // Act
        await _repository.EndByUserIdAsync(_tenantId, userId, "forced_logout");
        var sessions = await _repository.GetByUserIdAsync(_tenantId, userId, activeOnly: false);

        // Assert
        sessions.Should().HaveCount(2);
        sessions.Should().AllSatisfy(s =>
        {
            s.EndedAt.Should().NotBeNull();
            s.EndReason.Should().Be("forced_logout");
        });
    }

    private SessionEntity CreateSession(Guid userId) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        UserId = userId,
        SessionTokenHash = $"session_{Guid.NewGuid():N}",
        StartedAt = DateTimeOffset.UtcNow,
        LastActivityAt = DateTimeOffset.UtcNow,
        ExpiresAt = DateTimeOffset.UtcNow.AddDays(7)
    };
}
@@ -0,0 +1,135 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Authority.Storage.Postgres.Models;
using StellaOps.Authority.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Authority.Storage.Postgres.Tests;

[Collection(AuthorityPostgresCollection.Name)]
public sealed class TokenRepositoryTests : IAsyncLifetime
{
    private readonly AuthorityPostgresFixture _fixture;
    private readonly TokenRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public TokenRepositoryTests(AuthorityPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new AuthorityDataSource(Options.Create(options), NullLogger<AuthorityDataSource>.Instance);
        _repository = new TokenRepository(dataSource, NullLogger<TokenRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetByHash_RoundTripsToken()
    {
        // Arrange
        var token = new TokenEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            UserId = Guid.NewGuid(),
            TokenHash = "sha256_hash_" + Guid.NewGuid().ToString("N"),
            TokenType = TokenType.Access,
            Scopes = ["read", "write"],
            ClientId = "web-app",
            IssuedAt = DateTimeOffset.UtcNow,
            ExpiresAt = DateTimeOffset.UtcNow.AddHours(1)
        };

        // Act
        await _repository.CreateAsync(_tenantId, token);
        var fetched = await _repository.GetByHashAsync(token.TokenHash);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(token.Id);
        fetched.TokenType.Should().Be(TokenType.Access);
        fetched.Scopes.Should().BeEquivalentTo(["read", "write"]);
    }

    [Fact]
    public async Task GetById_ReturnsToken()
    {
        // Arrange
        var token = CreateToken(Guid.NewGuid());
        await _repository.CreateAsync(_tenantId, token);

        // Act
        var fetched = await _repository.GetByIdAsync(_tenantId, token.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(token.Id);
    }

    [Fact]
    public async Task GetByUserId_ReturnsUserTokens()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var token1 = CreateToken(userId);
        var token2 = CreateToken(userId);
        await _repository.CreateAsync(_tenantId, token1);
        await _repository.CreateAsync(_tenantId, token2);

        // Act
        var tokens = await _repository.GetByUserIdAsync(_tenantId, userId);

        // Assert
        tokens.Should().HaveCount(2);
    }

    [Fact]
    public async Task Revoke_SetsRevokedFields()
    {
        // Arrange
        var token = CreateToken(Guid.NewGuid());
        await _repository.CreateAsync(_tenantId, token);

        // Act
        await _repository.RevokeAsync(_tenantId, token.Id, "admin@test.com");
        var fetched = await _repository.GetByHashAsync(token.TokenHash);

        // Assert
        fetched!.RevokedAt.Should().NotBeNull();
        fetched.RevokedBy.Should().Be("admin@test.com");
    }

    [Fact]
    public async Task RevokeByUserId_RevokesAllUserTokens()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var token1 = CreateToken(userId);
        var token2 = CreateToken(userId);
        await _repository.CreateAsync(_tenantId, token1);
        await _repository.CreateAsync(_tenantId, token2);

        // Act
        await _repository.RevokeByUserIdAsync(_tenantId, userId, "security_action");
        var tokens = await _repository.GetByUserIdAsync(_tenantId, userId);

        // Assert
        tokens.Should().AllSatisfy(t => t.RevokedAt.Should().NotBeNull());
    }

    private TokenEntity CreateToken(Guid userId) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        UserId = userId,
        TokenHash = $"sha256_{Guid.NewGuid():N}",
        TokenType = TokenType.Access,
        Scopes = ["read"],
        IssuedAt = DateTimeOffset.UtcNow,
        ExpiresAt = DateTimeOffset.UtcNow.AddHours(1)
    };
}
@@ -29,6 +29,17 @@ public static class ServiceCollectionExtensions
        // Register repositories
        services.AddScoped<IChannelRepository, ChannelRepository>();
        services.AddScoped<IDeliveryRepository, DeliveryRepository>();
        services.AddScoped<IRuleRepository, RuleRepository>();
        services.AddScoped<ITemplateRepository, TemplateRepository>();
        services.AddScoped<IDigestRepository, DigestRepository>();
        services.AddScoped<IQuietHoursRepository, QuietHoursRepository>();
        services.AddScoped<IMaintenanceWindowRepository, MaintenanceWindowRepository>();
        services.AddScoped<IEscalationPolicyRepository, EscalationPolicyRepository>();
        services.AddScoped<IEscalationStateRepository, EscalationStateRepository>();
        services.AddScoped<IOnCallScheduleRepository, OnCallScheduleRepository>();
        services.AddScoped<IInboxRepository, InboxRepository>();
        services.AddScoped<IIncidentRepository, IncidentRepository>();
        services.AddScoped<INotifyAuditRepository, NotifyAuditRepository>();

        return services;
    }
@@ -49,6 +60,17 @@ public static class ServiceCollectionExtensions
        // Register repositories
        services.AddScoped<IChannelRepository, ChannelRepository>();
        services.AddScoped<IDeliveryRepository, DeliveryRepository>();
        services.AddScoped<IRuleRepository, RuleRepository>();
        services.AddScoped<ITemplateRepository, TemplateRepository>();
        services.AddScoped<IDigestRepository, DigestRepository>();
        services.AddScoped<IQuietHoursRepository, QuietHoursRepository>();
        services.AddScoped<IMaintenanceWindowRepository, MaintenanceWindowRepository>();
        services.AddScoped<IEscalationPolicyRepository, EscalationPolicyRepository>();
        services.AddScoped<IEscalationStateRepository, EscalationStateRepository>();
        services.AddScoped<IOnCallScheduleRepository, OnCallScheduleRepository>();
        services.AddScoped<IInboxRepository, InboxRepository>();
        services.AddScoped<IIncidentRepository, IncidentRepository>();
        services.AddScoped<INotifyAuditRepository, NotifyAuditRepository>();

        return services;
    }

@@ -0,0 +1,204 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Notify.Storage.Postgres.Tests;

[Collection(NotifyPostgresCollection.Name)]
public sealed class ChannelRepositoryTests : IAsyncLifetime
{
    private readonly NotifyPostgresFixture _fixture;
    private readonly ChannelRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public ChannelRepositoryTests(NotifyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new NotifyDataSource(Options.Create(options), NullLogger<NotifyDataSource>.Instance);
        _repository = new ChannelRepository(dataSource, NullLogger<ChannelRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsChannel()
    {
        // Arrange
        var channel = new ChannelEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "email-primary",
            ChannelType = ChannelType.Email,
            Enabled = true,
            Config = "{\"smtpHost\": \"smtp.example.com\"}"
        };

        // Act
        await _repository.CreateAsync(channel);
        var fetched = await _repository.GetByIdAsync(_tenantId, channel.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(channel.Id);
        fetched.Name.Should().Be("email-primary");
        fetched.ChannelType.Should().Be(ChannelType.Email);
    }

    [Fact]
    public async Task GetByName_ReturnsCorrectChannel()
    {
        // Arrange
        var channel = CreateChannel("slack-alerts", ChannelType.Slack);
        await _repository.CreateAsync(channel);

        // Act
        var fetched = await _repository.GetByNameAsync(_tenantId, "slack-alerts");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(channel.Id);
    }

    [Fact]
    public async Task GetAll_ReturnsAllChannelsForTenant()
    {
        // Arrange
        var channel1 = CreateChannel("channel1", ChannelType.Email);
        var channel2 = CreateChannel("channel2", ChannelType.Slack);
        await _repository.CreateAsync(channel1);
        await _repository.CreateAsync(channel2);

        // Act
        var channels = await _repository.GetAllAsync(_tenantId);

        // Assert
        channels.Should().HaveCount(2);
        channels.Select(c => c.Name).Should().Contain(["channel1", "channel2"]);
    }

    [Fact]
    public async Task GetAll_FiltersByEnabled()
    {
        // Arrange
        var enabledChannel = CreateChannel("enabled", ChannelType.Email);
        var disabledChannel = new ChannelEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "disabled",
            ChannelType = ChannelType.Email,
            Enabled = false
        };
        await _repository.CreateAsync(enabledChannel);
        await _repository.CreateAsync(disabledChannel);

        // Act
        var enabledChannels = await _repository.GetAllAsync(_tenantId, enabled: true);

        // Assert
        enabledChannels.Should().HaveCount(1);
        enabledChannels[0].Name.Should().Be("enabled");
    }

    [Fact]
    public async Task GetAll_FiltersByChannelType()
    {
        // Arrange
        var emailChannel = CreateChannel("email", ChannelType.Email);
        var slackChannel = CreateChannel("slack", ChannelType.Slack);
        await _repository.CreateAsync(emailChannel);
        await _repository.CreateAsync(slackChannel);

        // Act
        var slackChannels = await _repository.GetAllAsync(_tenantId, channelType: ChannelType.Slack);

        // Assert
        slackChannels.Should().HaveCount(1);
        slackChannels[0].Name.Should().Be("slack");
    }

    [Fact]
    public async Task Update_ModifiesChannel()
    {
        // Arrange
        var channel = CreateChannel("update-test", ChannelType.Email);
        await _repository.CreateAsync(channel);

        // Act
        var updated = new ChannelEntity
        {
            Id = channel.Id,
            TenantId = _tenantId,
            Name = "update-test",
            ChannelType = ChannelType.Email,
            Enabled = false,
            Config = "{\"updated\": true}"
        };
        var result = await _repository.UpdateAsync(updated);
        var fetched = await _repository.GetByIdAsync(_tenantId, channel.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Enabled.Should().BeFalse();
        fetched.Config.Should().Contain("updated");
    }

    [Fact]
    public async Task Delete_RemovesChannel()
    {
        // Arrange
        var channel = CreateChannel("delete-test", ChannelType.Email);
        await _repository.CreateAsync(channel);

        // Act
        var result = await _repository.DeleteAsync(_tenantId, channel.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, channel.Id);

        // Assert
        result.Should().BeTrue();
        fetched.Should().BeNull();
    }

    [Fact]
    public async Task GetEnabledByType_ReturnsOnlyEnabledChannelsOfType()
    {
        // Arrange
        var enabledEmail = CreateChannel("enabled-email", ChannelType.Email);
        var disabledEmail = new ChannelEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "disabled-email",
            ChannelType = ChannelType.Email,
            Enabled = false
        };
        var enabledSlack = CreateChannel("enabled-slack", ChannelType.Slack);
        await _repository.CreateAsync(enabledEmail);
        await _repository.CreateAsync(disabledEmail);
        await _repository.CreateAsync(enabledSlack);

        // Act
        var channels = await _repository.GetEnabledByTypeAsync(_tenantId, ChannelType.Email);

        // Assert
        channels.Should().HaveCount(1);
        channels[0].Name.Should().Be("enabled-email");
    }

    private ChannelEntity CreateChannel(string name, ChannelType type) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        Name = name,
        ChannelType = type,
        Enabled = true
    };
}
@@ -0,0 +1,204 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Notify.Storage.Postgres.Tests;

[Collection(NotifyPostgresCollection.Name)]
public sealed class DeliveryRepositoryTests : IAsyncLifetime
{
    private readonly NotifyPostgresFixture _fixture;
    private readonly DeliveryRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public DeliveryRepositoryTests(NotifyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new NotifyDataSource(Options.Create(options), NullLogger<NotifyDataSource>.Instance);
        _repository = new DeliveryRepository(dataSource, NullLogger<DeliveryRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsDelivery()
    {
        // Arrange
        var delivery = CreateDelivery();

        // Act
        await _repository.CreateAsync(delivery);
        var fetched = await _repository.GetByIdAsync(_tenantId, delivery.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(delivery.Id);
        fetched.Recipient.Should().Be("user@example.com");
        fetched.Status.Should().Be(DeliveryStatus.Pending);
    }

    [Fact]
    public async Task GetPending_ReturnsPendingDeliveries()
    {
        // Arrange
        var pending = CreateDelivery();
        await _repository.CreateAsync(pending);

        // Act
        var pendingDeliveries = await _repository.GetPendingAsync(_tenantId);

        // Assert
        pendingDeliveries.Should().HaveCount(1);
        pendingDeliveries[0].Id.Should().Be(pending.Id);
    }

    [Fact]
    public async Task GetByStatus_ReturnsDeliveriesWithStatus()
    {
        // Arrange
        var delivery = CreateDelivery();
        await _repository.CreateAsync(delivery);

        // Act
        var deliveries = await _repository.GetByStatusAsync(_tenantId, DeliveryStatus.Pending);

        // Assert
        deliveries.Should().HaveCount(1);
        deliveries[0].Status.Should().Be(DeliveryStatus.Pending);
    }

    [Fact]
    public async Task GetByCorrelationId_ReturnsCorrelatedDeliveries()
    {
        // Arrange
        var correlationId = Guid.NewGuid().ToString();
        var delivery = new DeliveryEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            ChannelId = Guid.NewGuid(),
            Recipient = "user@example.com",
            EventType = "scan.completed",
            CorrelationId = correlationId
        };
        await _repository.CreateAsync(delivery);

        // Act
        var deliveries = await _repository.GetByCorrelationIdAsync(_tenantId, correlationId);

        // Assert
        deliveries.Should().HaveCount(1);
        deliveries[0].CorrelationId.Should().Be(correlationId);
    }

    [Fact]
    public async Task MarkQueued_UpdatesStatus()
    {
        // Arrange
        var delivery = CreateDelivery();
        await _repository.CreateAsync(delivery);

        // Act
        var result = await _repository.MarkQueuedAsync(_tenantId, delivery.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, delivery.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(DeliveryStatus.Queued);
        fetched.QueuedAt.Should().NotBeNull();
    }

    [Fact]
    public async Task MarkSent_UpdatesStatusAndExternalId()
    {
        // Arrange
        var delivery = CreateDelivery();
        await _repository.CreateAsync(delivery);
        await _repository.MarkQueuedAsync(_tenantId, delivery.Id);

        // Act
        var result = await _repository.MarkSentAsync(_tenantId, delivery.Id, "external-123");
        var fetched = await _repository.GetByIdAsync(_tenantId, delivery.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(DeliveryStatus.Sent);
        fetched.ExternalId.Should().Be("external-123");
        fetched.SentAt.Should().NotBeNull();
    }

    [Fact]
    public async Task MarkDelivered_UpdatesStatus()
    {
        // Arrange
        var delivery = CreateDelivery();
        await _repository.CreateAsync(delivery);
        await _repository.MarkSentAsync(_tenantId, delivery.Id);

        // Act
        var result = await _repository.MarkDeliveredAsync(_tenantId, delivery.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, delivery.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(DeliveryStatus.Delivered);
        fetched.DeliveredAt.Should().NotBeNull();
    }

    [Fact]
    public async Task MarkFailed_UpdatesStatusAndError()
    {
        // Arrange
        var delivery = CreateDelivery();
        await _repository.CreateAsync(delivery);

        // Act
        var result = await _repository.MarkFailedAsync(_tenantId, delivery.Id, "Connection timeout", TimeSpan.FromMinutes(5));
        var fetched = await _repository.GetByIdAsync(_tenantId, delivery.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(DeliveryStatus.Failed);
        fetched.ErrorMessage.Should().Be("Connection timeout");
        fetched.FailedAt.Should().NotBeNull();
    }

    [Fact]
    public async Task GetStats_ReturnsCorrectCounts()
    {
        // Arrange
        var delivery1 = CreateDelivery();
        var delivery2 = CreateDelivery();
        await _repository.CreateAsync(delivery1);
        await _repository.CreateAsync(delivery2);
        await _repository.MarkSentAsync(_tenantId, delivery2.Id);

        var from = DateTimeOffset.UtcNow.AddHours(-1);
        var to = DateTimeOffset.UtcNow.AddHours(1);

        // Act
        var stats = await _repository.GetStatsAsync(_tenantId, from, to);

        // Assert
        stats.Total.Should().Be(2);
        stats.Pending.Should().Be(1);
        stats.Sent.Should().Be(1);
    }

    private DeliveryEntity CreateDelivery() => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        ChannelId = Guid.NewGuid(),
        Recipient = "user@example.com",
        EventType = "scan.completed",
        Status = DeliveryStatus.Pending
    };
}
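
// Editorial sketch, not part of this commit: every test class in this change
// repeats the same four-line data-source wiring in its constructor. A shared
// helper such as the one below (hypothetical name) could centralize it; the
// calls are copied verbatim from the constructors above.
internal static class NotifyTestDataSource
{
    public static NotifyDataSource Create(NotifyPostgresFixture fixture)
    {
        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        return new NotifyDataSource(Options.Create(options), NullLogger<NotifyDataSource>.Instance);
    }
}
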
@@ -0,0 +1,191 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Notify.Storage.Postgres.Tests;

[Collection(NotifyPostgresCollection.Name)]
public sealed class DigestRepositoryTests : IAsyncLifetime
{
    private readonly NotifyPostgresFixture _fixture;
    private readonly DigestRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public DigestRepositoryTests(NotifyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new NotifyDataSource(Options.Create(options), NullLogger<NotifyDataSource>.Instance);
        _repository = new DigestRepository(dataSource, NullLogger<DigestRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task UpsertAndGetById_RoundTripsDigest()
    {
        // Arrange
        var digest = new DigestEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            ChannelId = Guid.NewGuid(),
            Recipient = "user@example.com",
            DigestKey = "daily-summary",
            EventCount = 0,
            Status = DigestStatus.Collecting,
            CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
        };

        // Act
        await _repository.UpsertAsync(digest);
        var fetched = await _repository.GetByIdAsync(_tenantId, digest.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(digest.Id);
        fetched.DigestKey.Should().Be("daily-summary");
        fetched.Status.Should().Be(DigestStatus.Collecting);
    }

    [Fact]
    public async Task GetByKey_ReturnsCorrectDigest()
    {
        // Arrange
        var channelId = Guid.NewGuid();
        var digest = new DigestEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            ChannelId = channelId,
            Recipient = "user@example.com",
            DigestKey = "weekly-report",
            CollectUntil = DateTimeOffset.UtcNow.AddDays(7)
        };
        await _repository.UpsertAsync(digest);

        // Act
        var fetched = await _repository.GetByKeyAsync(_tenantId, channelId, "user@example.com", "weekly-report");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(digest.Id);
    }

    [Fact]
    public async Task AddEvent_IncrementsEventCount()
    {
        // Arrange
        var digest = CreateDigest("event-test");
        await _repository.UpsertAsync(digest);

        // Act
        await _repository.AddEventAsync(_tenantId, digest.Id, "{\"type\": \"test\"}");
        await _repository.AddEventAsync(_tenantId, digest.Id, "{\"type\": \"test2\"}");
        var fetched = await _repository.GetByIdAsync(_tenantId, digest.Id);

        // Assert
        fetched!.EventCount.Should().Be(2);
    }

    [Fact]
    public async Task GetReadyToSend_ReturnsDigestsReadyToSend()
    {
        // Arrange - One ready digest (past CollectUntil), one not ready
        var readyDigest = new DigestEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            ChannelId = Guid.NewGuid(),
            Recipient = "ready@example.com",
            DigestKey = "ready",
            Status = DigestStatus.Collecting,
            CollectUntil = DateTimeOffset.UtcNow.AddMinutes(-1)
        };
        var notReadyDigest = new DigestEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            ChannelId = Guid.NewGuid(),
            Recipient = "notready@example.com",
            DigestKey = "notready",
            Status = DigestStatus.Collecting,
            CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
        };
        await _repository.UpsertAsync(readyDigest);
        await _repository.UpsertAsync(notReadyDigest);

        // Act
        var readyDigests = await _repository.GetReadyToSendAsync();

        // Assert
        readyDigests.Should().HaveCount(1);
        readyDigests[0].DigestKey.Should().Be("ready");
    }

    [Fact]
    public async Task MarkSending_UpdatesStatus()
    {
        // Arrange
        var digest = CreateDigest("sending-test");
        await _repository.UpsertAsync(digest);

        // Act
        var result = await _repository.MarkSendingAsync(_tenantId, digest.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, digest.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(DigestStatus.Sending);
    }

    [Fact]
    public async Task MarkSent_UpdatesStatusAndSentAt()
    {
        // Arrange
        var digest = CreateDigest("sent-test");
        await _repository.UpsertAsync(digest);
        await _repository.MarkSendingAsync(_tenantId, digest.Id);

        // Act
        var result = await _repository.MarkSentAsync(_tenantId, digest.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, digest.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(DigestStatus.Sent);
        fetched.SentAt.Should().NotBeNull();
    }

    [Fact]
    public async Task DeleteOld_RemovesOldDigests()
    {
        // Arrange
        var digest = CreateDigest("old-digest");
        await _repository.UpsertAsync(digest);

        // Act - delete digests older than a future cutoff (removes the digest just created)
        var cutoff = DateTimeOffset.UtcNow.AddMinutes(1);
        var count = await _repository.DeleteOldAsync(cutoff);

        // Assert
        count.Should().Be(1);
    }

    private DigestEntity CreateDigest(string key) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        ChannelId = Guid.NewGuid(),
        Recipient = "user@example.com",
        DigestKey = key,
        Status = DigestStatus.Collecting,
        CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
    };
}
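
// Editorial sketch, not part of this commit: the readiness rule the
// GetReadyToSend test above pins down - still collecting and the collection
// window elapsed. This predicate is inferred from the test expectations,
// not from the repository's SQL.
internal static class DigestReadiness
{
    public static bool IsReadyToSend(DigestEntity digest, DateTimeOffset now) =>
        digest.Status == DigestStatus.Collecting && digest.CollectUntil <= now;
}
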
@@ -0,0 +1,208 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Notify.Storage.Postgres.Tests;

[Collection(NotifyPostgresCollection.Name)]
public sealed class InboxRepositoryTests : IAsyncLifetime
{
    private readonly NotifyPostgresFixture _fixture;
    private readonly InboxRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public InboxRepositoryTests(NotifyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new NotifyDataSource(Options.Create(options), NullLogger<NotifyDataSource>.Instance);
        _repository = new InboxRepository(dataSource, NullLogger<InboxRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsInboxItem()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var inbox = new InboxEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            UserId = userId,
            Title = "New Vulnerability Found",
            Body = "Critical vulnerability CVE-2024-1234 detected",
            EventType = "vulnerability.found",
            ActionUrl = "/scans/123/vulnerabilities"
        };

        // Act
        await _repository.CreateAsync(inbox);
        var fetched = await _repository.GetByIdAsync(_tenantId, inbox.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(inbox.Id);
        fetched.Title.Should().Be("New Vulnerability Found");
        fetched.Read.Should().BeFalse();
    }

    [Fact]
    public async Task GetForUser_ReturnsUserInboxItems()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var inbox1 = CreateInbox(userId, "Item 1");
        var inbox2 = CreateInbox(userId, "Item 2");
        var otherUserInbox = CreateInbox(Guid.NewGuid(), "Other user item");
        await _repository.CreateAsync(inbox1);
        await _repository.CreateAsync(inbox2);
        await _repository.CreateAsync(otherUserInbox);

        // Act
        var items = await _repository.GetForUserAsync(_tenantId, userId);

        // Assert
        items.Should().HaveCount(2);
        items.Select(i => i.Title).Should().Contain(["Item 1", "Item 2"]);
    }

    [Fact]
    public async Task GetForUser_FiltersUnreadOnly()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var unreadItem = CreateInbox(userId, "Unread");
        var readItem = CreateInbox(userId, "Read");
        await _repository.CreateAsync(unreadItem);
        await _repository.CreateAsync(readItem);
        await _repository.MarkReadAsync(_tenantId, readItem.Id);

        // Act
        var unreadItems = await _repository.GetForUserAsync(_tenantId, userId, unreadOnly: true);

        // Assert
        unreadItems.Should().HaveCount(1);
        unreadItems[0].Title.Should().Be("Unread");
    }

    [Fact]
    public async Task GetUnreadCount_ReturnsCorrectCount()
    {
        // Arrange
        var userId = Guid.NewGuid();
        await _repository.CreateAsync(CreateInbox(userId, "Unread 1"));
        await _repository.CreateAsync(CreateInbox(userId, "Unread 2"));
        var readItem = CreateInbox(userId, "Read");
        await _repository.CreateAsync(readItem);
        await _repository.MarkReadAsync(_tenantId, readItem.Id);

        // Act
        var count = await _repository.GetUnreadCountAsync(_tenantId, userId);

        // Assert
        count.Should().Be(2);
    }

    [Fact]
    public async Task MarkRead_UpdatesReadStatus()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var inbox = CreateInbox(userId, "To be read");
        await _repository.CreateAsync(inbox);

        // Act
        var result = await _repository.MarkReadAsync(_tenantId, inbox.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, inbox.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Read.Should().BeTrue();
        fetched.ReadAt.Should().NotBeNull();
    }

    [Fact]
    public async Task MarkAllRead_MarksAllUserItemsAsRead()
    {
        // Arrange
        var userId = Guid.NewGuid();
        await _repository.CreateAsync(CreateInbox(userId, "Item 1"));
        await _repository.CreateAsync(CreateInbox(userId, "Item 2"));
        await _repository.CreateAsync(CreateInbox(userId, "Item 3"));

        // Act
        var count = await _repository.MarkAllReadAsync(_tenantId, userId);
        var unreadCount = await _repository.GetUnreadCountAsync(_tenantId, userId);

        // Assert
        count.Should().Be(3);
        unreadCount.Should().Be(0);
    }

    [Fact]
    public async Task Archive_ArchivesItem()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var inbox = CreateInbox(userId, "To be archived");
        await _repository.CreateAsync(inbox);

        // Act
        var result = await _repository.ArchiveAsync(_tenantId, inbox.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, inbox.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Archived.Should().BeTrue();
        fetched.ArchivedAt.Should().NotBeNull();
    }

    [Fact]
    public async Task Delete_RemovesItem()
    {
        // Arrange
        var userId = Guid.NewGuid();
        var inbox = CreateInbox(userId, "To be deleted");
        await _repository.CreateAsync(inbox);

        // Act
        var result = await _repository.DeleteAsync(_tenantId, inbox.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, inbox.Id);

        // Assert
        result.Should().BeTrue();
        fetched.Should().BeNull();
    }

    [Fact]
    public async Task DeleteOld_RemovesOldItems()
    {
        // Arrange - we can't easily set CreatedAt in this test, so this only verifies the API call works
        var userId = Guid.NewGuid();
        await _repository.CreateAsync(CreateInbox(userId, "Recent item"));

        // Act - delete items older than a future cutoff (should delete the item just created)
        var cutoff = DateTimeOffset.UtcNow.AddMinutes(1);
        var count = await _repository.DeleteOldAsync(cutoff);

        // Assert
        count.Should().Be(1);
    }

    private InboxEntity CreateInbox(Guid userId, string title) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        UserId = userId,
        Title = title,
        EventType = "test.event"
    };
}
@@ -0,0 +1,168 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Notify.Storage.Postgres.Tests;

[Collection(NotifyPostgresCollection.Name)]
public sealed class NotifyAuditRepositoryTests : IAsyncLifetime
{
    private readonly NotifyPostgresFixture _fixture;
    private readonly NotifyAuditRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public NotifyAuditRepositoryTests(NotifyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new NotifyDataSource(Options.Create(options), NullLogger<NotifyDataSource>.Instance);
        _repository = new NotifyAuditRepository(dataSource, NullLogger<NotifyAuditRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task Create_ReturnsGeneratedId()
    {
        // Arrange
        var audit = new NotifyAuditEntity
        {
            TenantId = _tenantId,
            UserId = Guid.NewGuid(),
            Action = "channel.created",
            ResourceType = "channel",
            ResourceId = Guid.NewGuid().ToString()
        };

        // Act
        var id = await _repository.CreateAsync(audit);

        // Assert
        id.Should().BeGreaterThan(0);
    }

    [Fact]
    public async Task List_ReturnsAuditEntriesOrderedByCreatedAtDesc()
    {
        // Arrange
        var audit1 = CreateAudit("action1");
        var audit2 = CreateAudit("action2");
        await _repository.CreateAsync(audit1);
        await Task.Delay(10);
        await _repository.CreateAsync(audit2);

        // Act
        var audits = await _repository.ListAsync(_tenantId, limit: 10);

        // Assert
        audits.Should().HaveCount(2);
        audits[0].Action.Should().Be("action2"); // Most recent first
    }

    [Fact]
    public async Task GetByResource_ReturnsResourceAudits()
    {
        // Arrange
        var resourceId = Guid.NewGuid().ToString();
        var audit = new NotifyAuditEntity
        {
            TenantId = _tenantId,
            Action = "rule.updated",
            ResourceType = "rule",
            ResourceId = resourceId
        };
        await _repository.CreateAsync(audit);

        // Act
        var audits = await _repository.GetByResourceAsync(_tenantId, "rule", resourceId);

        // Assert
        audits.Should().HaveCount(1);
        audits[0].ResourceId.Should().Be(resourceId);
    }

    [Fact]
    public async Task GetByResource_WithoutResourceId_ReturnsAllOfType()
    {
        // Arrange
        await _repository.CreateAsync(new NotifyAuditEntity
        {
            TenantId = _tenantId,
            Action = "template.created",
            ResourceType = "template",
            ResourceId = Guid.NewGuid().ToString()
        });
        await _repository.CreateAsync(new NotifyAuditEntity
        {
            TenantId = _tenantId,
            Action = "template.updated",
            ResourceType = "template",
            ResourceId = Guid.NewGuid().ToString()
        });

        // Act
        var audits = await _repository.GetByResourceAsync(_tenantId, "template");

        // Assert
        audits.Should().HaveCount(2);
    }

    [Fact]
    public async Task GetByCorrelationId_ReturnsCorrelatedAudits()
    {
        // Arrange
        var correlationId = Guid.NewGuid().ToString();
        var audit1 = new NotifyAuditEntity
        {
            TenantId = _tenantId,
            Action = "step1",
            ResourceType = "delivery",
            CorrelationId = correlationId
        };
        var audit2 = new NotifyAuditEntity
        {
            TenantId = _tenantId,
            Action = "step2",
            ResourceType = "delivery",
            CorrelationId = correlationId
        };
        await _repository.CreateAsync(audit1);
        await _repository.CreateAsync(audit2);

        // Act
        var audits = await _repository.GetByCorrelationIdAsync(_tenantId, correlationId);

        // Assert
        audits.Should().HaveCount(2);
        audits.Should().AllSatisfy(a => a.CorrelationId.Should().Be(correlationId));
    }

    [Fact]
    public async Task DeleteOld_RemovesOldAudits()
    {
        // Arrange
        await _repository.CreateAsync(CreateAudit("old-action"));

        // Act - delete audits older than a future cutoff (removes the audit just created)
        var cutoff = DateTimeOffset.UtcNow.AddMinutes(1);
        var count = await _repository.DeleteOldAsync(cutoff);

        // Assert
        count.Should().Be(1);
    }

    private NotifyAuditEntity CreateAudit(string action) => new()
    {
        TenantId = _tenantId,
        UserId = Guid.NewGuid(),
        Action = action,
        ResourceType = "test",
        ResourceId = Guid.NewGuid().ToString()
    };
}
@@ -0,0 +1,197 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Notify.Storage.Postgres.Tests;

[Collection(NotifyPostgresCollection.Name)]
public sealed class RuleRepositoryTests : IAsyncLifetime
{
    private readonly NotifyPostgresFixture _fixture;
    private readonly RuleRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public RuleRepositoryTests(NotifyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new NotifyDataSource(Options.Create(options), NullLogger<NotifyDataSource>.Instance);
        _repository = new RuleRepository(dataSource, NullLogger<RuleRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsRule()
    {
        // Arrange
        var rule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "critical-alerts",
            Description = "Send critical alerts to ops team",
            Enabled = true,
            Priority = 100,
            EventTypes = ["scan.completed", "vulnerability.found"],
            ChannelIds = [Guid.NewGuid()]
        };

        // Act
        await _repository.CreateAsync(rule);
        var fetched = await _repository.GetByIdAsync(_tenantId, rule.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(rule.Id);
        fetched.Name.Should().Be("critical-alerts");
        fetched.Priority.Should().Be(100);
        fetched.EventTypes.Should().Contain(["scan.completed", "vulnerability.found"]);
    }

    [Fact]
    public async Task GetByName_ReturnsCorrectRule()
    {
        // Arrange
        var rule = CreateRule("info-digest");
        await _repository.CreateAsync(rule);

        // Act
        var fetched = await _repository.GetByNameAsync(_tenantId, "info-digest");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(rule.Id);
    }

    [Fact]
    public async Task List_ReturnsAllRulesForTenant()
    {
        // Arrange
        var rule1 = CreateRule("rule1");
        var rule2 = CreateRule("rule2");
        await _repository.CreateAsync(rule1);
        await _repository.CreateAsync(rule2);

        // Act
        var rules = await _repository.ListAsync(_tenantId);

        // Assert
        rules.Should().HaveCount(2);
        rules.Select(r => r.Name).Should().Contain(["rule1", "rule2"]);
    }

    [Fact]
    public async Task List_FiltersByEnabled()
    {
        // Arrange
        var enabledRule = CreateRule("enabled");
        var disabledRule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "disabled",
            Enabled = false,
            EventTypes = ["test"]
        };
        await _repository.CreateAsync(enabledRule);
        await _repository.CreateAsync(disabledRule);

        // Act
        var enabledRules = await _repository.ListAsync(_tenantId, enabled: true);

        // Assert
        enabledRules.Should().HaveCount(1);
        enabledRules[0].Name.Should().Be("enabled");
    }

    [Fact]
    public async Task GetMatchingRules_ReturnsRulesForEventType()
    {
        // Arrange
        var scanRule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "scan-rule",
            Enabled = true,
            EventTypes = ["scan.completed"]
        };
        var vulnRule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "vuln-rule",
            Enabled = true,
            EventTypes = ["vulnerability.found"]
        };
        await _repository.CreateAsync(scanRule);
        await _repository.CreateAsync(vulnRule);

        // Act
        var matchingRules = await _repository.GetMatchingRulesAsync(_tenantId, "scan.completed");

        // Assert
        matchingRules.Should().HaveCount(1);
        matchingRules[0].Name.Should().Be("scan-rule");
    }

    [Fact]
    public async Task Update_ModifiesRule()
    {
        // Arrange
        var rule = CreateRule("update-test");
        await _repository.CreateAsync(rule);

        // Act
        var updated = new RuleEntity
        {
            Id = rule.Id,
            TenantId = _tenantId,
            Name = "update-test",
            Description = "Updated description",
            Priority = 200,
            Enabled = false,
            EventTypes = ["new.event"]
        };
        var result = await _repository.UpdateAsync(updated);
        var fetched = await _repository.GetByIdAsync(_tenantId, rule.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Description.Should().Be("Updated description");
        fetched.Priority.Should().Be(200);
        fetched.Enabled.Should().BeFalse();
    }

    [Fact]
    public async Task Delete_RemovesRule()
    {
        // Arrange
        var rule = CreateRule("delete-test");
        await _repository.CreateAsync(rule);

        // Act
        var result = await _repository.DeleteAsync(_tenantId, rule.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, rule.Id);

        // Assert
        result.Should().BeTrue();
        fetched.Should().BeNull();
    }

    private RuleEntity CreateRule(string name) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        Name = name,
        Enabled = true,
        EventTypes = ["test.event"]
    };
}
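
// Editorial sketch, not part of this commit: the matching rule the
// GetMatchingRules test above implies - enabled rules whose EventTypes list
// contains the event. Inferred from the test expectations, not from the SQL.
internal static class RuleMatchingExample
{
    public static bool Matches(RuleEntity rule, string eventType)
    {
        if (!rule.Enabled)
        {
            return false;
        }

        foreach (var candidate in rule.EventTypes)
        {
            if (candidate == eventType)
            {
                return true;
            }
        }

        return false;
    }
}
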
@@ -0,0 +1,189 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Notify.Storage.Postgres.Tests;

[Collection(NotifyPostgresCollection.Name)]
public sealed class TemplateRepositoryTests : IAsyncLifetime
{
    private readonly NotifyPostgresFixture _fixture;
    private readonly TemplateRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public TemplateRepositoryTests(NotifyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new NotifyDataSource(Options.Create(options), NullLogger<NotifyDataSource>.Instance);
        _repository = new TemplateRepository(dataSource, NullLogger<TemplateRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsTemplate()
    {
        // Arrange
        var template = new TemplateEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "scan-completed",
            ChannelType = ChannelType.Email,
            SubjectTemplate = "Scan Completed: {{imageName}}",
            BodyTemplate = "<p>Scan for {{imageName}} completed with {{vulnCount}} vulnerabilities.</p>",
            Locale = "en"
        };

        // Act
        await _repository.CreateAsync(template);
        var fetched = await _repository.GetByIdAsync(_tenantId, template.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(template.Id);
        fetched.Name.Should().Be("scan-completed");
        fetched.ChannelType.Should().Be(ChannelType.Email);
        fetched.SubjectTemplate.Should().Contain("{{imageName}}");
    }

    [Fact]
    public async Task GetByName_ReturnsCorrectTemplate()
    {
        // Arrange
        var template = CreateTemplate("alert-template", ChannelType.Slack);
        await _repository.CreateAsync(template);

        // Act
        var fetched = await _repository.GetByNameAsync(_tenantId, "alert-template", ChannelType.Slack);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(template.Id);
    }

    [Fact]
    public async Task GetByName_FiltersCorrectlyByLocale()
    {
        // Arrange
        var enTemplate = new TemplateEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "localized-template",
            ChannelType = ChannelType.Email,
            BodyTemplate = "English content",
            Locale = "en"
        };
        var frTemplate = new TemplateEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "localized-template",
            ChannelType = ChannelType.Email,
            BodyTemplate = "Contenu français",
            Locale = "fr"
        };
        await _repository.CreateAsync(enTemplate);
        await _repository.CreateAsync(frTemplate);

        // Act
        var frFetched = await _repository.GetByNameAsync(_tenantId, "localized-template", ChannelType.Email, "fr");

        // Assert
        frFetched.Should().NotBeNull();
        frFetched!.BodyTemplate.Should().Contain("français");
    }

    [Fact]
    public async Task List_ReturnsAllTemplatesForTenant()
    {
        // Arrange
        var template1 = CreateTemplate("template1", ChannelType.Email);
        var template2 = CreateTemplate("template2", ChannelType.Slack);
        await _repository.CreateAsync(template1);
        await _repository.CreateAsync(template2);

        // Act
        var templates = await _repository.ListAsync(_tenantId);

        // Assert
        templates.Should().HaveCount(2);
        templates.Select(t => t.Name).Should().Contain(["template1", "template2"]);
    }

    [Fact]
    public async Task List_FiltersByChannelType()
    {
        // Arrange
        var emailTemplate = CreateTemplate("email", ChannelType.Email);
        var slackTemplate = CreateTemplate("slack", ChannelType.Slack);
        await _repository.CreateAsync(emailTemplate);
        await _repository.CreateAsync(slackTemplate);

        // Act
        var emailTemplates = await _repository.ListAsync(_tenantId, channelType: ChannelType.Email);

        // Assert
        emailTemplates.Should().HaveCount(1);
        emailTemplates[0].Name.Should().Be("email");
    }

    [Fact]
    public async Task Update_ModifiesTemplate()
    {
        // Arrange
        var template = CreateTemplate("update-test", ChannelType.Email);
        await _repository.CreateAsync(template);

        // Act
        var updated = new TemplateEntity
        {
            Id = template.Id,
            TenantId = _tenantId,
            Name = "update-test",
            ChannelType = ChannelType.Email,
            SubjectTemplate = "Updated Subject",
            BodyTemplate = "Updated body content"
        };
        var result = await _repository.UpdateAsync(updated);
        var fetched = await _repository.GetByIdAsync(_tenantId, template.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.SubjectTemplate.Should().Be("Updated Subject");
        fetched.BodyTemplate.Should().Be("Updated body content");
    }

    [Fact]
    public async Task Delete_RemovesTemplate()
    {
        // Arrange
        var template = CreateTemplate("delete-test", ChannelType.Email);
        await _repository.CreateAsync(template);

        // Act
        var result = await _repository.DeleteAsync(_tenantId, template.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, template.Id);

        // Assert
        result.Should().BeTrue();
        fetched.Should().BeNull();
    }

    private TemplateEntity CreateTemplate(string name, ChannelType type) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        Name = name,
        ChannelType = type,
        BodyTemplate = "Default template body"
    };
}
@@ -0,0 +1,567 @@
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;

namespace StellaOps.Orchestrator.Core.Domain.Events;

/// <summary>
/// Standardized event envelope for orchestrator events.
/// Supports policy, export, and job lifecycle events with idempotency keys.
/// </summary>
public sealed record EventEnvelope(
    /// <summary>Schema version identifier.</summary>
    string SchemaVersion,

    /// <summary>Unique event ID (UUIDv7 or ULID format).</summary>
    string EventId,

    /// <summary>Event type classification.</summary>
    OrchestratorEventType EventType,

    /// <summary>When the event occurred (UTC).</summary>
    DateTimeOffset OccurredAt,

    /// <summary>Idempotency key for deduplication.</summary>
    string IdempotencyKey,

    /// <summary>Correlation ID for request tracing.</summary>
    string? CorrelationId,

    /// <summary>Tenant identifier.</summary>
    string TenantId,

    /// <summary>Project identifier (optional but preferred).</summary>
    string? ProjectId,

    /// <summary>Actor who triggered/emitted the event.</summary>
    EventActor Actor,

    /// <summary>Job-related metadata (null for non-job events).</summary>
    EventJob? Job,

    /// <summary>Event metrics.</summary>
    EventMetrics? Metrics,

    /// <summary>Notifier transport metadata.</summary>
    EventNotifier? Notifier,

    /// <summary>Event-specific payload.</summary>
    JsonElement? Payload)
{
    /// <summary>Current schema version.</summary>
    public const string CurrentSchemaVersion = "orch.event.v1";

    /// <summary>Creates a new event envelope with generated ID and timestamp.</summary>
    public static EventEnvelope Create(
        OrchestratorEventType eventType,
        string tenantId,
        EventActor actor,
        string? correlationId = null,
        string? projectId = null,
        EventJob? job = null,
        EventMetrics? metrics = null,
        EventNotifier? notifier = null,
        JsonElement? payload = null)
    {
        var eventId = GenerateEventId();
        var idempotencyKey = GenerateIdempotencyKey(eventType, job?.Id, job?.Attempt ?? 0);

        return new EventEnvelope(
            SchemaVersion: CurrentSchemaVersion,
            EventId: eventId,
            EventType: eventType,
            OccurredAt: DateTimeOffset.UtcNow,
            IdempotencyKey: idempotencyKey,
            CorrelationId: correlationId,
            TenantId: tenantId,
            ProjectId: projectId,
            Actor: actor,
            Job: job,
            Metrics: metrics,
            Notifier: notifier,
            Payload: payload);
    }

    /// <summary>Creates a job-related event envelope.</summary>
    public static EventEnvelope ForJob(
        OrchestratorEventType eventType,
        string tenantId,
        EventActor actor,
        EventJob job,
        string? correlationId = null,
        string? projectId = null,
        EventMetrics? metrics = null,
        JsonElement? payload = null)
    {
        return Create(
            eventType: eventType,
            tenantId: tenantId,
            actor: actor,
            correlationId: correlationId,
            projectId: projectId,
            job: job,
            metrics: metrics,
            payload: payload);
    }

    /// <summary>Creates an export-related event envelope.</summary>
    public static EventEnvelope ForExport(
        OrchestratorEventType eventType,
        string tenantId,
        EventActor actor,
        EventJob exportJob,
        string? correlationId = null,
        string? projectId = null,
        EventMetrics? metrics = null,
        JsonElement? payload = null)
    {
        return ForJob(
            eventType: eventType,
            tenantId: tenantId,
            actor: actor,
            job: exportJob,
            correlationId: correlationId,
            projectId: projectId,
            metrics: metrics,
            payload: payload);
    }

    /// <summary>Creates a policy-related event envelope.</summary>
    public static EventEnvelope ForPolicy(
        OrchestratorEventType eventType,
        string tenantId,
        EventActor actor,
        string? correlationId = null,
        string? projectId = null,
        JsonElement? payload = null)
    {
        return Create(
            eventType: eventType,
            tenantId: tenantId,
            actor: actor,
            correlationId: correlationId,
            projectId: projectId,
            payload: payload);
    }

    /// <summary>Generates a UUIDv7-style event ID.</summary>
    private static string GenerateEventId()
    {
        // UUIDv7-style: millisecond Unix timestamp (hex) plus a 16-character random suffix
        var timestamp = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
        var random = Guid.NewGuid().ToString("N")[..16];
        return $"urn:orch:event:{timestamp:x}-{random}";
    }

    /// <summary>Generates an idempotency key for deduplication.</summary>
    public static string GenerateIdempotencyKey(OrchestratorEventType eventType, string? jobId, int attempt)
    {
        var jobPart = jobId ?? "none";
        return $"orch-{eventType.ToEventTypeName()}-{jobPart}-{attempt}";
    }

    /// <summary>Serializes the envelope to JSON.</summary>
    public string ToJson() => JsonSerializer.Serialize(this, JsonOptions);

    /// <summary>Deserializes an envelope from JSON.</summary>
    public static EventEnvelope? FromJson(string json)
    {
        try
        {
            return JsonSerializer.Deserialize<EventEnvelope>(json, JsonOptions);
        }
        catch (JsonException)
        {
            return null;
        }
    }

    /// <summary>Computes a digest of the envelope for signing.</summary>
    public string ComputeDigest()
    {
        var json = ToJson();
        var bytes = Encoding.UTF8.GetBytes(json);
        var hash = SHA256.HashData(bytes);
        return $"sha256:{Convert.ToHexStringLower(hash)}";
    }

    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull,
        WriteIndented = false,
        Converters = { new JsonStringEnumConverter(JsonNamingPolicy.SnakeCaseLower) }
    };
}

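// Editorial sketch, not part of this commit: how a caller might emit a
// job-completed event with the factories defined in this file. The tenant
// and job identifiers are hypothetical.
internal static class EventEnvelopeUsageExample
{
    public static string EmitJobCompleted()
    {
        var envelope = EventEnvelope.ForJob(
            eventType: OrchestratorEventType.JobCompleted,
            tenantId: "tenant-01",
            actor: EventActor.Service("orchestrator"),
            job: EventJob.Completed(id: "job-123", type: "export", attempt: 1));

        // IdempotencyKey comes out as "orch-job.completed-job-123-1";
        // the digest is "sha256:<hex>" over the canonical camelCase JSON.
        return envelope.ComputeDigest();
    }
}
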
/// <summary>
/// Actor who triggered or emitted an event.
/// </summary>
public sealed record EventActor(
    /// <summary>Subject identifier (e.g., "service/worker-sdk-go", "user/admin@example.com").</summary>
    string Subject,

    /// <summary>Scopes/permissions under which the action was taken.</summary>
    IReadOnlyList<string>? Scopes)
{
    /// <summary>Creates a service actor.</summary>
    public static EventActor Service(string serviceName, params string[] scopes)
        => new($"service/{serviceName}", scopes.Length > 0 ? scopes : null);

    /// <summary>Creates a user actor.</summary>
    public static EventActor User(string userId, params string[] scopes)
        => new($"user/{userId}", scopes.Length > 0 ? scopes : null);

    /// <summary>Creates a system actor (for automated processes).</summary>
    public static EventActor System(string component, params string[] scopes)
        => new($"system/{component}", scopes.Length > 0 ? scopes : null);

    /// <summary>Creates a worker actor.</summary>
    public static EventActor Worker(string workerId, string sdkType)
        => new($"worker/{sdkType}/{workerId}", null);
}

/// <summary>
/// Job-related metadata in an event.
/// </summary>
public sealed record EventJob(
    /// <summary>Job identifier.</summary>
    string Id,

    /// <summary>Job type (e.g., "pack-run", "ingest", "export").</summary>
    string Type,

    /// <summary>Run identifier (for pack runs / simulations).</summary>
    string? RunId,

    /// <summary>Attempt number.</summary>
    int Attempt,

    /// <summary>Lease identifier.</summary>
    string? LeaseId,

    /// <summary>Task runner identifier.</summary>
    string? TaskRunnerId,

    /// <summary>Job status.</summary>
    string Status,

    /// <summary>Status reason (for failures/cancellations).</summary>
    string? Reason,

    /// <summary>Payload digest for integrity.</summary>
    string? PayloadDigest,

    /// <summary>Associated artifacts.</summary>
    IReadOnlyList<EventArtifact>? Artifacts)
{
    /// <summary>Creates job metadata from basic info.</summary>
    public static EventJob Create(
        string id,
        string type,
        string status,
        int attempt = 1,
        string? runId = null,
        string? leaseId = null,
        string? taskRunnerId = null,
        string? reason = null,
        string? payloadDigest = null,
        IReadOnlyList<EventArtifact>? artifacts = null)
    {
        return new EventJob(
            Id: id,
            Type: type,
            RunId: runId,
            Attempt: attempt,
            LeaseId: leaseId,
            TaskRunnerId: taskRunnerId,
            Status: status,
            Reason: reason,
            PayloadDigest: payloadDigest,
            Artifacts: artifacts);
    }

    /// <summary>Creates a completed job event.</summary>
    public static EventJob Completed(string id, string type, int attempt, string? payloadDigest = null, IReadOnlyList<EventArtifact>? artifacts = null)
        => Create(id, type, "completed", attempt, payloadDigest: payloadDigest, artifacts: artifacts);

    /// <summary>Creates a failed job event.</summary>
    public static EventJob Failed(string id, string type, int attempt, string reason)
        => Create(id, type, "failed", attempt, reason: reason);

    /// <summary>Creates a canceled job event.</summary>
    public static EventJob Canceled(string id, string type, int attempt, string reason)
        => Create(id, type, "canceled", attempt, reason: reason);
}

/// <summary>
/// Artifact metadata in an event.
/// </summary>
public sealed record EventArtifact(
    /// <summary>Artifact URI (storage location).</summary>
    string Uri,

    /// <summary>Content digest for integrity.</summary>
    string Digest,

    /// <summary>MIME type.</summary>
    string Mime);

/// <summary>
/// Event timing and performance metrics.
/// </summary>
public sealed record EventMetrics(
    /// <summary>Duration in seconds.</summary>
    double? DurationSeconds,

    /// <summary>Log stream lag in seconds.</summary>
    double? LogStreamLagSeconds,

    /// <summary>Backoff delay in seconds.</summary>
    double? BackoffSeconds,

    /// <summary>Queue wait time in seconds.</summary>
    double? QueueWaitSeconds,

    /// <summary>Processing time in seconds.</summary>
    double? ProcessingSeconds)
{
    /// <summary>Creates metrics with just duration.</summary>
    public static EventMetrics WithDuration(double seconds)
        => new(seconds, null, null, null, null);

    /// <summary>Creates metrics with duration and processing breakdown.</summary>
    public static EventMetrics WithBreakdown(double total, double queueWait, double processing)
        => new(total, null, null, queueWait, processing);
}

/// <summary>
/// Notifier transport metadata.
/// </summary>
public sealed record EventNotifier(
    /// <summary>Notifier channel name.</summary>
    string Channel,

    /// <summary>Delivery format (e.g., "dsse", "raw").</summary>
    string Delivery,

    /// <summary>Replay metadata (for replayed events).</summary>
    EventReplay? Replay)
{
    /// <summary>Creates notifier metadata for the jobs channel.</summary>
    public static EventNotifier JobsChannel(string delivery = "dsse")
        => new("orch.jobs", delivery, null);

    /// <summary>Creates notifier metadata for the exports channel.</summary>
    public static EventNotifier ExportsChannel(string delivery = "dsse")
        => new("orch.exports", delivery, null);

    /// <summary>Creates notifier metadata for the policy channel.</summary>
    public static EventNotifier PolicyChannel(string delivery = "dsse")
        => new("orch.policy", delivery, null);

    /// <summary>Adds replay metadata.</summary>
    public EventNotifier WithReplay(int ordinal, int total)
        => this with { Replay = new EventReplay(ordinal, total) };
}

/// <summary>
/// Replay metadata for replayed events.
/// </summary>
public sealed record EventReplay(
    /// <summary>Ordinal position in replay sequence.</summary>
    int Ordinal,

    /// <summary>Total events in replay sequence.</summary>
    int Total);

/// <summary>
/// Orchestrator event types.
/// </summary>
public enum OrchestratorEventType
{
    // Job lifecycle
    JobCreated,
    JobScheduled,
    JobStarted,
    JobCompleted,
    JobFailed,
    JobCanceled,
    JobRetrying,

    // Export lifecycle
    ExportCreated,
    ExportStarted,
    ExportCompleted,
    ExportFailed,
    ExportCanceled,
    ExportArchived,
    ExportExpired,
    ExportDeleted,

    // Schedule lifecycle
    ScheduleCreated,
    ScheduleEnabled,
    ScheduleDisabled,
    ScheduleTriggered,
    ScheduleSkipped,

    // Alert lifecycle
    AlertCreated,
    AlertAcknowledged,
    AlertResolved,

    // Retention lifecycle
    RetentionPruneStarted,
    RetentionPruneCompleted,

    // Policy lifecycle
    PolicyUpdated,
    PolicySimulated,
    PolicyApplied,

    // Pack run lifecycle
    PackRunCreated,
    PackRunStarted,
    PackRunLog,
    PackRunArtifact,
    PackRunCompleted,
    PackRunFailed
}

/// <summary>
/// Extension methods for event types.
/// </summary>
public static class OrchestratorEventTypeExtensions
{
    /// <summary>Converts event type to canonical string name.</summary>
    public static string ToEventTypeName(this OrchestratorEventType eventType)
    {
        return eventType switch
        {
            OrchestratorEventType.JobCreated => "job.created",
            OrchestratorEventType.JobScheduled => "job.scheduled",
            OrchestratorEventType.JobStarted => "job.started",
            OrchestratorEventType.JobCompleted => "job.completed",
            OrchestratorEventType.JobFailed => "job.failed",
            OrchestratorEventType.JobCanceled => "job.canceled",
            OrchestratorEventType.JobRetrying => "job.retrying",

            OrchestratorEventType.ExportCreated => "export.created",
            OrchestratorEventType.ExportStarted => "export.started",
            OrchestratorEventType.ExportCompleted => "export.completed",
            OrchestratorEventType.ExportFailed => "export.failed",
            OrchestratorEventType.ExportCanceled => "export.canceled",
            OrchestratorEventType.ExportArchived => "export.archived",
            OrchestratorEventType.ExportExpired => "export.expired",
            OrchestratorEventType.ExportDeleted => "export.deleted",

            OrchestratorEventType.ScheduleCreated => "schedule.created",
            OrchestratorEventType.ScheduleEnabled => "schedule.enabled",
            OrchestratorEventType.ScheduleDisabled => "schedule.disabled",
            OrchestratorEventType.ScheduleTriggered => "schedule.triggered",
            OrchestratorEventType.ScheduleSkipped => "schedule.skipped",

            OrchestratorEventType.AlertCreated => "alert.created",
            OrchestratorEventType.AlertAcknowledged => "alert.acknowledged",
            OrchestratorEventType.AlertResolved => "alert.resolved",

            OrchestratorEventType.RetentionPruneStarted => "retention.prune_started",
            OrchestratorEventType.RetentionPruneCompleted => "retention.prune_completed",

            OrchestratorEventType.PolicyUpdated => "policy.updated",
            OrchestratorEventType.PolicySimulated => "policy.simulated",
            OrchestratorEventType.PolicyApplied => "policy.applied",

            OrchestratorEventType.PackRunCreated => "pack_run.created",
            OrchestratorEventType.PackRunStarted => "pack_run.started",
            OrchestratorEventType.PackRunLog => "pack_run.log",
            OrchestratorEventType.PackRunArtifact => "pack_run.artifact",
            OrchestratorEventType.PackRunCompleted => "pack_run.completed",
            OrchestratorEventType.PackRunFailed => "pack_run.failed",

            _ => eventType.ToString().ToLowerInvariant()
        };
    }

    /// <summary>Parses a canonical event type name.</summary>
    public static OrchestratorEventType? FromEventTypeName(string name)
    {
        return name switch
        {
            "job.created" => OrchestratorEventType.JobCreated,
            "job.scheduled" => OrchestratorEventType.JobScheduled,
            "job.started" => OrchestratorEventType.JobStarted,
            "job.completed" => OrchestratorEventType.JobCompleted,
            "job.failed" => OrchestratorEventType.JobFailed,
            "job.canceled" => OrchestratorEventType.JobCanceled,
            "job.retrying" => OrchestratorEventType.JobRetrying,

            "export.created" => OrchestratorEventType.ExportCreated,
            "export.started" => OrchestratorEventType.ExportStarted,
            "export.completed" => OrchestratorEventType.ExportCompleted,
            "export.failed" => OrchestratorEventType.ExportFailed,
            "export.canceled" => OrchestratorEventType.ExportCanceled,
            "export.archived" => OrchestratorEventType.ExportArchived,
            "export.expired" => OrchestratorEventType.ExportExpired,
            "export.deleted" => OrchestratorEventType.ExportDeleted,

            "schedule.created" => OrchestratorEventType.ScheduleCreated,
            "schedule.enabled" => OrchestratorEventType.ScheduleEnabled,
            "schedule.disabled" => OrchestratorEventType.ScheduleDisabled,
            "schedule.triggered" => OrchestratorEventType.ScheduleTriggered,
            "schedule.skipped" => OrchestratorEventType.ScheduleSkipped,

            "alert.created" => OrchestratorEventType.AlertCreated,
            "alert.acknowledged" => OrchestratorEventType.AlertAcknowledged,
            "alert.resolved" => OrchestratorEventType.AlertResolved,

            "retention.prune_started" => OrchestratorEventType.RetentionPruneStarted,
            "retention.prune_completed" => OrchestratorEventType.RetentionPruneCompleted,

            "policy.updated" => OrchestratorEventType.PolicyUpdated,
            "policy.simulated" => OrchestratorEventType.PolicySimulated,
            "policy.applied" => OrchestratorEventType.PolicyApplied,

            "pack_run.created" => OrchestratorEventType.PackRunCreated,
            "pack_run.started" => OrchestratorEventType.PackRunStarted,
            "pack_run.log" => OrchestratorEventType.PackRunLog,
            "pack_run.artifact" => OrchestratorEventType.PackRunArtifact,
            "pack_run.completed" => OrchestratorEventType.PackRunCompleted,
            "pack_run.failed" => OrchestratorEventType.PackRunFailed,

            _ => null
        };
    }

    /// <summary>Whether the event type is a failure event.</summary>
    public static bool IsFailure(this OrchestratorEventType eventType)
    {
        return eventType is
            OrchestratorEventType.JobFailed or
            OrchestratorEventType.ExportFailed or
            OrchestratorEventType.PackRunFailed;
    }

    /// <summary>Whether the event type is a completion event.</summary>
    public static bool IsCompletion(this OrchestratorEventType eventType)
    {
        return eventType is
            OrchestratorEventType.JobCompleted or
            OrchestratorEventType.ExportCompleted or
            OrchestratorEventType.PackRunCompleted or
            OrchestratorEventType.RetentionPruneCompleted;
    }

    /// <summary>Whether the event type is a lifecycle terminal event.</summary>
    public static bool IsTerminal(this OrchestratorEventType eventType)
    {
        return eventType.IsFailure() || eventType.IsCompletion() ||
            eventType is
                OrchestratorEventType.JobCanceled or
                OrchestratorEventType.ExportCanceled or
                OrchestratorEventType.ExportDeleted or
                OrchestratorEventType.AlertResolved;
    }
}
@@ -0,0 +1,241 @@
|
||||
namespace StellaOps.Orchestrator.Core.Domain.Events;
|
||||
|
||||
/// <summary>
|
||||
/// Interface for publishing orchestrator events to the notifier bus.
|
||||
/// </summary>
|
||||
public interface IEventPublisher
|
||||
{
|
||||
/// <summary>Publishes an event to the notifier bus.</summary>
|
||||
/// <param name="envelope">The event envelope to publish.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
/// <returns>True if published successfully; false if deduplicated.</returns>
|
||||
Task<bool> PublishAsync(EventEnvelope envelope, CancellationToken cancellationToken = default);
|
||||
|
||||
/// <summary>Publishes multiple events to the notifier bus.</summary>
|
||||
/// <param name="envelopes">The event envelopes to publish.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
/// <returns>The result containing success/dedup counts.</returns>
|
||||
Task<BatchPublishResult> PublishBatchAsync(IEnumerable<EventEnvelope> envelopes, CancellationToken cancellationToken = default);
|
||||
|
||||
/// <summary>Checks if an event with the given idempotency key has already been published.</summary>
|
||||
/// <param name="idempotencyKey">The idempotency key to check.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
/// <returns>True if already published.</returns>
|
||||
Task<bool> IsPublishedAsync(string idempotencyKey, CancellationToken cancellationToken = default);
|
||||
}
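
// Example (illustrative only, not part of this commit): callers can gate side
// effects on the dedup result of PublishAsync. `publisher`, `envelope`, and
// `logger` are hypothetical instances; EventEnvelope's members are assumed.
//
//     var published = await publisher.PublishAsync(envelope, ct);
//     if (!published)
//     {
//         logger.LogDebug("event deduplicated, skipping downstream work");
//     }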

/// <summary>
/// Result of a batch publish operation.
/// </summary>
public sealed record BatchPublishResult(
    /// <summary>Number of events successfully published.</summary>
    int Published,

    /// <summary>Number of events deduplicated (already published).</summary>
    int Deduplicated,

    /// <summary>Number of events that failed to publish.</summary>
    int Failed,

    /// <summary>Errors encountered during publishing.</summary>
    IReadOnlyList<string> Errors)
{
    /// <summary>Total events processed.</summary>
    public int Total => Published + Deduplicated + Failed;

    /// <summary>Whether any events were published successfully.</summary>
    public bool HasPublished => Published > 0;

    /// <summary>Whether any errors occurred.</summary>
    public bool HasErrors => Failed > 0 || Errors.Count > 0;

    /// <summary>Creates an empty result.</summary>
    public static BatchPublishResult Empty => new(0, 0, 0, []);

    /// <summary>Creates a successful single publish result.</summary>
    public static BatchPublishResult SingleSuccess => new(1, 0, 0, []);

    /// <summary>Creates a deduplicated single result.</summary>
    public static BatchPublishResult SingleDeduplicated => new(0, 1, 0, []);
}

/// <summary>
/// Event publishing options.
/// </summary>
public sealed record EventPublishOptions(
    /// <summary>Whether to sign events with DSSE.</summary>
    bool SignWithDsse,

    /// <summary>Maximum retry attempts for transient failures.</summary>
    int MaxRetries,

    /// <summary>Base delay between retries.</summary>
    TimeSpan RetryDelay,

    /// <summary>TTL for idempotency key tracking.</summary>
    TimeSpan IdempotencyTtl,

    /// <summary>Whether to include provenance metadata.</summary>
    bool IncludeProvenance,

    /// <summary>Whether to compress large payloads.</summary>
    bool CompressLargePayloads,

    /// <summary>Threshold for payload compression (bytes).</summary>
    int CompressionThreshold)
{
    /// <summary>Default publishing options.</summary>
    public static EventPublishOptions Default => new(
        SignWithDsse: true,
        MaxRetries: 3,
        RetryDelay: TimeSpan.FromSeconds(1),
        IdempotencyTtl: TimeSpan.FromHours(24),
        IncludeProvenance: true,
        CompressLargePayloads: true,
        CompressionThreshold: 64 * 1024);
}
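
// Example (illustrative only, not part of this commit): because this is a
// record, callers can start from Default and override individual knobs with a
// non-destructive `with` expression:
//
//     var options = EventPublishOptions.Default with { MaxRetries = 5, SignWithDsse = false };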

/// <summary>
/// Interface for event signing.
/// </summary>
public interface IEventSigner
{
    /// <summary>Signs an event envelope with DSSE.</summary>
    /// <param name="envelope">The envelope to sign.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The signed envelope as a DSSE payload.</returns>
    Task<string> SignAsync(EventEnvelope envelope, CancellationToken cancellationToken = default);

    /// <summary>Verifies a signed event envelope.</summary>
    /// <param name="signedPayload">The signed DSSE payload.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The verified envelope, or null if verification fails.</returns>
    Task<EventEnvelope?> VerifyAsync(string signedPayload, CancellationToken cancellationToken = default);
}

/// <summary>
/// Interface for idempotency tracking.
/// </summary>
public interface IIdempotencyStore
{
    /// <summary>Tries to mark an idempotency key as processed.</summary>
    /// <param name="key">The idempotency key.</param>
    /// <param name="ttl">TTL for the key.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>True if newly marked; false if already existed.</returns>
    Task<bool> TryMarkAsync(string key, TimeSpan ttl, CancellationToken cancellationToken = default);

    /// <summary>Checks if an idempotency key exists.</summary>
    /// <param name="key">The idempotency key.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>True if the key exists.</returns>
    Task<bool> ExistsAsync(string key, CancellationToken cancellationToken = default);

    /// <summary>Removes an idempotency key.</summary>
    /// <param name="key">The idempotency key.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task RemoveAsync(string key, CancellationToken cancellationToken = default);
}

/// <summary>
/// Interface for the notifier bus transport.
/// </summary>
public interface INotifierBus
{
    /// <summary>Sends a message to the notifier bus.</summary>
    /// <param name="channel">Target channel.</param>
    /// <param name="message">Message payload (JSON or signed DSSE).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task SendAsync(string channel, string message, CancellationToken cancellationToken = default);

    /// <summary>Sends multiple messages to the notifier bus.</summary>
    /// <param name="channel">Target channel.</param>
    /// <param name="messages">Message payloads.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task SendBatchAsync(string channel, IEnumerable<string> messages, CancellationToken cancellationToken = default);
}

/// <summary>
/// Null implementation of event publisher for testing.
/// </summary>
public sealed class NullEventPublisher : IEventPublisher
{
    /// <summary>Singleton instance.</summary>
    public static NullEventPublisher Instance { get; } = new();

    private NullEventPublisher() { }

    public Task<bool> PublishAsync(EventEnvelope envelope, CancellationToken cancellationToken = default)
        => Task.FromResult(true);

    public Task<BatchPublishResult> PublishBatchAsync(IEnumerable<EventEnvelope> envelopes, CancellationToken cancellationToken = default)
    {
        var count = envelopes.Count();
        return Task.FromResult(new BatchPublishResult(count, 0, 0, []));
    }

    public Task<bool> IsPublishedAsync(string idempotencyKey, CancellationToken cancellationToken = default)
        => Task.FromResult(false);
}

/// <summary>
/// In-memory implementation of idempotency store for testing.
/// </summary>
public sealed class InMemoryIdempotencyStore : IIdempotencyStore
{
    private readonly Dictionary<string, DateTimeOffset> _keys = new();
    private readonly object _lock = new();

    public Task<bool> TryMarkAsync(string key, TimeSpan ttl, CancellationToken cancellationToken = default)
    {
        lock (_lock)
        {
            CleanupExpired();
            if (_keys.ContainsKey(key))
                return Task.FromResult(false);

            _keys[key] = DateTimeOffset.UtcNow.Add(ttl);
            return Task.FromResult(true);
        }
    }

    public Task<bool> ExistsAsync(string key, CancellationToken cancellationToken = default)
    {
        lock (_lock)
        {
            CleanupExpired();
            return Task.FromResult(_keys.ContainsKey(key));
        }
    }

    public Task RemoveAsync(string key, CancellationToken cancellationToken = default)
    {
        lock (_lock)
        {
            _keys.Remove(key);
        }
        return Task.CompletedTask;
    }

    private void CleanupExpired()
    {
        var now = DateTimeOffset.UtcNow;
        var expired = _keys.Where(kv => kv.Value <= now).Select(kv => kv.Key).ToList();
        foreach (var key in expired)
        {
            _keys.Remove(key);
        }
    }

    /// <summary>Gets the current key count (for testing).</summary>
    public int Count
    {
        get { lock (_lock) { CleanupExpired(); return _keys.Count; } }
    }

    /// <summary>Clears all keys (for testing).</summary>
    public void Clear()
    {
        lock (_lock) { _keys.Clear(); }
    }
}
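
// Example (illustrative only, not part of this commit): the store acts as a
// once-only gate; TryMarkAsync returns true exactly once per key within the
// TTL window, which is what makes publish-side dedup possible:
//
//     var store = new InMemoryIdempotencyStore();
//     var first = await store.TryMarkAsync("evt-123", TimeSpan.FromHours(24));  // true
//     var second = await store.TryMarkAsync("evt-123", TimeSpan.FromHours(24)); // false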
@@ -0,0 +1,559 @@
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;

namespace StellaOps.Orchestrator.Core.Domain.Export;

/// <summary>
/// Export job payload containing export-specific parameters.
/// Serialized to JSON and stored in Job.Payload.
/// </summary>
public sealed record ExportJobPayload(
    /// <summary>Export format (e.g., "json", "ndjson", "csv", "spdx", "cyclonedx").</summary>
    string Format,

    /// <summary>Start of time range to export (inclusive).</summary>
    DateTimeOffset? StartTime,

    /// <summary>End of time range to export (exclusive).</summary>
    DateTimeOffset? EndTime,

    /// <summary>Filter by source ID.</summary>
    Guid? SourceId,

    /// <summary>Filter by project ID.</summary>
    string? ProjectId,

    /// <summary>Filter by specific entity IDs.</summary>
    IReadOnlyList<Guid>? EntityIds,

    /// <summary>Maximum entries to export (pagination/limit).</summary>
    int? MaxEntries,

    /// <summary>Whether to include provenance metadata.</summary>
    bool IncludeProvenance,

    /// <summary>Whether to sign the export output.</summary>
    bool SignOutput,

    /// <summary>Compression format (null = none, "gzip", "zstd").</summary>
    string? Compression,

    /// <summary>Destination URI for the export output.</summary>
    string? DestinationUri,

    /// <summary>Callback URL for completion notification.</summary>
    string? CallbackUrl,

    /// <summary>Additional export-specific options.</summary>
    IReadOnlyDictionary<string, string>? Options)
{
    /// <summary>Default export payload with minimal settings.</summary>
    public static ExportJobPayload Default(string format) => new(
        Format: format,
        StartTime: null,
        EndTime: null,
        SourceId: null,
        ProjectId: null,
        EntityIds: null,
        MaxEntries: null,
        IncludeProvenance: true,
        SignOutput: true,
        Compression: null,
        DestinationUri: null,
        CallbackUrl: null,
        Options: null);

    /// <summary>Serializes the payload to JSON.</summary>
    public string ToJson() => JsonSerializer.Serialize(this, JsonOptions);

    /// <summary>Computes SHA-256 digest of the payload.</summary>
    public string ComputeDigest()
    {
        var json = ToJson();
        var bytes = Encoding.UTF8.GetBytes(json);
        var hash = SHA256.HashData(bytes);
        return $"sha256:{Convert.ToHexStringLower(hash)}";
    }

    /// <summary>Deserializes a payload from JSON. Returns null for invalid JSON.</summary>
    public static ExportJobPayload? FromJson(string json)
    {
        try
        {
            return JsonSerializer.Deserialize<ExportJobPayload>(json, JsonOptions);
        }
        catch (JsonException)
        {
            return null;
        }
    }

    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false
    };
}
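
// Example (illustrative only, not part of this commit): building a payload from
// the Default factory and pinning its digest, the value that would feed a
// determinism check:
//
//     var payload = ExportJobPayload.Default("ndjson") with { MaxEntries = 1000 };
//     var digest = payload.ComputeDigest(); // "sha256:..." over the camelCase JSON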

/// <summary>
/// Export job result containing output metadata.
/// </summary>
public sealed record ExportJobResult(
    /// <summary>Output URI where export is stored.</summary>
    string OutputUri,

    /// <summary>SHA-256 digest of the output.</summary>
    string OutputDigest,

    /// <summary>Output size in bytes.</summary>
    long OutputSizeBytes,

    /// <summary>Number of entries exported.</summary>
    int EntryCount,

    /// <summary>Export format used.</summary>
    string Format,

    /// <summary>Compression applied (if any).</summary>
    string? Compression,

    /// <summary>Provenance attestation URI (if signed).</summary>
    string? ProvenanceUri,

    /// <summary>Start of actual exported time range.</summary>
    DateTimeOffset? ActualStartTime,

    /// <summary>End of actual exported time range.</summary>
    DateTimeOffset? ActualEndTime,

    /// <summary>Export generation timestamp.</summary>
    DateTimeOffset GeneratedAt,

    /// <summary>Duration of export operation in seconds.</summary>
    double DurationSeconds)
{
    /// <summary>Serializes the result to JSON.</summary>
    public string ToJson() => JsonSerializer.Serialize(this, JsonOptions);

    /// <summary>Deserializes a result from JSON.</summary>
    public static ExportJobResult? FromJson(string json) =>
        JsonSerializer.Deserialize<ExportJobResult>(json, JsonOptions);

    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false
    };
}

/// <summary>
/// Export job progress information.
/// </summary>
public sealed record ExportJobProgress(
    /// <summary>Current phase of export.</summary>
    ExportPhase Phase,

    /// <summary>Entries processed so far.</summary>
    int EntriesProcessed,

    /// <summary>Total entries to process (if known).</summary>
    int? TotalEntries,

    /// <summary>Bytes written so far.</summary>
    long BytesWritten,

    /// <summary>Current progress message.</summary>
    string? Message)
{
    /// <summary>Computes progress percentage (0-100).</summary>
    public double? ProgressPercent => TotalEntries > 0
        ? Math.Min(100.0, 100.0 * EntriesProcessed / TotalEntries.Value)
        : null;

    /// <summary>Serializes the progress to JSON.</summary>
    public string ToJson() => JsonSerializer.Serialize(this, JsonOptions);

    /// <summary>Deserializes progress from JSON.</summary>
    public static ExportJobProgress? FromJson(string json) =>
        JsonSerializer.Deserialize<ExportJobProgress>(json, JsonOptions);

    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false
    };
}
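
// Worked example (illustrative only, not part of this commit): with
// EntriesProcessed = 250 and TotalEntries = 1000, ProgressPercent is
// 100.0 * 250 / 1000 = 25.0. When TotalEntries is null or 0 it stays null,
// and the Math.Min clamp keeps overshoot (e.g., 1100/1000) at 100.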

/// <summary>
/// Export job phases.
/// </summary>
public enum ExportPhase
{
    /// <summary>Initializing export.</summary>
    Initializing = 0,

    /// <summary>Querying data.</summary>
    Querying = 1,

    /// <summary>Formatting output.</summary>
    Formatting = 2,

    /// <summary>Compressing output.</summary>
    Compressing = 3,

    /// <summary>Signing/attesting output.</summary>
    Signing = 4,

    /// <summary>Uploading to destination.</summary>
    Uploading = 5,

    /// <summary>Finalizing export.</summary>
    Finalizing = 6,

    /// <summary>Export completed.</summary>
    Completed = 7
}

/// <summary>
/// Distribution metadata for export jobs.
/// Tracks where exports are stored, download URLs, and replication status.
/// </summary>
public sealed record ExportDistribution(
    /// <summary>Primary storage location URI.</summary>
    string PrimaryUri,

    /// <summary>Pre-signed download URL (time-limited).</summary>
    string? DownloadUrl,

    /// <summary>Download URL expiration time.</summary>
    DateTimeOffset? DownloadUrlExpiresAt,

    /// <summary>Storage provider (e.g., "s3", "azure-blob", "gcs", "local").</summary>
    string StorageProvider,

    /// <summary>Storage region/location.</summary>
    string? Region,

    /// <summary>Storage tier (e.g., "hot", "cool", "archive").</summary>
    string StorageTier,

    /// <summary>Replication targets with their URIs.</summary>
    IReadOnlyDictionary<string, string>? Replicas,

    /// <summary>Replication status per target.</summary>
    IReadOnlyDictionary<string, ReplicationStatus>? ReplicationStatus,

    /// <summary>Content type of the export.</summary>
    string ContentType,

    /// <summary>Access control list (principals with access).</summary>
    IReadOnlyList<string>? AccessList,

    /// <summary>Whether export is publicly accessible.</summary>
    bool IsPublic,

    /// <summary>Distribution creation timestamp.</summary>
    DateTimeOffset CreatedAt)
{
    /// <summary>Serializes distribution to JSON.</summary>
    public string ToJson() => JsonSerializer.Serialize(this, JsonOptions);

    /// <summary>Deserializes distribution from JSON.</summary>
    public static ExportDistribution? FromJson(string json)
    {
        try
        {
            return JsonSerializer.Deserialize<ExportDistribution>(json, JsonOptions);
        }
        catch (JsonException)
        {
            return null;
        }
    }

    /// <summary>Creates a download URL with expiration.</summary>
    public ExportDistribution WithDownloadUrl(string url, TimeSpan validity) => this with
    {
        DownloadUrl = url,
        DownloadUrlExpiresAt = DateTimeOffset.UtcNow.Add(validity)
    };

    /// <summary>Adds a replication target.</summary>
    public ExportDistribution WithReplica(string target, string uri, ReplicationStatus status)
    {
        var replicas = Replicas is null
            ? new Dictionary<string, string> { [target] = uri }
            : new Dictionary<string, string>(Replicas) { [target] = uri };

        var replicationStatus = ReplicationStatus is null
            ? new Dictionary<string, ReplicationStatus> { [target] = status }
            : new Dictionary<string, ReplicationStatus>(ReplicationStatus) { [target] = status };

        return this with { Replicas = replicas, ReplicationStatus = replicationStatus };
    }

    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false
    };
}

/// <summary>
/// Replication status for a distribution target.
/// </summary>
public enum ReplicationStatus
{
    /// <summary>Replication pending.</summary>
    Pending = 0,

    /// <summary>Replication in progress.</summary>
    InProgress = 1,

    /// <summary>Replication completed successfully.</summary>
    Completed = 2,

    /// <summary>Replication failed.</summary>
    Failed = 3,

    /// <summary>Replication skipped.</summary>
    Skipped = 4
}

/// <summary>
/// Retention policy and timestamps for export jobs.
/// Controls when exports are archived, deleted, or need manual action.
/// </summary>
public sealed record ExportRetention(
    /// <summary>Retention policy name.</summary>
    string PolicyName,

    /// <summary>When the export becomes available for download.</summary>
    DateTimeOffset AvailableAt,

    /// <summary>When the export should be moved to archive tier.</summary>
    DateTimeOffset? ArchiveAt,

    /// <summary>When the export should be deleted.</summary>
    DateTimeOffset? ExpiresAt,

    /// <summary>When the export was actually archived.</summary>
    DateTimeOffset? ArchivedAt,

    /// <summary>When the export was actually deleted.</summary>
    DateTimeOffset? DeletedAt,

    /// <summary>Whether legal hold prevents deletion.</summary>
    bool LegalHold,

    /// <summary>Legal hold reason (if applicable).</summary>
    string? LegalHoldReason,

    /// <summary>Whether export requires explicit release before deletion.</summary>
    bool RequiresRelease,

    /// <summary>Who released the export for deletion (if applicable).</summary>
    string? ReleasedBy,

    /// <summary>When export was released for deletion.</summary>
    DateTimeOffset? ReleasedAt,

    /// <summary>Number of times retention was extended.</summary>
    int ExtensionCount,

    /// <summary>Retention metadata (audit trail, etc.).</summary>
    IReadOnlyDictionary<string, string>? Metadata)
{
    /// <summary>Default retention policy names.</summary>
    public static class PolicyNames
    {
        public const string Default = "default";
        public const string Compliance = "compliance";
        public const string Temporary = "temporary";
        public const string LongTerm = "long-term";
        public const string Permanent = "permanent";
    }

    /// <summary>Default retention periods.</summary>
    public static class DefaultPeriods
    {
        public static readonly TimeSpan Temporary = TimeSpan.FromDays(7);
        public static readonly TimeSpan Default = TimeSpan.FromDays(30);
        public static readonly TimeSpan LongTerm = TimeSpan.FromDays(365);
        public static readonly TimeSpan ArchiveDelay = TimeSpan.FromDays(90);
    }

    /// <summary>Creates a default retention policy.</summary>
    public static ExportRetention Default(DateTimeOffset now) => new(
        PolicyName: PolicyNames.Default,
        AvailableAt: now,
        ArchiveAt: now.Add(DefaultPeriods.ArchiveDelay),
        ExpiresAt: now.Add(DefaultPeriods.Default),
        ArchivedAt: null,
        DeletedAt: null,
        LegalHold: false,
        LegalHoldReason: null,
        RequiresRelease: false,
        ReleasedBy: null,
        ReleasedAt: null,
        ExtensionCount: 0,
        Metadata: null);

    /// <summary>Creates a temporary retention policy.</summary>
    public static ExportRetention Temporary(DateTimeOffset now) => new(
        PolicyName: PolicyNames.Temporary,
        AvailableAt: now,
        ArchiveAt: null,
        ExpiresAt: now.Add(DefaultPeriods.Temporary),
        ArchivedAt: null,
        DeletedAt: null,
        LegalHold: false,
        LegalHoldReason: null,
        RequiresRelease: false,
        ReleasedBy: null,
        ReleasedAt: null,
        ExtensionCount: 0,
        Metadata: null);

    /// <summary>Creates a compliance retention policy (requires release).</summary>
    public static ExportRetention Compliance(DateTimeOffset now, TimeSpan minimumRetention) => new(
        PolicyName: PolicyNames.Compliance,
        AvailableAt: now,
        ArchiveAt: now.Add(DefaultPeriods.ArchiveDelay),
        ExpiresAt: now.Add(minimumRetention),
        ArchivedAt: null,
        DeletedAt: null,
        LegalHold: false,
        LegalHoldReason: null,
        RequiresRelease: true,
        ReleasedBy: null,
        ReleasedAt: null,
        ExtensionCount: 0,
        Metadata: null);

    /// <summary>Whether the export is expired.</summary>
    public bool IsExpired => ExpiresAt.HasValue && DateTimeOffset.UtcNow >= ExpiresAt.Value && !LegalHold;

    /// <summary>Whether the export should be archived.</summary>
    public bool ShouldArchive => ArchiveAt.HasValue && DateTimeOffset.UtcNow >= ArchiveAt.Value && !ArchivedAt.HasValue;

    /// <summary>Whether the export can be deleted.</summary>
    public bool CanDelete => IsExpired && (!RequiresRelease || ReleasedAt.HasValue) && !LegalHold;

    /// <summary>Extends the retention period.</summary>
    public ExportRetention ExtendRetention(TimeSpan extension, string? reason = null)
    {
        var metadata = Metadata is null
            ? new Dictionary<string, string>()
            : new Dictionary<string, string>(Metadata);

        metadata[$"extension_{ExtensionCount + 1}_at"] = DateTimeOffset.UtcNow.ToString("o");
        if (reason is not null)
            metadata[$"extension_{ExtensionCount + 1}_reason"] = reason;

        return this with
        {
            ExpiresAt = (ExpiresAt ?? DateTimeOffset.UtcNow).Add(extension),
            ArchiveAt = ArchiveAt?.Add(extension),
            ExtensionCount = ExtensionCount + 1,
            Metadata = metadata
        };
    }

    /// <summary>Places a legal hold on the export.</summary>
    public ExportRetention PlaceLegalHold(string reason) => this with
    {
        LegalHold = true,
        LegalHoldReason = reason
    };

    /// <summary>Releases a legal hold.</summary>
    public ExportRetention ReleaseLegalHold() => this with
    {
        LegalHold = false,
        LegalHoldReason = null
    };

    /// <summary>Releases the export for deletion.</summary>
    public ExportRetention Release(string releasedBy) => this with
    {
        ReleasedBy = releasedBy,
        ReleasedAt = DateTimeOffset.UtcNow
    };

    /// <summary>Marks the export as archived.</summary>
    public ExportRetention MarkArchived() => this with
    {
        ArchivedAt = DateTimeOffset.UtcNow
    };

    /// <summary>Marks the export as deleted.</summary>
    public ExportRetention MarkDeleted() => this with
    {
        DeletedAt = DateTimeOffset.UtcNow
    };

    /// <summary>Serializes retention to JSON.</summary>
    public string ToJson() => JsonSerializer.Serialize(this, JsonOptions);

    /// <summary>Deserializes retention from JSON.</summary>
    public static ExportRetention? FromJson(string json)
    {
        try
        {
            return JsonSerializer.Deserialize<ExportRetention>(json, JsonOptions);
        }
        catch (JsonException)
        {
            return null;
        }
    }

    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false
    };
}
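
// Example (illustrative only, not part of this commit): a typical retention
// lifecycle as a prune loop might drive it, using only the members above:
//
//     var retention = ExportRetention.Default(DateTimeOffset.UtcNow);
//     if (retention.ShouldArchive) retention = retention.MarkArchived();
//     if (retention.CanDelete)     retention = retention.MarkDeleted();
//     // A legal hold blocks both IsExpired and CanDelete until released:
//     retention = retention.PlaceLegalHold("pending audit");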

/// <summary>
/// Complete export job state for streaming updates.
/// </summary>
public sealed record ExportJobState(
    /// <summary>Job ID.</summary>
    Guid JobId,

    /// <summary>Export type.</summary>
    string ExportType,

    /// <summary>Current status.</summary>
    string Status,

    /// <summary>Current progress.</summary>
    ExportJobProgress? Progress,

    /// <summary>Job result (when complete).</summary>
    ExportJobResult? Result,

    /// <summary>Distribution metadata (when complete).</summary>
    ExportDistribution? Distribution,

    /// <summary>Retention policy.</summary>
    ExportRetention? Retention,

    /// <summary>Error message (when failed).</summary>
    string? Error,

    /// <summary>State timestamp.</summary>
    DateTimeOffset Timestamp)
{
    /// <summary>Serializes state to JSON.</summary>
    public string ToJson() => JsonSerializer.Serialize(this, JsonOptions);

    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false
    };
}
@@ -0,0 +1,173 @@
namespace StellaOps.Orchestrator.Core.Domain.Export;

/// <summary>
/// Default policy settings for export jobs.
/// These values are used when creating export job quotas and rate limits.
/// </summary>
public static class ExportJobPolicy
{
    /// <summary>
    /// Default quota settings for export jobs.
    /// Export jobs are typically I/O bound and should be limited to prevent resource exhaustion.
    /// </summary>
    public static class QuotaDefaults
    {
        /// <summary>Maximum concurrent export jobs per tenant.</summary>
        public const int MaxActive = 5;

        /// <summary>Maximum export jobs per hour per tenant.</summary>
        public const int MaxPerHour = 50;

        /// <summary>Token bucket burst capacity.</summary>
        public const int BurstCapacity = 10;

        /// <summary>Token refill rate (tokens per second).</summary>
        public const double RefillRate = 0.5;

        /// <summary>Default priority for export jobs (lower than scan jobs).</summary>
        public const int DefaultPriority = -10;

        /// <summary>Maximum retry attempts for export jobs.</summary>
        public const int MaxAttempts = 3;

        /// <summary>Default lease duration in seconds.</summary>
        public const int DefaultLeaseSeconds = 600; // 10 minutes

        /// <summary>Maximum lease duration in seconds.</summary>
        public const int MaxLeaseSeconds = 3600; // 1 hour

        /// <summary>Heartbeat interval recommendation in seconds.</summary>
        public const int RecommendedHeartbeatInterval = 60;
    }

    /// <summary>
    /// Rate limiting settings for export jobs by type.
    /// Different export types may have different resource requirements.
    /// </summary>
    public static class RateLimits
    {
        /// <summary>Ledger export: moderate rate (database-heavy).</summary>
        public static readonly ExportRateLimit Ledger = new(
            MaxConcurrent: 3,
            MaxPerHour: 30,
            EstimatedDurationSeconds: 120);

        /// <summary>SBOM export: higher rate (typically smaller datasets).</summary>
        public static readonly ExportRateLimit Sbom = new(
            MaxConcurrent: 5,
            MaxPerHour: 100,
            EstimatedDurationSeconds: 30);

        /// <summary>VEX export: similar to SBOM.</summary>
        public static readonly ExportRateLimit Vex = new(
            MaxConcurrent: 5,
            MaxPerHour: 100,
            EstimatedDurationSeconds: 30);

        /// <summary>Scan results export: moderate rate.</summary>
        public static readonly ExportRateLimit ScanResults = new(
            MaxConcurrent: 3,
            MaxPerHour: 50,
            EstimatedDurationSeconds: 60);

        /// <summary>Policy evaluation export: moderate rate.</summary>
        public static readonly ExportRateLimit PolicyEvaluation = new(
            MaxConcurrent: 3,
            MaxPerHour: 50,
            EstimatedDurationSeconds: 60);

        /// <summary>Attestation export: lower rate (cryptographic operations).</summary>
        public static readonly ExportRateLimit Attestation = new(
            MaxConcurrent: 2,
            MaxPerHour: 20,
            EstimatedDurationSeconds: 180);

        /// <summary>Portable bundle export: lowest rate (large bundles).</summary>
        public static readonly ExportRateLimit PortableBundle = new(
            MaxConcurrent: 1,
            MaxPerHour: 10,
            EstimatedDurationSeconds: 600);

        /// <summary>Gets rate limit for a specific export type.</summary>
        public static ExportRateLimit GetForJobType(string jobType) => jobType switch
        {
            ExportJobTypes.Ledger => Ledger,
            ExportJobTypes.Sbom => Sbom,
            ExportJobTypes.Vex => Vex,
            ExportJobTypes.ScanResults => ScanResults,
            ExportJobTypes.PolicyEvaluation => PolicyEvaluation,
            ExportJobTypes.Attestation => Attestation,
            ExportJobTypes.PortableBundle => PortableBundle,
            _ => new ExportRateLimit(MaxConcurrent: 3, MaxPerHour: 30, EstimatedDurationSeconds: 120)
        };
    }

    /// <summary>
    /// Timeout settings for export jobs.
    /// </summary>
    public static class Timeouts
    {
        /// <summary>Maximum time for an export job before it's considered stale.</summary>
        public static readonly TimeSpan MaxJobDuration = TimeSpan.FromHours(2);

        /// <summary>Maximum time to wait for a heartbeat before reclaiming.</summary>
        public static readonly TimeSpan HeartbeatTimeout = TimeSpan.FromMinutes(5);

        /// <summary>Backoff delay after failure before retry.</summary>
        public static readonly TimeSpan RetryBackoff = TimeSpan.FromMinutes(1);

        /// <summary>Maximum backoff delay for exponential retry.</summary>
        public static readonly TimeSpan MaxRetryBackoff = TimeSpan.FromMinutes(30);
    }

    /// <summary>
    /// Creates a default quota for export jobs.
    /// </summary>
    public static Quota CreateDefaultQuota(
        string tenantId,
        string? jobType = null,
        string createdBy = "system")
    {
        var rateLimit = jobType is not null && ExportJobTypes.IsExportJob(jobType)
            ? RateLimits.GetForJobType(jobType)
            : new ExportRateLimit(
                QuotaDefaults.MaxActive,
                QuotaDefaults.MaxPerHour,
                QuotaDefaults.DefaultLeaseSeconds);

        var now = DateTimeOffset.UtcNow;

        return new Quota(
            QuotaId: Guid.NewGuid(),
            TenantId: tenantId,
            JobType: jobType,
            MaxActive: rateLimit.MaxConcurrent,
            MaxPerHour: rateLimit.MaxPerHour,
            BurstCapacity: QuotaDefaults.BurstCapacity,
            RefillRate: QuotaDefaults.RefillRate,
            CurrentTokens: QuotaDefaults.BurstCapacity,
            LastRefillAt: now,
            CurrentActive: 0,
            CurrentHourCount: 0,
            CurrentHourStart: now,
            Paused: false,
            PauseReason: null,
            QuotaTicket: null,
            CreatedAt: now,
            UpdatedAt: now,
            UpdatedBy: createdBy);
    }
}
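
// Worked example (illustrative only, not part of this commit): with
// RefillRate = 0.5 tokens/second the bucket regains one admission every
// 2 seconds (30 per minute), and BurstCapacity = 10 bounds how many jobs can
// be admitted back-to-back after an idle period. A refill step is assumed to
// look roughly like:
//
//     tokens = Math.Min(QuotaDefaults.BurstCapacity,
//                       tokens + elapsedSeconds * QuotaDefaults.RefillRate);
//     if (tokens >= 1.0) { tokens -= 1.0; /* admit one job */ }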

/// <summary>
/// Rate limit configuration for an export type.
/// </summary>
public sealed record ExportRateLimit(
    /// <summary>Maximum concurrent jobs of this type.</summary>
    int MaxConcurrent,

    /// <summary>Maximum jobs per hour.</summary>
    int MaxPerHour,

    /// <summary>Estimated duration in seconds (for scheduling hints).</summary>
    int EstimatedDurationSeconds);
@@ -0,0 +1,61 @@
namespace StellaOps.Orchestrator.Core.Domain.Export;

/// <summary>
/// Standard export job type identifiers.
/// Export jobs follow the pattern "export.{target}" where target is the export destination/format.
/// </summary>
public static class ExportJobTypes
{
    /// <summary>Job type prefix for all export jobs.</summary>
    public const string Prefix = "export.";

    /// <summary>Run ledger export (audit trail, immutable snapshots).</summary>
    public const string Ledger = "export.ledger";

    /// <summary>SBOM export (SPDX, CycloneDX formats).</summary>
    public const string Sbom = "export.sbom";

    /// <summary>VEX document export.</summary>
    public const string Vex = "export.vex";

    /// <summary>Scan results export.</summary>
    public const string ScanResults = "export.scan-results";

    /// <summary>Policy evaluation export.</summary>
    public const string PolicyEvaluation = "export.policy-evaluation";

    /// <summary>Attestation bundle export.</summary>
    public const string Attestation = "export.attestation";

    /// <summary>Portable evidence bundle export (for air-gap transfer).</summary>
    public const string PortableBundle = "export.portable-bundle";

    /// <summary>All known export job types.</summary>
    public static readonly IReadOnlyList<string> All =
    [
        Ledger,
        Sbom,
        Vex,
        ScanResults,
        PolicyEvaluation,
        Attestation,
        PortableBundle
    ];

    /// <summary>Checks if a job type is an export job.</summary>
    public static bool IsExportJob(string? jobType) =>
        jobType is not null && jobType.StartsWith(Prefix, StringComparison.OrdinalIgnoreCase);

    /// <summary>Gets the export target from a job type (e.g., "ledger" from "export.ledger").</summary>
    public static string? GetExportTarget(string? jobType)
    {
        if (!IsExportJob(jobType))
        {
            return null;
        }

        return jobType!.Length > Prefix.Length
            ? jobType[Prefix.Length..]
            : null;
    }
}
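
// Example (illustrative only, not part of this commit): prefix parsing edge
// cases, following directly from the definitions above; note the
// case-insensitive prefix match and the null result for a bare prefix:
//
//     ExportJobTypes.GetExportTarget("export.sbom");   // "sbom"
//     ExportJobTypes.GetExportTarget("EXPORT.ledger"); // "ledger"
//     ExportJobTypes.GetExportTarget("export.");       // null
//     ExportJobTypes.IsExportJob("scan.image");        // false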
@@ -0,0 +1,537 @@
using System.Text.Json;

namespace StellaOps.Orchestrator.Core.Domain.Export;

/// <summary>
/// Represents a scheduled export configuration.
/// Exports can be scheduled to run on a cron pattern.
/// </summary>
public sealed record ExportSchedule(
    /// <summary>Schedule ID.</summary>
    Guid ScheduleId,

    /// <summary>Tenant ID.</summary>
    string TenantId,

    /// <summary>Schedule name for identification.</summary>
    string Name,

    /// <summary>Schedule description.</summary>
    string? Description,

    /// <summary>Export type to execute.</summary>
    string ExportType,

    /// <summary>Cron expression for scheduling (5 or 6 fields).</summary>
    string CronExpression,

    /// <summary>Timezone for cron evaluation (IANA format).</summary>
    string Timezone,

    /// <summary>Whether the schedule is enabled.</summary>
    bool Enabled,

    /// <summary>Export payload template.</summary>
    ExportJobPayload PayloadTemplate,

    /// <summary>Retention policy to apply to generated exports.</summary>
    string RetentionPolicy,

    /// <summary>Project ID filter (optional).</summary>
    string? ProjectId,

    /// <summary>Maximum concurrent exports from this schedule.</summary>
    int MaxConcurrent,

    /// <summary>Whether to skip if previous run is still executing.</summary>
    bool SkipIfRunning,

    /// <summary>Last run timestamp (set on success or failure).</summary>
    DateTimeOffset? LastRunAt,

    /// <summary>Last run job ID.</summary>
    Guid? LastJobId,

    /// <summary>Last run status.</summary>
    string? LastRunStatus,

    /// <summary>Next scheduled run time.</summary>
    DateTimeOffset? NextRunAt,

    /// <summary>Total runs executed.</summary>
    long TotalRuns,

    /// <summary>Successful runs count.</summary>
    long SuccessfulRuns,

    /// <summary>Failed runs count.</summary>
    long FailedRuns,

    /// <summary>Created timestamp.</summary>
    DateTimeOffset CreatedAt,

    /// <summary>Last updated timestamp.</summary>
    DateTimeOffset UpdatedAt,

    /// <summary>Created by user.</summary>
    string CreatedBy,

    /// <summary>Last updated by user.</summary>
    string UpdatedBy)
{
    /// <summary>Creates a new export schedule.</summary>
    public static ExportSchedule Create(
        string tenantId,
        string name,
        string exportType,
        string cronExpression,
        ExportJobPayload payloadTemplate,
        string createdBy,
        string? description = null,
        string timezone = "UTC",
        string retentionPolicy = "default",
        string? projectId = null,
        int maxConcurrent = 1,
        bool skipIfRunning = true)
    {
        var now = DateTimeOffset.UtcNow;

        return new ExportSchedule(
            ScheduleId: Guid.NewGuid(),
            TenantId: tenantId,
            Name: name,
            Description: description,
            ExportType: exportType,
            CronExpression: cronExpression,
            Timezone: timezone,
            Enabled: true,
            PayloadTemplate: payloadTemplate,
            RetentionPolicy: retentionPolicy,
            ProjectId: projectId,
            MaxConcurrent: maxConcurrent,
            SkipIfRunning: skipIfRunning,
            LastRunAt: null,
            LastJobId: null,
            LastRunStatus: null,
            NextRunAt: null,
            TotalRuns: 0,
            SuccessfulRuns: 0,
            FailedRuns: 0,
            CreatedAt: now,
            UpdatedAt: now,
            CreatedBy: createdBy,
            UpdatedBy: createdBy);
    }

    /// <summary>Success rate as percentage (0-100).</summary>
    public double SuccessRate => TotalRuns > 0
        ? 100.0 * SuccessfulRuns / TotalRuns
        : 0;

    /// <summary>Enables the schedule.</summary>
    public ExportSchedule Enable() => this with
    {
        Enabled = true,
        UpdatedAt = DateTimeOffset.UtcNow
    };

    /// <summary>Disables the schedule.</summary>
    public ExportSchedule Disable() => this with
    {
        Enabled = false,
        UpdatedAt = DateTimeOffset.UtcNow
    };

    /// <summary>Records a successful run.</summary>
    public ExportSchedule RecordSuccess(Guid jobId, DateTimeOffset? nextRun = null) => this with
    {
        LastRunAt = DateTimeOffset.UtcNow,
        LastJobId = jobId,
        LastRunStatus = "completed",
        NextRunAt = nextRun,
        TotalRuns = TotalRuns + 1,
        SuccessfulRuns = SuccessfulRuns + 1,
        UpdatedAt = DateTimeOffset.UtcNow
    };

    /// <summary>Records a failed run.</summary>
    public ExportSchedule RecordFailure(Guid jobId, string? reason = null, DateTimeOffset? nextRun = null) => this with
    {
        LastRunAt = DateTimeOffset.UtcNow,
        LastJobId = jobId,
        LastRunStatus = $"failed: {reason ?? "unknown"}",
        NextRunAt = nextRun,
        TotalRuns = TotalRuns + 1,
        FailedRuns = FailedRuns + 1,
        UpdatedAt = DateTimeOffset.UtcNow
    };

    /// <summary>Updates the next run time.</summary>
    public ExportSchedule WithNextRun(DateTimeOffset nextRun) => this with
    {
        NextRunAt = nextRun,
        UpdatedAt = DateTimeOffset.UtcNow
    };

    /// <summary>Updates the cron expression.</summary>
    public ExportSchedule WithCron(string cronExpression, string updatedBy) => this with
    {
        CronExpression = cronExpression,
        UpdatedAt = DateTimeOffset.UtcNow,
        UpdatedBy = updatedBy
    };

    /// <summary>Updates the payload template.</summary>
    public ExportSchedule WithPayload(ExportJobPayload payload, string updatedBy) => this with
    {
        PayloadTemplate = payload,
        UpdatedAt = DateTimeOffset.UtcNow,
        UpdatedBy = updatedBy
    };
}
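
// Example (illustrative only, not part of this commit): a nightly SBOM export
// schedule. The scheduler is assumed to compute NextRunAt from the cron field;
// `jobId` and `nextRun` are hypothetical values from that scheduler:
//
//     var schedule = ExportSchedule.Create(
//         tenantId: "tenant-a",
//         name: "nightly-sbom",
//         exportType: ExportJobTypes.Sbom,
//         cronExpression: "0 1 * * *",        // daily at 01:00 in Timezone
//         payloadTemplate: ExportJobPayload.Default("cyclonedx"),
//         createdBy: "system");
//     schedule = schedule.RecordSuccess(jobId, nextRun);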

/// <summary>
/// Configuration for retention pruning.
/// </summary>
public sealed record RetentionPruneConfig(
    /// <summary>Pruning job ID.</summary>
    Guid PruneId,

    /// <summary>Tenant ID (null for global).</summary>
    string? TenantId,

    /// <summary>Export type filter (null for all).</summary>
    string? ExportType,

    /// <summary>Whether pruning is enabled.</summary>
    bool Enabled,

    /// <summary>Cron expression for prune schedule.</summary>
    string CronExpression,

    /// <summary>Maximum exports to prune per run.</summary>
    int BatchSize,

    /// <summary>Whether to archive before deleting.</summary>
    bool ArchiveBeforeDelete,

    /// <summary>Archive storage provider.</summary>
    string? ArchiveProvider,

    /// <summary>Whether to notify on prune completion.</summary>
    bool NotifyOnComplete,

    /// <summary>Notification channel for alerts.</summary>
    string? NotificationChannel,

    /// <summary>Last prune timestamp.</summary>
    DateTimeOffset? LastPruneAt,

    /// <summary>Exports pruned in last run.</summary>
    int LastPruneCount,

    /// <summary>Total exports pruned.</summary>
    long TotalPruned,

    /// <summary>Created timestamp.</summary>
    DateTimeOffset CreatedAt,

    /// <summary>Updated timestamp.</summary>
    DateTimeOffset UpdatedAt)
{
    /// <summary>Default batch size for pruning.</summary>
    public const int DefaultBatchSize = 100;

    /// <summary>Default cron expression (daily at 2 AM).</summary>
    public const string DefaultCronExpression = "0 2 * * *";

    /// <summary>Creates a default prune configuration.</summary>
    public static RetentionPruneConfig Create(
        string? tenantId = null,
        string? exportType = null,
        string? cronExpression = null,
        int batchSize = DefaultBatchSize)
    {
        var now = DateTimeOffset.UtcNow;

        return new RetentionPruneConfig(
            PruneId: Guid.NewGuid(),
            TenantId: tenantId,
            ExportType: exportType,
            Enabled: true,
            CronExpression: cronExpression ?? DefaultCronExpression,
            BatchSize: batchSize,
            ArchiveBeforeDelete: true,
            ArchiveProvider: null,
            NotifyOnComplete: false,
            NotificationChannel: null,
            LastPruneAt: null,
            LastPruneCount: 0,
            TotalPruned: 0,
            CreatedAt: now,
            UpdatedAt: now);
    }

    /// <summary>Records a prune operation.</summary>
    public RetentionPruneConfig RecordPrune(int count) => this with
    {
        LastPruneAt = DateTimeOffset.UtcNow,
        LastPruneCount = count,
        TotalPruned = TotalPruned + count,
        UpdatedAt = DateTimeOffset.UtcNow
    };
}

/// <summary>
/// Export failure alert configuration.
/// </summary>
public sealed record ExportAlertConfig(
    /// <summary>Alert configuration ID.</summary>
    Guid AlertConfigId,

    /// <summary>Tenant ID.</summary>
    string TenantId,

    /// <summary>Alert name.</summary>
    string Name,

    /// <summary>Export type filter (null for all).</summary>
    string? ExportType,

    /// <summary>Whether alerting is enabled.</summary>
    bool Enabled,

    /// <summary>Minimum consecutive failures to trigger.</summary>
    int ConsecutiveFailuresThreshold,

    /// <summary>Failure rate threshold (0-100).</summary>
    double FailureRateThreshold,

    /// <summary>Time window for failure rate calculation.</summary>
    TimeSpan FailureRateWindow,

    /// <summary>Alert severity.</summary>
    ExportAlertSeverity Severity,

    /// <summary>Notification channels (comma-separated).</summary>
    string NotificationChannels,

    /// <summary>Alert cooldown period.</summary>
    TimeSpan Cooldown,

    /// <summary>Last alert timestamp.</summary>
    DateTimeOffset? LastAlertAt,

    /// <summary>Total alerts triggered.</summary>
    long TotalAlerts,

    /// <summary>Created timestamp.</summary>
    DateTimeOffset CreatedAt,

    /// <summary>Updated timestamp.</summary>
    DateTimeOffset UpdatedAt)
{
    /// <summary>Creates a default alert configuration.</summary>
    public static ExportAlertConfig Create(
        string tenantId,
        string name,
        string? exportType = null,
        int consecutiveFailuresThreshold = 3,
        double failureRateThreshold = 50.0,
        ExportAlertSeverity severity = ExportAlertSeverity.Warning)
    {
        var now = DateTimeOffset.UtcNow;

        return new ExportAlertConfig(
            AlertConfigId: Guid.NewGuid(),
            TenantId: tenantId,
            Name: name,
            ExportType: exportType,
            Enabled: true,
            ConsecutiveFailuresThreshold: consecutiveFailuresThreshold,
            FailureRateThreshold: failureRateThreshold,
            FailureRateWindow: TimeSpan.FromHours(1),
            Severity: severity,
            NotificationChannels: "email",
            Cooldown: TimeSpan.FromMinutes(15),
            LastAlertAt: null,
            TotalAlerts: 0,
            CreatedAt: now,
            UpdatedAt: now);
    }

    /// <summary>Whether an alert can be triggered (respects cooldown).</summary>
    public bool CanAlert => !LastAlertAt.HasValue ||
        DateTimeOffset.UtcNow >= LastAlertAt.Value.Add(Cooldown);

    /// <summary>Records an alert.</summary>
    public ExportAlertConfig RecordAlert() => this with
    {
        LastAlertAt = DateTimeOffset.UtcNow,
        TotalAlerts = TotalAlerts + 1,
        UpdatedAt = DateTimeOffset.UtcNow
    };
}
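
// Example (illustrative only, not part of this commit): the cooldown gate keeps
// alert storms bounded. RecordAlert stamps LastAlertAt, so CanAlert stays false
// for the next Cooldown window (15 minutes by default). A hypothetical
// evaluation loop:
//
//     if (config.Enabled && config.CanAlert)
//     {
//         config = config.RecordAlert(); // then dispatch to NotificationChannels
//     }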
|
||||
|
||||
/// <summary>
|
||||
/// Export alert severity levels.
|
||||
/// </summary>
|
||||
public enum ExportAlertSeverity
|
||||
{
|
||||
/// <summary>Informational.</summary>
|
||||
Info = 0,
|
||||
|
||||
/// <summary>Warning - attention needed.</summary>
|
||||
Warning = 1,
|
||||
|
||||
/// <summary>Error - action required.</summary>
|
||||
Error = 2,
|
||||
|
||||
/// <summary>Critical - immediate action.</summary>
|
||||
Critical = 3
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Export failure alert instance.
|
||||
/// </summary>
|
||||
public sealed record ExportAlert(
|
||||
/// <summary>Alert ID.</summary>
|
||||
Guid AlertId,
|
||||
|
||||
/// <summary>Alert configuration ID.</summary>
|
||||
Guid AlertConfigId,
|
||||
|
||||
/// <summary>Tenant ID.</summary>
|
||||
string TenantId,
|
||||
|
||||
/// <summary>Export type.</summary>
|
||||
string ExportType,
|
||||
|
||||
/// <summary>Alert severity.</summary>
|
||||
ExportAlertSeverity Severity,
|
||||
|
||||
/// <summary>Alert message.</summary>
|
||||
string Message,
|
||||
|
||||
/// <summary>Failed job IDs.</summary>
|
||||
IReadOnlyList<Guid> FailedJobIds,
|
||||
|
||||
/// <summary>Consecutive failure count.</summary>
|
||||
int ConsecutiveFailures,
|
||||
|
||||
/// <summary>Current failure rate.</summary>
|
||||
double FailureRate,
|
||||
|
||||
/// <summary>Alert timestamp.</summary>
|
||||
DateTimeOffset TriggeredAt,
|
||||
|
||||
/// <summary>Acknowledged timestamp.</summary>
|
||||
DateTimeOffset? AcknowledgedAt,
|
||||
|
||||
/// <summary>Acknowledged by user.</summary>
|
||||
string? AcknowledgedBy,
|
||||
|
||||
/// <summary>Resolved timestamp.</summary>
|
||||
DateTimeOffset? ResolvedAt,
|
||||
|
||||
/// <summary>Resolution notes.</summary>
|
||||
string? ResolutionNotes)
|
||||
{
|
||||
/// <summary>Creates a new alert for consecutive failures.</summary>
|
||||
public static ExportAlert CreateForConsecutiveFailures(
|
||||
Guid alertConfigId,
|
||||
string tenantId,
|
||||
string exportType,
|
||||
ExportAlertSeverity severity,
|
||||
IReadOnlyList<Guid> failedJobIds,
|
||||
int consecutiveFailures)
|
||||
{
|
||||
return new ExportAlert(
|
||||
AlertId: Guid.NewGuid(),
|
||||
AlertConfigId: alertConfigId,
|
||||
TenantId: tenantId,
|
||||
ExportType: exportType,
|
||||
Severity: severity,
|
||||
Message: $"Export job {exportType} has failed {consecutiveFailures} consecutive times",
|
||||
FailedJobIds: failedJobIds,
|
||||
ConsecutiveFailures: consecutiveFailures,
|
||||
FailureRate: 0,
|
||||
TriggeredAt: DateTimeOffset.UtcNow,
|
||||
AcknowledgedAt: null,
|
||||
AcknowledgedBy: null,
|
||||
ResolvedAt: null,
|
||||
ResolutionNotes: null);
|
||||
}
|
||||
|
||||
/// <summary>Creates a new alert for high failure rate.</summary>
|
||||
public static ExportAlert CreateForHighFailureRate(
|
||||
Guid alertConfigId,
|
||||
string tenantId,
|
||||
string exportType,
|
||||
ExportAlertSeverity severity,
|
||||
double failureRate,
|
||||
IReadOnlyList<Guid> recentFailedJobIds)
|
||||
{
|
||||
return new ExportAlert(
|
||||
AlertId: Guid.NewGuid(),
|
||||
AlertConfigId: alertConfigId,
|
||||
TenantId: tenantId,
|
||||
ExportType: exportType,
|
||||
Severity: severity,
|
||||
Message: $"Export job {exportType} failure rate is {failureRate:F1}%",
|
||||
FailedJobIds: recentFailedJobIds,
|
||||
ConsecutiveFailures: 0,
|
||||
FailureRate: failureRate,
|
||||
TriggeredAt: DateTimeOffset.UtcNow,
|
||||
AcknowledgedAt: null,
|
||||
AcknowledgedBy: null,
|
||||
ResolvedAt: null,
|
||||
ResolutionNotes: null);
|
||||
}
|
||||
|
||||
/// <summary>Acknowledges the alert.</summary>
|
||||
public ExportAlert Acknowledge(string acknowledgedBy) => this with
|
||||
{
|
||||
AcknowledgedAt = DateTimeOffset.UtcNow,
|
||||
AcknowledgedBy = acknowledgedBy
|
||||
};
|
||||
|
||||
/// <summary>Resolves the alert.</summary>
|
||||
public ExportAlert Resolve(string? notes = null) => this with
|
||||
{
|
||||
ResolvedAt = DateTimeOffset.UtcNow,
|
||||
ResolutionNotes = notes
|
||||
};
|
||||
|
||||
/// <summary>Whether the alert is active (not resolved).</summary>
|
||||
public bool IsActive => ResolvedAt is null;
|
||||
}

/// <summary>
/// Result of a retention prune operation.
/// </summary>
public sealed record RetentionPruneResult(
    /// <summary>Number of exports archived.</summary>
    int ArchivedCount,

    /// <summary>Number of exports deleted.</summary>
    int DeletedCount,

    /// <summary>Number of exports skipped (legal hold, etc.).</summary>
    int SkippedCount,

    /// <summary>Errors encountered.</summary>
    IReadOnlyList<string> Errors,

    /// <summary>Duration of prune operation.</summary>
    TimeSpan Duration)
{
    /// <summary>Total exports processed.</summary>
    public int TotalProcessed => ArchivedCount + DeletedCount + SkippedCount;

    /// <summary>Whether any errors occurred.</summary>
    public bool HasErrors => Errors.Count > 0;

    /// <summary>Empty result.</summary>
    public static RetentionPruneResult Empty => new(0, 0, 0, [], TimeSpan.Zero);
}
@@ -0,0 +1,180 @@
namespace StellaOps.Orchestrator.Core.Domain;

/// <summary>
/// Represents an Authority pack execution.
/// Pack runs execute policy automation scripts with log collection and artifact production.
/// </summary>
public sealed record PackRun(
    /// <summary>Unique pack run identifier.</summary>
    Guid PackRunId,

    /// <summary>Tenant owning this pack run.</summary>
    string TenantId,

    /// <summary>Optional project scope within tenant.</summary>
    string? ProjectId,

    /// <summary>Authority pack ID being executed.</summary>
    string PackId,

    /// <summary>Pack version (e.g., "1.2.3", "latest").</summary>
    string PackVersion,

    /// <summary>Current pack run status.</summary>
    PackRunStatus Status,

    /// <summary>Priority (higher = more urgent). Default 0.</summary>
    int Priority,

    /// <summary>Current attempt number (1-based).</summary>
    int Attempt,

    /// <summary>Maximum retry attempts.</summary>
    int MaxAttempts,

    /// <summary>Pack input parameters JSON.</summary>
    string Parameters,

    /// <summary>SHA-256 digest of the parameters for determinism verification.</summary>
    string ParametersDigest,

    /// <summary>Idempotency key for deduplication.</summary>
    string IdempotencyKey,

    /// <summary>Correlation ID for distributed tracing.</summary>
    string? CorrelationId,

    /// <summary>Current lease ID (if leased to a task runner).</summary>
    Guid? LeaseId,

    /// <summary>Task runner executing this pack run.</summary>
    string? TaskRunnerId,

    /// <summary>Lease expiration time.</summary>
    DateTimeOffset? LeaseUntil,

    /// <summary>When the pack run was created.</summary>
    DateTimeOffset CreatedAt,

    /// <summary>When the pack run was scheduled (quota cleared).</summary>
    DateTimeOffset? ScheduledAt,

    /// <summary>When the pack run was leased to a task runner.</summary>
    DateTimeOffset? LeasedAt,

    /// <summary>When the pack run started executing.</summary>
    DateTimeOffset? StartedAt,

    /// <summary>When the pack run completed (terminal state).</summary>
    DateTimeOffset? CompletedAt,

    /// <summary>Earliest time the pack run can be scheduled (for backoff).</summary>
    DateTimeOffset? NotBefore,

    /// <summary>Terminal status reason (failure message, cancel reason, etc.).</summary>
    string? Reason,

    /// <summary>Exit code from pack execution (null if not completed).</summary>
    int? ExitCode,

    /// <summary>Duration of pack execution in milliseconds.</summary>
    long? DurationMs,

    /// <summary>Actor who initiated the pack run.</summary>
    string CreatedBy,

    /// <summary>Optional metadata JSON blob (e.g., trigger info, source context).</summary>
    string? Metadata)
{
    /// <summary>
    /// Creates a new pack run in pending status.
    /// </summary>
    public static PackRun Create(
        Guid packRunId,
        string tenantId,
        string? projectId,
        string packId,
        string packVersion,
        string parameters,
        string parametersDigest,
        string idempotencyKey,
        string? correlationId,
        string createdBy,
        int priority = 0,
        int maxAttempts = 3,
        string? metadata = null,
        DateTimeOffset? createdAt = null)
    {
        return new PackRun(
            PackRunId: packRunId,
            TenantId: tenantId,
            ProjectId: projectId,
            PackId: packId,
            PackVersion: packVersion,
            Status: PackRunStatus.Pending,
            Priority: priority,
            Attempt: 1,
            MaxAttempts: maxAttempts,
            Parameters: parameters,
            ParametersDigest: parametersDigest,
            IdempotencyKey: idempotencyKey,
            CorrelationId: correlationId,
            LeaseId: null,
            TaskRunnerId: null,
            LeaseUntil: null,
            CreatedAt: createdAt ?? DateTimeOffset.UtcNow,
            ScheduledAt: null,
            LeasedAt: null,
            StartedAt: null,
            CompletedAt: null,
            NotBefore: null,
            Reason: null,
            ExitCode: null,
            DurationMs: null,
            CreatedBy: createdBy,
            Metadata: metadata);
    }

    /// <summary>
    /// Checks if the pack run is in a terminal state.
    /// </summary>
    public bool IsTerminal => Status is PackRunStatus.Succeeded or PackRunStatus.Failed or PackRunStatus.Canceled or PackRunStatus.TimedOut;

    /// <summary>
    /// Checks if the pack run can be retried.
    /// </summary>
    public bool CanRetry => Attempt < MaxAttempts && Status == PackRunStatus.Failed;
}

/// <summary>
/// Pack run lifecycle states.
/// Transitions follow the state machine:
/// Pending → Scheduled → Leased → Running → (Succeeded | Failed | Canceled | TimedOut)
/// Failed pack runs may transition to Pending via retry.
/// </summary>
public enum PackRunStatus
{
    /// <summary>Pack run created but not yet scheduled (e.g., quota exceeded).</summary>
    Pending = 0,

    /// <summary>Pack run scheduled and awaiting task runner lease.</summary>
    Scheduled = 1,

    /// <summary>Pack run leased to a task runner.</summary>
    Leased = 2,

    /// <summary>Pack run is executing (received start signal from runner).</summary>
    Running = 3,

    /// <summary>Pack run completed successfully (exit code 0).</summary>
    Succeeded = 4,

    /// <summary>Pack run failed (non-zero exit or execution error).</summary>
    Failed = 5,

    /// <summary>Pack run canceled by operator or system.</summary>
    Canceled = 6,

    /// <summary>Pack run timed out (lease expired without completion).</summary>
    TimedOut = 7
}
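
// Retry sketch (illustrative only, not part of this commit): a scheduler can express
// the Failed → Pending transition directly against the record, honoring MaxAttempts
// via CanRetry and using NotBefore for the backoff window. The backoff value is
// assumed to come from a retry policy this commit does not define.
internal static class PackRunRetryExample
{
    // Requeues a failed run if attempts remain; otherwise returns it unchanged.
    public static PackRun RequeueIfRetryable(PackRun run, TimeSpan backoff)
    {
        if (!run.CanRetry)
            return run; // exhausted attempts, or not in Failed state

        return run with
        {
            Status = PackRunStatus.Pending,
            Attempt = run.Attempt + 1,
            LeaseId = null,
            TaskRunnerId = null,
            LeaseUntil = null,
            NotBefore = DateTimeOffset.UtcNow + backoff, // earliest reschedule time
            Reason = null
        };
    }
}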
@@ -0,0 +1,191 @@
namespace StellaOps.Orchestrator.Core.Domain;

/// <summary>
/// Represents a log entry from a pack run execution.
/// Log entries are append-only and ordered by sequence number within a pack run.
/// </summary>
public sealed record PackRunLog(
    /// <summary>Unique log entry identifier.</summary>
    Guid LogId,

    /// <summary>Tenant owning this log entry.</summary>
    string TenantId,

    /// <summary>Pack run this log belongs to.</summary>
    Guid PackRunId,

    /// <summary>Sequence number within the pack run (0-based, monotonically increasing).</summary>
    long Sequence,

    /// <summary>Log level (info, warn, error, debug, trace).</summary>
    LogLevel Level,

    /// <summary>Log source (e.g., "stdout", "stderr", "system", "pack").</summary>
    string Source,

    /// <summary>Log message content.</summary>
    string Message,

    /// <summary>When the log entry was created.</summary>
    DateTimeOffset Timestamp,

    /// <summary>Optional structured data JSON (e.g., key-value pairs, metrics).</summary>
    string? Data)
{
    /// <summary>
    /// Creates a new log entry.
    /// </summary>
    public static PackRunLog Create(
        Guid packRunId,
        string tenantId,
        long sequence,
        LogLevel level,
        string source,
        string message,
        string? data = null,
        DateTimeOffset? timestamp = null)
    {
        return new PackRunLog(
            LogId: Guid.NewGuid(),
            TenantId: tenantId,
            PackRunId: packRunId,
            Sequence: sequence,
            Level: level,
            Source: source,
            Message: message,
            Timestamp: timestamp ?? DateTimeOffset.UtcNow,
            Data: data);
    }

    /// <summary>
    /// Creates an info-level stdout log entry.
    /// </summary>
    public static PackRunLog Stdout(
        Guid packRunId,
        string tenantId,
        long sequence,
        string message,
        DateTimeOffset? timestamp = null)
    {
        return Create(packRunId, tenantId, sequence, LogLevel.Info, "stdout", message, null, timestamp);
    }

    /// <summary>
    /// Creates a warn-level stderr log entry.
    /// </summary>
    public static PackRunLog Stderr(
        Guid packRunId,
        string tenantId,
        long sequence,
        string message,
        DateTimeOffset? timestamp = null)
    {
        return Create(packRunId, tenantId, sequence, LogLevel.Warn, "stderr", message, null, timestamp);
    }

    /// <summary>
    /// Creates a system-level log entry (lifecycle events).
    /// </summary>
    public static PackRunLog System(
        Guid packRunId,
        string tenantId,
        long sequence,
        LogLevel level,
        string message,
        string? data = null,
        DateTimeOffset? timestamp = null)
    {
        return Create(packRunId, tenantId, sequence, level, "system", message, data, timestamp);
    }
}

/// <summary>
/// Log levels for pack run logs.
/// </summary>
public enum LogLevel
{
    /// <summary>Trace-level logging (most verbose).</summary>
    Trace = 0,

    /// <summary>Debug-level logging.</summary>
    Debug = 1,

    /// <summary>Informational messages (default for stdout).</summary>
    Info = 2,

    /// <summary>Warning messages (default for stderr).</summary>
    Warn = 3,

    /// <summary>Error messages.</summary>
    Error = 4,

    /// <summary>Fatal/critical errors.</summary>
    Fatal = 5
}

/// <summary>
/// Represents a batch of log entries for efficient streaming/storage.
/// </summary>
public sealed record PackRunLogBatch(
    /// <summary>Pack run ID these logs belong to.</summary>
    Guid PackRunId,

    /// <summary>Tenant owning these logs.</summary>
    string TenantId,

    /// <summary>Starting sequence number of this batch.</summary>
    long StartSequence,

    /// <summary>Log entries in this batch.</summary>
    IReadOnlyList<PackRunLog> Logs)
{
    /// <summary>
    /// Gets the next expected sequence number after this batch.
    /// </summary>
    public long NextSequence => StartSequence + Logs.Count;

    /// <summary>
    /// Creates a batch from a list of logs.
    /// </summary>
    public static PackRunLogBatch FromLogs(Guid packRunId, string tenantId, IReadOnlyList<PackRunLog> logs)
    {
        if (logs.Count == 0)
            return new PackRunLogBatch(packRunId, tenantId, 0, logs);

        return new PackRunLogBatch(packRunId, tenantId, logs[0].Sequence, logs);
    }
}

/// <summary>
/// Represents a log cursor for resumable streaming.
/// </summary>
public sealed record PackRunLogCursor(
    /// <summary>Pack run ID.</summary>
    Guid PackRunId,

    /// <summary>Last seen sequence number.</summary>
    long LastSequence,

    /// <summary>Whether we've reached the end of current logs.</summary>
    bool IsComplete)
{
    /// <summary>
    /// Creates a cursor starting from the beginning.
    /// </summary>
    public static PackRunLogCursor Start(Guid packRunId) => new(packRunId, -1, false);

    /// <summary>
    /// Creates a cursor for resuming from a specific sequence.
    /// </summary>
    public static PackRunLogCursor Resume(Guid packRunId, long lastSequence) => new(packRunId, lastSequence, false);

    /// <summary>
    /// Returns a copy of the cursor marked as complete.
    /// </summary>
    public PackRunLogCursor Complete() => this with { IsComplete = true };

    /// <summary>
    /// Advances the cursor to a new sequence.
    /// </summary>
    public PackRunLogCursor Advance(long newSequence) => this with { LastSequence = newSequence };
}
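
// Tailing sketch (illustrative only, not part of this commit): consumers resume
// streaming by fetching batches after the cursor's LastSequence and advancing until
// caught up. IPackRunLogStore.FetchAfterAsync is an assumed query API.
internal static class PackRunLogTailExample
{
    // Hypothetical store abstraction; only this one query is assumed here.
    public interface IPackRunLogStore
    {
        Task<PackRunLogBatch> FetchAfterAsync(Guid packRunId, long afterSequence, CancellationToken ct);
    }

    public static async Task TailAsync(IPackRunLogStore store, Guid packRunId, CancellationToken ct)
    {
        var cursor = PackRunLogCursor.Start(packRunId);
        while (!cursor.IsComplete)
        {
            var batch = await store.FetchAfterAsync(packRunId, cursor.LastSequence, ct);
            foreach (var entry in batch.Logs)
                Console.WriteLine($"[{entry.Level}] {entry.Source}: {entry.Message}");

            cursor = batch.Logs.Count == 0
                ? cursor.Complete()                        // caught up with current logs
                : cursor.Advance(batch.NextSequence - 1);  // highest sequence consumed
        }
    }
}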
@@ -0,0 +1,341 @@
using StellaOps.Orchestrator.Core.Domain;
using StellaOps.Orchestrator.Core.Domain.Export;

namespace StellaOps.Orchestrator.Core.Services;

/// <summary>
/// Service for managing export jobs.
/// Provides high-level operations for creating, scheduling, and tracking export jobs.
/// </summary>
public interface IExportJobService
{
    /// <summary>Creates a new export job.</summary>
    Task<Job> CreateExportJobAsync(
        string tenantId,
        string exportType,
        ExportJobPayload payload,
        string createdBy,
        string? projectId = null,
        string? correlationId = null,
        int? priority = null,
        CancellationToken cancellationToken = default);

    /// <summary>Gets an export job by ID.</summary>
    Task<Job?> GetExportJobAsync(
        string tenantId,
        Guid jobId,
        CancellationToken cancellationToken = default);

    /// <summary>Lists export jobs with optional filters.</summary>
    Task<IReadOnlyList<Job>> ListExportJobsAsync(
        string tenantId,
        string? exportType = null,
        JobStatus? status = null,
        string? projectId = null,
        DateTimeOffset? createdAfter = null,
        DateTimeOffset? createdBefore = null,
        int limit = 50,
        int offset = 0,
        CancellationToken cancellationToken = default);

    /// <summary>Cancels an export job.</summary>
    Task<bool> CancelExportJobAsync(
        string tenantId,
        Guid jobId,
        string reason,
        string canceledBy,
        CancellationToken cancellationToken = default);

    /// <summary>Gets the quota status for export jobs.</summary>
    Task<ExportQuotaStatus> GetQuotaStatusAsync(
        string tenantId,
        string? exportType = null,
        CancellationToken cancellationToken = default);

    /// <summary>Ensures quota exists for an export type, creating with defaults if needed.</summary>
    Task<Quota> EnsureQuotaAsync(
        string tenantId,
        string exportType,
        string createdBy,
        CancellationToken cancellationToken = default);
}

/// <summary>
/// Export quota status information.
/// </summary>
public sealed record ExportQuotaStatus(
    /// <summary>Maximum concurrent active jobs.</summary>
    int MaxActive,

    /// <summary>Current active jobs.</summary>
    int CurrentActive,

    /// <summary>Maximum jobs per hour.</summary>
    int MaxPerHour,

    /// <summary>Current hour job count.</summary>
    int CurrentHourCount,

    /// <summary>Available tokens in bucket.</summary>
    double AvailableTokens,

    /// <summary>Whether quota is paused.</summary>
    bool Paused,

    /// <summary>Reason for pause (if paused).</summary>
    string? PauseReason,

    /// <summary>Whether more jobs can be created.</summary>
    bool CanCreateJob,

    /// <summary>Estimated wait time if quota exhausted.</summary>
    TimeSpan? EstimatedWaitTime);

/// <summary>
/// Default implementation of the export job service.
/// </summary>
public sealed class ExportJobService : IExportJobService
{
    private readonly IJobRepository _jobRepository;
    private readonly IQuotaRepository _quotaRepository;

    public ExportJobService(
        IJobRepository jobRepository,
        IQuotaRepository quotaRepository)
    {
        _jobRepository = jobRepository;
        _quotaRepository = quotaRepository;
    }

    public async Task<Job> CreateExportJobAsync(
        string tenantId,
        string exportType,
        ExportJobPayload payload,
        string createdBy,
        string? projectId = null,
        string? correlationId = null,
        int? priority = null,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
        ArgumentException.ThrowIfNullOrWhiteSpace(exportType);
        ArgumentNullException.ThrowIfNull(payload);

        if (!ExportJobTypes.IsExportJob(exportType))
        {
            throw new ArgumentException($"Invalid export job type: {exportType}", nameof(exportType));
        }

        var payloadJson = payload.ToJson();
        var payloadDigest = payload.ComputeDigest();
        var now = DateTimeOffset.UtcNow;

        var job = new Job(
            JobId: Guid.NewGuid(),
            TenantId: tenantId,
            ProjectId: projectId,
            RunId: null,
            JobType: exportType,
            Status: JobStatus.Pending,
            Priority: priority ?? ExportJobPolicy.QuotaDefaults.DefaultPriority,
            Attempt: 1,
            MaxAttempts: ExportJobPolicy.QuotaDefaults.MaxAttempts,
            PayloadDigest: payloadDigest,
            Payload: payloadJson,
            IdempotencyKey: $"{tenantId}:{exportType}:{payloadDigest}:{now.Ticks}",
            CorrelationId: correlationId,
            LeaseId: null,
            WorkerId: null,
            TaskRunnerId: null,
            LeaseUntil: null,
            CreatedAt: now,
            ScheduledAt: null,
            LeasedAt: null,
            CompletedAt: null,
            NotBefore: null,
            Reason: null,
            ReplayOf: null,
            CreatedBy: createdBy);

        await _jobRepository.CreateAsync(job, cancellationToken);

        return job;
    }

    public async Task<Job?> GetExportJobAsync(
        string tenantId,
        Guid jobId,
        CancellationToken cancellationToken = default)
    {
        var job = await _jobRepository.GetAsync(tenantId, jobId, cancellationToken);

        if (job is not null && !ExportJobTypes.IsExportJob(job.JobType))
        {
            return null; // Not an export job
        }

        return job;
    }

    public async Task<IReadOnlyList<Job>> ListExportJobsAsync(
        string tenantId,
        string? exportType = null,
        JobStatus? status = null,
        string? projectId = null,
        DateTimeOffset? createdAfter = null,
        DateTimeOffset? createdBefore = null,
        int limit = 50,
        int offset = 0,
        CancellationToken cancellationToken = default)
    {
        // If no specific export type, use the prefix to filter all export jobs
        var jobTypeFilter = exportType ?? ExportJobTypes.Prefix;

        var jobs = await _jobRepository.ListAsync(
            tenantId,
            status,
            jobTypeFilter,
            projectId,
            createdAfter,
            createdBefore,
            limit,
            offset,
            cancellationToken);

        // Additional filter for export jobs only (in case repository doesn't support prefix matching)
        return jobs.Where(j => ExportJobTypes.IsExportJob(j.JobType)).ToList();
    }

    public async Task<bool> CancelExportJobAsync(
        string tenantId,
        Guid jobId,
        string reason,
        string canceledBy,
        CancellationToken cancellationToken = default)
    {
        var job = await GetExportJobAsync(tenantId, jobId, cancellationToken);
        if (job is null)
        {
            return false;
        }

        // Can only cancel pending or leased jobs
        if (job.Status != JobStatus.Pending && job.Status != JobStatus.Leased)
        {
            return false;
        }

        var success = await _jobRepository.CancelAsync(tenantId, jobId, reason, canceledBy, cancellationToken);

        return success;
    }

    public async Task<ExportQuotaStatus> GetQuotaStatusAsync(
        string tenantId,
        string? exportType = null,
        CancellationToken cancellationToken = default)
    {
        var quota = await _quotaRepository.GetByTenantAndJobTypeAsync(
            tenantId,
            exportType,
            cancellationToken);

        if (quota is null)
        {
            // No quota configured - return defaults (unlimited)
            return new ExportQuotaStatus(
                MaxActive: int.MaxValue,
                CurrentActive: 0,
                MaxPerHour: int.MaxValue,
                CurrentHourCount: 0,
                AvailableTokens: double.MaxValue,
                Paused: false,
                PauseReason: null,
                CanCreateJob: true,
                EstimatedWaitTime: null);
        }

        var canCreate = !quota.Paused
            && quota.CurrentActive < quota.MaxActive
            && quota.CurrentTokens >= 1.0;

        TimeSpan? waitTime = null;
        if (!canCreate && !quota.Paused)
        {
            if (quota.CurrentActive >= quota.MaxActive)
            {
                // Estimate based on typical job duration
                var rateLimit = ExportJobPolicy.RateLimits.GetForJobType(exportType ?? ExportJobTypes.Ledger);
                waitTime = TimeSpan.FromSeconds(rateLimit.EstimatedDurationSeconds);
            }
            else if (quota.CurrentTokens < 1.0)
            {
                // Estimate based on refill rate
                var tokensNeeded = 1.0 - quota.CurrentTokens;
                waitTime = TimeSpan.FromSeconds(tokensNeeded / quota.RefillRate);
            }
        }

        return new ExportQuotaStatus(
            MaxActive: quota.MaxActive,
            CurrentActive: quota.CurrentActive,
            MaxPerHour: quota.MaxPerHour,
            CurrentHourCount: quota.CurrentHourCount,
            AvailableTokens: quota.CurrentTokens,
            Paused: quota.Paused,
            PauseReason: quota.PauseReason,
            CanCreateJob: canCreate,
            EstimatedWaitTime: waitTime);
    }

    public async Task<Quota> EnsureQuotaAsync(
        string tenantId,
        string exportType,
        string createdBy,
        CancellationToken cancellationToken = default)
    {
        var existing = await _quotaRepository.GetByTenantAndJobTypeAsync(
            tenantId,
            exportType,
            cancellationToken);

        if (existing is not null)
        {
            return existing;
        }

        var quota = ExportJobPolicy.CreateDefaultQuota(tenantId, exportType, createdBy);
        await _quotaRepository.CreateAsync(quota, cancellationToken);

        return quota;
    }
}

/// <summary>
/// Job repository interface used by the export job service.
/// </summary>
public interface IJobRepository
{
    Task CreateAsync(Job job, CancellationToken cancellationToken);
    Task<Job?> GetAsync(string tenantId, Guid jobId, CancellationToken cancellationToken);
    Task<IReadOnlyList<Job>> ListAsync(
        string tenantId,
        JobStatus? status,
        string? jobType,
        string? projectId,
        DateTimeOffset? createdAfter,
        DateTimeOffset? createdBefore,
        int limit,
        int offset,
        CancellationToken cancellationToken);
    Task<bool> CancelAsync(string tenantId, Guid jobId, string reason, string canceledBy, CancellationToken cancellationToken);
}

/// <summary>
/// Quota repository interface.
/// </summary>
public interface IQuotaRepository
{
    Task<Quota?> GetByTenantAndJobTypeAsync(string tenantId, string? jobType, CancellationToken cancellationToken);
    Task CreateAsync(Quota quota, CancellationToken cancellationToken);
}
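
// Call-site sketch (illustrative only, not part of this commit): a typical caller
// ensures the quota exists, checks headroom, and only then creates the job. The
// repository implementations and the payload instance are assumed to exist elsewhere.
internal static class ExportJobServiceUsageExample
{
    public static async Task<Job> RunAsync(
        IJobRepository jobRepository,
        IQuotaRepository quotaRepository,
        ExportJobPayload payload,
        CancellationToken ct)
    {
        IExportJobService exports = new ExportJobService(jobRepository, quotaRepository);

        await exports.EnsureQuotaAsync("tenant-a", ExportJobTypes.Ledger, "scheduler", ct);

        var status = await exports.GetQuotaStatusAsync("tenant-a", ExportJobTypes.Ledger, ct);
        if (!status.CanCreateJob)
        {
            // Respect the estimated wait instead of hammering the quota.
            await Task.Delay(status.EstimatedWaitTime ?? TimeSpan.FromSeconds(30), ct);
        }

        return await exports.CreateExportJobAsync(
            "tenant-a", ExportJobTypes.Ledger, payload, "scheduler", cancellationToken: ct);
    }
}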
@@ -0,0 +1,286 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Orchestrator.Core.Domain.Events;

namespace StellaOps.Orchestrator.Infrastructure.Events;

/// <summary>
/// Default implementation of event publisher with idempotency and retries.
/// </summary>
public sealed class OrchestratorEventPublisher : IEventPublisher
{
    private readonly IIdempotencyStore _idempotencyStore;
    private readonly INotifierBus _notifierBus;
    private readonly IEventSigner? _eventSigner;
    private readonly EventPublishOptions _options;
    private readonly ILogger<OrchestratorEventPublisher> _logger;

    public OrchestratorEventPublisher(
        IIdempotencyStore idempotencyStore,
        INotifierBus notifierBus,
        IOptions<EventPublishOptions> options,
        ILogger<OrchestratorEventPublisher> logger,
        IEventSigner? eventSigner = null)
    {
        _idempotencyStore = idempotencyStore;
        _notifierBus = notifierBus;
        _eventSigner = eventSigner;
        _options = options.Value;
        _logger = logger;
    }

    public async Task<bool> PublishAsync(EventEnvelope envelope, CancellationToken cancellationToken = default)
    {
        // Check idempotency
        if (!await _idempotencyStore.TryMarkAsync(envelope.IdempotencyKey, _options.IdempotencyTtl, cancellationToken))
        {
            _logger.LogDebug(
                "Event {EventId} deduplicated by idempotency key {IdempotencyKey}",
                envelope.EventId, envelope.IdempotencyKey);
            OrchestratorMetrics.EventDeduplicated(envelope.TenantId, envelope.EventType.ToEventTypeName());
            return false;
        }

        try
        {
            var message = await PrepareMessageAsync(envelope, cancellationToken);
            var channel = GetChannel(envelope);

            await PublishWithRetryAsync(channel, message, cancellationToken);

            OrchestratorMetrics.EventPublished(envelope.TenantId, envelope.EventType.ToEventTypeName());

            _logger.LogInformation(
                "Published event {EventId} type {EventType} to channel {Channel}",
                envelope.EventId, envelope.EventType, channel);

            return true;
        }
        catch (Exception ex)
        {
            // Remove idempotency key on failure to allow retry
            await _idempotencyStore.RemoveAsync(envelope.IdempotencyKey, cancellationToken);
            OrchestratorMetrics.EventPublishFailed(envelope.TenantId, envelope.EventType.ToEventTypeName());

            _logger.LogError(ex,
                "Failed to publish event {EventId} type {EventType}",
                envelope.EventId, envelope.EventType);

            throw;
        }
    }

    public async Task<BatchPublishResult> PublishBatchAsync(
        IEnumerable<EventEnvelope> envelopes,
        CancellationToken cancellationToken = default)
    {
        var published = 0;
        var deduplicated = 0;
        var failed = 0;
        var errors = new List<string>();

        foreach (var envelope in envelopes)
        {
            try
            {
                var result = await PublishAsync(envelope, cancellationToken);
                if (result)
                    published++;
                else
                    deduplicated++;
            }
            catch (Exception ex)
            {
                failed++;
                errors.Add($"{envelope.EventId}: {ex.Message}");
            }
        }

        return new BatchPublishResult(published, deduplicated, failed, errors);
    }

    public async Task<bool> IsPublishedAsync(string idempotencyKey, CancellationToken cancellationToken = default)
    {
        return await _idempotencyStore.ExistsAsync(idempotencyKey, cancellationToken);
    }

    private async Task<string> PrepareMessageAsync(EventEnvelope envelope, CancellationToken cancellationToken)
    {
        if (_options.SignWithDsse && _eventSigner is not null)
        {
            return await _eventSigner.SignAsync(envelope, cancellationToken);
        }

        return envelope.ToJson();
    }

    private static string GetChannel(EventEnvelope envelope)
    {
        return envelope.Notifier?.Channel ?? envelope.EventType switch
        {
            OrchestratorEventType.ExportCreated or
            OrchestratorEventType.ExportStarted or
            OrchestratorEventType.ExportCompleted or
            OrchestratorEventType.ExportFailed or
            OrchestratorEventType.ExportCanceled or
            OrchestratorEventType.ExportArchived or
            OrchestratorEventType.ExportExpired or
            OrchestratorEventType.ExportDeleted => "orch.exports",

            OrchestratorEventType.PolicyUpdated or
            OrchestratorEventType.PolicySimulated or
            OrchestratorEventType.PolicyApplied => "orch.policy",

            OrchestratorEventType.ScheduleCreated or
            OrchestratorEventType.ScheduleEnabled or
            OrchestratorEventType.ScheduleDisabled or
            OrchestratorEventType.ScheduleTriggered or
            OrchestratorEventType.ScheduleSkipped => "orch.schedules",

            OrchestratorEventType.AlertCreated or
            OrchestratorEventType.AlertAcknowledged or
            OrchestratorEventType.AlertResolved => "orch.alerts",

            OrchestratorEventType.PackRunCreated or
            OrchestratorEventType.PackRunStarted or
            OrchestratorEventType.PackRunLog or
            OrchestratorEventType.PackRunArtifact or
            OrchestratorEventType.PackRunCompleted or
            OrchestratorEventType.PackRunFailed => "orch.pack_runs",

            _ => "orch.jobs"
        };
    }

    private async Task PublishWithRetryAsync(string channel, string message, CancellationToken cancellationToken)
    {
        var attempt = 0;
        var delay = _options.RetryDelay;

        while (true)
        {
            try
            {
                await _notifierBus.SendAsync(channel, message, cancellationToken);
                return;
            }
            catch (Exception ex) when (attempt < _options.MaxRetries && IsTransient(ex))
            {
                attempt++;
                _logger.LogWarning(ex,
                    "Transient failure publishing to {Channel}, attempt {Attempt}/{MaxRetries}",
                    channel, attempt, _options.MaxRetries);

                await Task.Delay(delay, cancellationToken);
                delay = TimeSpan.FromMilliseconds(delay.TotalMilliseconds * 2); // Exponential backoff
            }
        }
    }

    private static bool IsTransient(Exception ex)
    {
        return ex is TimeoutException or
            TaskCanceledException or
            System.Net.Http.HttpRequestException or
            System.IO.IOException;
    }
}
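
// Composition sketch (illustrative only, not part of this commit): wiring the
// publisher through Microsoft.Extensions.DependencyInjection. The concrete store
// and bus registrations are assumed names; NullEventSigner (defined below) is the
// test signer, so a production deployment would register a real signer instead.
//
//     services.Configure<EventPublishOptions>(configuration.GetSection("Orchestrator:Events"));
//     services.AddSingleton<IIdempotencyStore, RedisIdempotencyStore>();   // assumed implementation
//     services.AddSingleton<INotifierBus, NatsNotifierBus>();              // assumed implementation
//     services.AddSingleton<IEventSigner>(NullEventSigner.Instance);
//     services.AddSingleton<IEventPublisher, OrchestratorEventPublisher>();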

/// <summary>
/// In-memory notifier bus for testing; captures sent messages instead of delivering them.
/// </summary>
public sealed class NullNotifierBus : INotifierBus
{
    /// <summary>Singleton instance.</summary>
    public static NullNotifierBus Instance { get; } = new();

    private readonly List<(string Channel, string Message)> _messages = new();
    private readonly object _lock = new();

    public Task SendAsync(string channel, string message, CancellationToken cancellationToken = default)
    {
        lock (_lock)
        {
            _messages.Add((channel, message));
        }
        return Task.CompletedTask;
    }

    public Task SendBatchAsync(string channel, IEnumerable<string> messages, CancellationToken cancellationToken = default)
    {
        lock (_lock)
        {
            foreach (var message in messages)
            {
                _messages.Add((channel, message));
            }
        }
        return Task.CompletedTask;
    }

    /// <summary>Gets all sent messages (for testing).</summary>
    public IReadOnlyList<(string Channel, string Message)> GetMessages()
    {
        lock (_lock)
        {
            return _messages.ToList();
        }
    }

    /// <summary>Gets messages for a specific channel (for testing).</summary>
    public IReadOnlyList<string> GetMessages(string channel)
    {
        lock (_lock)
        {
            return _messages.Where(m => m.Channel == channel).Select(m => m.Message).ToList();
        }
    }

    /// <summary>Clears all messages (for testing).</summary>
    public void Clear()
    {
        lock (_lock)
        {
            _messages.Clear();
        }
    }
}

/// <summary>
/// Null implementation of event signer for testing.
/// </summary>
public sealed class NullEventSigner : IEventSigner
{
    /// <summary>Singleton instance.</summary>
    public static NullEventSigner Instance { get; } = new();

    private NullEventSigner() { }

    public Task<string> SignAsync(EventEnvelope envelope, CancellationToken cancellationToken = default)
    {
        // Return envelope JSON wrapped in mock DSSE structure
        var payload = envelope.ToJson();
        var dsse = $"{{\"payloadType\":\"application/vnd.orch.event+json\",\"payload\":\"{Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(payload))}\",\"signatures\":[]}}";
        return Task.FromResult(dsse);
    }

    public Task<EventEnvelope?> VerifyAsync(string signedPayload, CancellationToken cancellationToken = default)
    {
        // Extract and parse for testing
        try
        {
            var doc = System.Text.Json.JsonDocument.Parse(signedPayload);
            if (doc.RootElement.TryGetProperty("payload", out var payloadElement))
            {
                var payloadBytes = Convert.FromBase64String(payloadElement.GetString()!);
                var json = System.Text.Encoding.UTF8.GetString(payloadBytes);
                return Task.FromResult(EventEnvelope.FromJson(json));
            }
        }
        catch
        {
            // Ignore parse errors
        }
        return Task.FromResult<EventEnvelope?>(null);
    }
}
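
// Test sketch (illustrative only, not part of this commit): a unit test can drive
// the publisher end to end against these doubles and assert on the captured channel
// traffic. The idempotency store and export-typed envelope are assumed test inputs.
internal static class OrchestratorEventPublisherTestExample
{
    public static async Task PublishRoutesExportEventsToExportsChannel(
        IIdempotencyStore idempotencyStore, EventEnvelope exportEnvelope)
    {
        var bus = new NullNotifierBus();
        var publisher = new OrchestratorEventPublisher(
            idempotencyStore,
            bus,
            Microsoft.Extensions.Options.Options.Create(new EventPublishOptions()),
            Microsoft.Extensions.Logging.Abstractions.NullLogger<OrchestratorEventPublisher>.Instance,
            NullEventSigner.Instance);

        var published = await publisher.PublishAsync(exportEnvelope);

        // Export-typed events should land on the exports channel exactly once.
        if (!published || bus.GetMessages("orch.exports").Count != 1)
            throw new InvalidOperationException("Export event was not routed to orch.exports.");
    }
}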
@@ -721,4 +721,637 @@ public static class OrchestratorMetrics
    public static void ScaleDownSignal(string reason)
        => ScaleDownSignals.Add(1, new KeyValuePair<string, object?>("reason", reason));

    // Export job metrics
    private static readonly Counter<long> ExportJobsCreated = Meter.CreateCounter<long>(
        "orchestrator.export.jobs_created",
        description: "Total export jobs created");

    private static readonly Counter<long> ExportJobsCompleted = Meter.CreateCounter<long>(
        "orchestrator.export.jobs_completed",
        description: "Total export jobs completed successfully");

    private static readonly Counter<long> ExportJobsFailed = Meter.CreateCounter<long>(
        "orchestrator.export.jobs_failed",
        description: "Total export jobs that failed");

    private static readonly Counter<long> ExportJobsCanceled = Meter.CreateCounter<long>(
        "orchestrator.export.jobs_canceled",
        description: "Total export jobs canceled");

    private static readonly Counter<long> ExportHeartbeats = Meter.CreateCounter<long>(
        "orchestrator.export.heartbeats",
        description: "Total export worker heartbeats");

    private static readonly Histogram<double> ExportDuration = Meter.CreateHistogram<double>(
        "orchestrator.export.duration.seconds",
        unit: "s",
        description: "Export job duration");

    private static readonly Histogram<long> ExportSize = Meter.CreateHistogram<long>(
        "orchestrator.export.size.bytes",
        unit: "bytes",
        description: "Export output size");

    private static readonly Histogram<long> ExportEntryCount = Meter.CreateHistogram<long>(
        "orchestrator.export.entry_count",
        unit: "entries",
        description: "Number of entries exported");

    private static readonly UpDownCounter<long> ExportJobsActive = Meter.CreateUpDownCounter<long>(
        "orchestrator.export.jobs_active",
        description: "Currently active export jobs");

    private static readonly Counter<long> ExportQuotaDenials = Meter.CreateCounter<long>(
        "orchestrator.export.quota_denials",
        description: "Export jobs denied due to quota");

    public static void ExportJobCreated(string tenantId, string exportType, string format)
        => ExportJobsCreated.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("format", format));

    public static void ExportJobCompleted(string tenantId, string exportType, string format)
        => ExportJobsCompleted.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("format", format));

    public static void ExportJobFailed(string tenantId, string exportType, string reason)
        => ExportJobsFailed.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("reason", reason));

    public static void ExportJobCanceled(string tenantId, string exportType)
        => ExportJobsCanceled.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));

    public static void ExportHeartbeat(string tenantId, string exportType)
        => ExportHeartbeats.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));

    public static void RecordExportDuration(string tenantId, string exportType, string format, double durationSeconds)
        => ExportDuration.Record(durationSeconds,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("format", format));

    public static void RecordExportSize(string tenantId, string exportType, string format, long sizeBytes)
        => ExportSize.Record(sizeBytes,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("format", format));

    public static void RecordExportEntryCount(string tenantId, string exportType, long entryCount)
        => ExportEntryCount.Record(entryCount,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));

    public static void ExportJobStarted(string tenantId, string exportType)
        => ExportJobsActive.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));

    public static void ExportJobFinished(string tenantId, string exportType)
        => ExportJobsActive.Add(-1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));

    public static void ExportQuotaDenied(string tenantId, string exportType, string reason)
        => ExportQuotaDenials.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("reason", reason));

    // Export distribution metrics
    private static readonly Counter<long> ExportDistributionsCreated = Meter.CreateCounter<long>(
        "orchestrator.export.distributions_created",
        description: "Total export distributions created");

    private static readonly Counter<long> ExportReplicationsStarted = Meter.CreateCounter<long>(
        "orchestrator.export.replications_started",
        description: "Export replication operations started");

    private static readonly Counter<long> ExportReplicationsCompleted = Meter.CreateCounter<long>(
        "orchestrator.export.replications_completed",
        description: "Export replication operations completed");

    private static readonly Counter<long> ExportReplicationsFailed = Meter.CreateCounter<long>(
        "orchestrator.export.replications_failed",
        description: "Export replication operations failed");

    private static readonly Counter<long> ExportDownloadsGenerated = Meter.CreateCounter<long>(
        "orchestrator.export.downloads_generated",
        description: "Pre-signed download URLs generated");

    public static void ExportDistributionCreated(string tenantId, string exportType, string storageProvider)
        => ExportDistributionsCreated.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("storage_provider", storageProvider));

    public static void ExportReplicationStarted(string tenantId, string exportType, string target)
        => ExportReplicationsStarted.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("target", target));

    public static void ExportReplicationCompleted(string tenantId, string exportType, string target)
        => ExportReplicationsCompleted.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("target", target));

    public static void ExportReplicationFailed(string tenantId, string exportType, string target, string error)
        => ExportReplicationsFailed.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("target", target),
            new KeyValuePair<string, object?>("error", error));

    public static void ExportDownloadGenerated(string tenantId, string exportType)
        => ExportDownloadsGenerated.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));

    // Export retention metrics
    private static readonly Counter<long> ExportRetentionsApplied = Meter.CreateCounter<long>(
        "orchestrator.export.retentions_applied",
        description: "Retention policies applied to exports");

    private static readonly Counter<long> ExportRetentionsExtended = Meter.CreateCounter<long>(
        "orchestrator.export.retentions_extended",
        description: "Export retention periods extended");

    private static readonly Counter<long> ExportLegalHoldsPlaced = Meter.CreateCounter<long>(
        "orchestrator.export.legal_holds_placed",
        description: "Legal holds placed on exports");

    private static readonly Counter<long> ExportLegalHoldsReleased = Meter.CreateCounter<long>(
        "orchestrator.export.legal_holds_released",
        description: "Legal holds released on exports");

    private static readonly Counter<long> ExportsArchived = Meter.CreateCounter<long>(
        "orchestrator.export.archived",
        description: "Exports moved to archive tier");

    private static readonly Counter<long> ExportsExpired = Meter.CreateCounter<long>(
        "orchestrator.export.expired",
        description: "Exports that have expired");

    private static readonly Counter<long> ExportsDeleted = Meter.CreateCounter<long>(
        "orchestrator.export.deleted",
        description: "Exports deleted after retention");

    private static readonly UpDownCounter<long> ExportsWithLegalHold = Meter.CreateUpDownCounter<long>(
        "orchestrator.export.with_legal_hold",
        description: "Current exports under legal hold");

    public static void ExportRetentionApplied(string tenantId, string exportType, string policyName)
        => ExportRetentionsApplied.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("policy_name", policyName));

    public static void ExportRetentionExtended(string tenantId, string exportType, int extensionCount)
        => ExportRetentionsExtended.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("extension_count", extensionCount));

    public static void ExportLegalHoldPlaced(string tenantId, string exportType)
    {
        ExportLegalHoldsPlaced.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
        ExportsWithLegalHold.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
    }

    public static void ExportLegalHoldReleased(string tenantId, string exportType)
    {
        ExportLegalHoldsReleased.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
        ExportsWithLegalHold.Add(-1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
    }

    public static void ExportArchived(string tenantId, string exportType)
        => ExportsArchived.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));

    public static void ExportExpired(string tenantId, string exportType)
        => ExportsExpired.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));

    public static void ExportDeleted(string tenantId, string exportType)
        => ExportsDeleted.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));

    // Export Scheduling Metrics
    private static readonly Counter<long> ScheduledExportsCreated = Meter.CreateCounter<long>(
        "orchestrator.export.schedules_created",
        description: "Export schedules created");

    private static readonly Counter<long> ScheduledExportsEnabled = Meter.CreateCounter<long>(
        "orchestrator.export.schedules_enabled",
        description: "Export schedules enabled");

    private static readonly Counter<long> ScheduledExportsDisabled = Meter.CreateCounter<long>(
        "orchestrator.export.schedules_disabled",
        description: "Export schedules disabled");

    private static readonly Counter<long> ScheduledExportsTriggered = Meter.CreateCounter<long>(
        "orchestrator.export.schedules_triggered",
        description: "Scheduled export runs triggered");

    private static readonly Counter<long> ScheduledExportsSkipped = Meter.CreateCounter<long>(
        "orchestrator.export.schedules_skipped",
        description: "Scheduled export runs skipped (already running)");

    private static readonly Counter<long> ScheduledExportsSucceeded = Meter.CreateCounter<long>(
        "orchestrator.export.schedules_succeeded",
        description: "Scheduled export runs succeeded");

    private static readonly Counter<long> ScheduledExportsFailed = Meter.CreateCounter<long>(
        "orchestrator.export.schedules_failed",
        description: "Scheduled export runs failed");

    private static readonly UpDownCounter<long> ActiveSchedules = Meter.CreateUpDownCounter<long>(
        "orchestrator.export.active_schedules",
        description: "Currently active export schedules");

    public static void ExportScheduleCreated(string tenantId, string exportType)
    {
        ScheduledExportsCreated.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
        ActiveSchedules.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
    }

    public static void ExportScheduleEnabled(string tenantId, string exportType)
    {
        ScheduledExportsEnabled.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
        ActiveSchedules.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
    }

    public static void ExportScheduleDisabled(string tenantId, string exportType)
    {
        ScheduledExportsDisabled.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
        ActiveSchedules.Add(-1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
    }

    public static void ExportScheduleTriggered(string tenantId, string exportType, string scheduleName)
        => ScheduledExportsTriggered.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("schedule_name", scheduleName));

    public static void ExportScheduleSkipped(string tenantId, string exportType, string scheduleName)
        => ScheduledExportsSkipped.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("schedule_name", scheduleName));

    public static void ExportScheduleSucceeded(string tenantId, string exportType, string scheduleName)
        => ScheduledExportsSucceeded.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("schedule_name", scheduleName));

    public static void ExportScheduleFailed(string tenantId, string exportType, string scheduleName)
        => ScheduledExportsFailed.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("schedule_name", scheduleName));

    // Retention Pruning Metrics
    private static readonly Counter<long> RetentionPruneRuns = Meter.CreateCounter<long>(
        "orchestrator.export.prune_runs",
        description: "Retention prune runs executed");

    private static readonly Counter<long> RetentionPruneArchived = Meter.CreateCounter<long>(
        "orchestrator.export.prune_archived",
        description: "Exports archived during pruning");

    private static readonly Counter<long> RetentionPruneDeleted = Meter.CreateCounter<long>(
        "orchestrator.export.prune_deleted",
        description: "Exports deleted during pruning");

    private static readonly Counter<long> RetentionPruneSkipped = Meter.CreateCounter<long>(
        "orchestrator.export.prune_skipped",
        description: "Exports skipped during pruning (legal hold, etc.)");

    private static readonly Counter<long> RetentionPruneErrors = Meter.CreateCounter<long>(
        "orchestrator.export.prune_errors",
        description: "Errors during retention pruning");

    private static readonly Histogram<double> RetentionPruneDuration = Meter.CreateHistogram<double>(
        "orchestrator.export.prune_duration.seconds",
        unit: "s",
        description: "Duration of prune operations");

    public static void ExportPruneRun(string? tenantId, string? exportType)
        => RetentionPruneRuns.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId ?? "global"),
            new KeyValuePair<string, object?>("export_type", exportType ?? "all"));

    public static void ExportPruneArchived(string? tenantId, string? exportType, int count)
        => RetentionPruneArchived.Add(count,
            new KeyValuePair<string, object?>("tenant_id", tenantId ?? "global"),
            new KeyValuePair<string, object?>("export_type", exportType ?? "all"));

    public static void ExportPruneDeleted(string? tenantId, string? exportType, int count)
        => RetentionPruneDeleted.Add(count,
            new KeyValuePair<string, object?>("tenant_id", tenantId ?? "global"),
            new KeyValuePair<string, object?>("export_type", exportType ?? "all"));

    public static void ExportPruneSkipped(string? tenantId, string? exportType, int count)
        => RetentionPruneSkipped.Add(count,
            new KeyValuePair<string, object?>("tenant_id", tenantId ?? "global"),
            new KeyValuePair<string, object?>("export_type", exportType ?? "all"));

    public static void ExportPruneError(string? tenantId, string? exportType)
        => RetentionPruneErrors.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId ?? "global"),
            new KeyValuePair<string, object?>("export_type", exportType ?? "all"));

    public static void ExportPruneDuration(string? tenantId, string? exportType, double seconds)
        => RetentionPruneDuration.Record(seconds,
            new KeyValuePair<string, object?>("tenant_id", tenantId ?? "global"),
            new KeyValuePair<string, object?>("export_type", exportType ?? "all"));

    // Export Alerting Metrics
    private static readonly Counter<long> ExportAlertsCreated = Meter.CreateCounter<long>(
        "orchestrator.export.alerts_created",
        description: "Export alerts created");

    private static readonly Counter<long> ExportAlertsAcknowledged = Meter.CreateCounter<long>(
        "orchestrator.export.alerts_acknowledged",
        description: "Export alerts acknowledged");

    private static readonly Counter<long> ExportAlertsResolved = Meter.CreateCounter<long>(
        "orchestrator.export.alerts_resolved",
        description: "Export alerts resolved");

    private static readonly Counter<long> ExportAlertsSuppressed = Meter.CreateCounter<long>(
        "orchestrator.export.alerts_suppressed",
        description: "Export alerts suppressed by cooldown");

    private static readonly UpDownCounter<long> ActiveExportAlerts = Meter.CreateUpDownCounter<long>(
        "orchestrator.export.active_alerts",
        description: "Currently active export alerts");

    public static void ExportAlertCreated(string tenantId, string exportType, string severity)
    {
        ExportAlertsCreated.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("severity", severity));
        ActiveExportAlerts.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("severity", severity));
    }

    public static void ExportAlertAcknowledged(string tenantId, string exportType, string severity)
        => ExportAlertsAcknowledged.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("severity", severity));

    public static void ExportAlertResolved(string tenantId, string exportType, string severity)
    {
        ExportAlertsResolved.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("severity", severity));
        ActiveExportAlerts.Add(-1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType),
            new KeyValuePair<string, object?>("severity", severity));
    }

    public static void ExportAlertSuppressed(string tenantId, string exportType)
        => ExportAlertsSuppressed.Add(1,
            new KeyValuePair<string, object?>("tenant_id", tenantId),
            new KeyValuePair<string, object?>("export_type", exportType));
|
||||
// Event Publishing Metrics
|
||||
private static readonly Counter<long> EventsPublished = Meter.CreateCounter<long>(
|
||||
"orchestrator.events.published",
|
||||
description: "Total events published to notifier bus");
|
||||
|
||||
private static readonly Counter<long> EventsDeduplicated = Meter.CreateCounter<long>(
|
||||
"orchestrator.events.deduplicated",
|
||||
description: "Total events deduplicated by idempotency key");
|
||||
|
||||
private static readonly Counter<long> EventsPublishFailed = Meter.CreateCounter<long>(
|
||||
"orchestrator.events.publish_failed",
|
||||
description: "Total events that failed to publish");
|
||||
|
||||
private static readonly Counter<long> EventsSignedCounter = Meter.CreateCounter<long>(
|
||||
"orchestrator.events.signed",
|
||||
description: "Total events signed with DSSE");
|
||||
|
||||
private static readonly Histogram<double> EventPublishLatency = Meter.CreateHistogram<double>(
|
||||
"orchestrator.events.publish_latency.ms",
|
||||
unit: "ms",
|
||||
description: "Event publish latency in milliseconds");
|
||||
|
||||
private static readonly Counter<long> EventRetryAttempts = Meter.CreateCounter<long>(
|
||||
"orchestrator.events.retry_attempts",
|
||||
description: "Total event publish retry attempts");
|
||||
|
||||
public static void EventPublished(string tenantId, string eventType)
|
||||
=> EventsPublished.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("event_type", eventType));
|
||||
|
||||
public static void EventDeduplicated(string tenantId, string eventType)
|
||||
=> EventsDeduplicated.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("event_type", eventType));
|
||||
|
||||
public static void EventPublishFailed(string tenantId, string eventType)
|
||||
=> EventsPublishFailed.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("event_type", eventType));
|
||||
|
||||
public static void EventSigned(string tenantId, string eventType)
|
||||
=> EventsSignedCounter.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("event_type", eventType));
|
||||
|
||||
public static void RecordEventPublishLatency(string tenantId, string eventType, double latencyMs)
|
||||
=> EventPublishLatency.Record(latencyMs,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("event_type", eventType));
|
||||
|
||||
public static void EventRetryAttempt(string tenantId, string eventType, int attempt)
|
||||
=> EventRetryAttempts.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("event_type", eventType),
|
||||
new KeyValuePair<string, object?>("attempt", attempt));
|
||||
|
||||
// Pack Run Metrics
|
||||
private static readonly Counter<long> PackRunsCreated = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.created",
|
||||
description: "Total pack runs created");
|
||||
|
||||
private static readonly Counter<long> PackRunsScheduled = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.scheduled",
|
||||
description: "Total pack runs scheduled");
|
||||
|
||||
private static readonly Counter<long> PackRunsLeased = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.leased",
|
||||
description: "Total pack runs leased to task runners");
|
||||
|
||||
private static readonly Counter<long> PackRunsStarted = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.started",
|
||||
description: "Total pack runs started executing");
|
||||
|
||||
private static readonly Counter<long> PackRunsCompleted = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.completed",
|
||||
description: "Total pack runs completed");
|
||||
|
||||
private static readonly Counter<long> PackRunsFailed = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.failed",
|
||||
description: "Total pack runs failed");
|
||||
|
||||
private static readonly Counter<long> PackRunsCanceled = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.canceled",
|
||||
description: "Total pack runs canceled");
|
||||
|
||||
private static readonly Counter<long> PackRunsTimedOut = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.timed_out",
|
||||
description: "Total pack runs that timed out");
|
||||
|
||||
private static readonly Counter<long> PackRunHeartbeats = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.heartbeats",
|
||||
description: "Total pack run heartbeats received");
|
||||
|
||||
private static readonly Counter<long> PackRunLogsAppended = Meter.CreateCounter<long>(
|
||||
"orchestrator.pack_runs.logs_appended",
|
||||
description: "Total pack run log entries appended");
|
||||
|
||||
private static readonly Histogram<double> PackRunDuration = Meter.CreateHistogram<double>(
|
||||
"orchestrator.pack_run.duration.seconds",
|
||||
unit: "s",
|
||||
description: "Pack run execution duration");
|
||||
|
||||
private static readonly UpDownCounter<long> PackRunsActive = Meter.CreateUpDownCounter<long>(
|
||||
"orchestrator.pack_runs.active",
|
||||
description: "Currently active pack runs");
|
||||
|
||||
private static readonly Histogram<long> PackRunLogCount = Meter.CreateHistogram<long>(
|
||||
"orchestrator.pack_run.log_count",
|
||||
unit: "entries",
|
||||
description: "Number of log entries per pack run");
|
||||
|
||||
public static void PackRunCreated(string tenantId, string packId)
|
||||
{
|
||||
PackRunsCreated.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
PackRunsActive.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
}
|
||||
|
||||
public static void PackRunScheduled(string tenantId, string packId)
|
||||
=> PackRunsScheduled.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
|
||||
public static void PackRunLeased(string tenantId, string packId)
|
||||
=> PackRunsLeased.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
|
||||
public static void PackRunStarted(string tenantId, string packId)
|
||||
=> PackRunsStarted.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
|
||||
public static void PackRunCompleted(string tenantId, string packId, string status)
|
||||
{
|
||||
PackRunsCompleted.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId),
|
||||
new KeyValuePair<string, object?>("status", status));
|
||||
PackRunsActive.Add(-1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
}
|
||||
|
||||
public static void PackRunFailed(string tenantId, string packId)
|
||||
{
|
||||
PackRunsFailed.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
PackRunsActive.Add(-1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
}
|
||||
|
||||
public static void PackRunCanceled(string tenantId, string packId)
|
||||
{
|
||||
PackRunsCanceled.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
PackRunsActive.Add(-1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
}
|
||||
|
||||
public static void PackRunTimedOut(string tenantId, string packId)
|
||||
{
|
||||
PackRunsTimedOut.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
PackRunsActive.Add(-1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
}
|
||||
|
||||
public static void PackRunHeartbeatReceived(string tenantId, string packId)
|
||||
=> PackRunHeartbeats.Add(1,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
|
||||
public static void PackRunLogAppended(string tenantId, string packId, int count)
|
||||
=> PackRunLogsAppended.Add(count,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
|
||||
public static void RecordPackRunDuration(string tenantId, string packId, double durationSeconds)
|
||||
=> PackRunDuration.Record(durationSeconds,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
|
||||
public static void RecordPackRunLogCount(string tenantId, string packId, long logCount)
|
||||
=> PackRunLogCount.Record(logCount,
|
||||
new KeyValuePair<string, object?>("tenant_id", tenantId),
|
||||
new KeyValuePair<string, object?>("pack_id", packId));
|
||||
}
|
||||
|
||||
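Usage note (not part of the commit): the lifecycle helpers above pair a monotonic counter with a +1/-1 adjustment of the `orchestrator.pack_runs.active` gauge, so a caller must invoke exactly one terminal helper (Completed/Failed/Canceled/TimedOut) per run to keep the gauge balanced. A minimal sketch, assuming the containing class is named OrchestratorMetrics and "succeeded" is a valid status label:

    // Hypothetical caller; class and status names are assumptions.
    var sw = System.Diagnostics.Stopwatch.StartNew();
    OrchestratorMetrics.PackRunCreated("tenant-1", "pack-42");   // pack_runs.active +1
    OrchestratorMetrics.PackRunStarted("tenant-1", "pack-42");
    // ... execute the pack ...
    OrchestratorMetrics.PackRunCompleted("tenant-1", "pack-42", "succeeded"); // active -1
    OrchestratorMetrics.RecordPackRunDuration("tenant-1", "pack-42", sw.Elapsed.TotalSeconds);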
@@ -0,0 +1,185 @@
using StellaOps.Orchestrator.Core.Domain;

namespace StellaOps.Orchestrator.Infrastructure.Repositories;

/// <summary>
/// Repository interface for pack run persistence operations.
/// </summary>
public interface IPackRunRepository
{
    /// <summary>
    /// Gets a pack run by ID.
    /// </summary>
    Task<PackRun?> GetByIdAsync(string tenantId, Guid packRunId, CancellationToken cancellationToken);

    /// <summary>
    /// Gets a pack run by idempotency key.
    /// </summary>
    Task<PackRun?> GetByIdempotencyKeyAsync(string tenantId, string idempotencyKey, CancellationToken cancellationToken);

    /// <summary>
    /// Creates a new pack run.
    /// </summary>
    Task CreateAsync(PackRun packRun, CancellationToken cancellationToken);

    /// <summary>
    /// Updates pack run status and related fields.
    /// </summary>
    Task UpdateStatusAsync(
        string tenantId,
        Guid packRunId,
        PackRunStatus status,
        int attempt,
        Guid? leaseId,
        string? taskRunnerId,
        DateTimeOffset? leaseUntil,
        DateTimeOffset? scheduledAt,
        DateTimeOffset? leasedAt,
        DateTimeOffset? startedAt,
        DateTimeOffset? completedAt,
        DateTimeOffset? notBefore,
        string? reason,
        int? exitCode,
        long? durationMs,
        CancellationToken cancellationToken);

    /// <summary>
    /// Leases the next available pack run for execution.
    /// Returns null if no pack runs are available.
    /// </summary>
    Task<PackRun?> LeaseNextAsync(
        string tenantId,
        string? packId,
        Guid leaseId,
        string taskRunnerId,
        DateTimeOffset leaseUntil,
        CancellationToken cancellationToken);

    /// <summary>
    /// Extends an existing lease.
    /// Returns false if lease has expired or doesn't match.
    /// </summary>
    Task<bool> ExtendLeaseAsync(
        string tenantId,
        Guid packRunId,
        Guid leaseId,
        DateTimeOffset newLeaseUntil,
        CancellationToken cancellationToken);

    /// <summary>
    /// Releases a lease (on failure or timeout).
    /// </summary>
    Task ReleaseLeaseAsync(
        string tenantId,
        Guid packRunId,
        Guid leaseId,
        PackRunStatus newStatus,
        string? reason,
        CancellationToken cancellationToken);

    /// <summary>
    /// Lists pack runs with pagination and filters.
    /// </summary>
    Task<IReadOnlyList<PackRun>> ListAsync(
        string tenantId,
        string? packId,
        PackRunStatus? status,
        string? projectId,
        DateTimeOffset? createdAfter,
        DateTimeOffset? createdBefore,
        int limit,
        int offset,
        CancellationToken cancellationToken);

    /// <summary>
    /// Counts pack runs matching the filters.
    /// </summary>
    Task<int> CountAsync(
        string tenantId,
        string? packId,
        PackRunStatus? status,
        string? projectId,
        CancellationToken cancellationToken);

    /// <summary>
    /// Gets pack runs with expired leases (for timeout handling).
    /// </summary>
    Task<IReadOnlyList<PackRun>> GetExpiredLeasesAsync(
        DateTimeOffset cutoff,
        int limit,
        CancellationToken cancellationToken);

    /// <summary>
    /// Cancels pending pack runs matching the filters.
    /// Returns the count of canceled pack runs.
    /// </summary>
    Task<int> CancelPendingAsync(
        string tenantId,
        string? packId,
        string reason,
        CancellationToken cancellationToken);
}

/// <summary>
/// Repository interface for pack run log persistence operations.
/// </summary>
public interface IPackRunLogRepository
{
    /// <summary>
    /// Appends a single log entry.
    /// </summary>
    Task AppendAsync(PackRunLog log, CancellationToken cancellationToken);

    /// <summary>
    /// Appends a batch of log entries.
    /// </summary>
    Task AppendBatchAsync(IReadOnlyList<PackRunLog> logs, CancellationToken cancellationToken);

    /// <summary>
    /// Gets log entries for a pack run with cursor-based pagination.
    /// </summary>
    Task<PackRunLogBatch> GetLogsAsync(
        string tenantId,
        Guid packRunId,
        long afterSequence,
        int limit,
        CancellationToken cancellationToken);

    /// <summary>
    /// Gets the current log count and latest sequence for a pack run.
    /// </summary>
    Task<(long Count, long LatestSequence)> GetLogStatsAsync(
        string tenantId,
        Guid packRunId,
        CancellationToken cancellationToken);

    /// <summary>
    /// Gets log entries matching a level filter.
    /// </summary>
    Task<PackRunLogBatch> GetLogsByLevelAsync(
        string tenantId,
        Guid packRunId,
        LogLevel minLevel,
        long afterSequence,
        int limit,
        CancellationToken cancellationToken);

    /// <summary>
    /// Searches log messages for a pattern (simple substring match).
    /// </summary>
    Task<PackRunLogBatch> SearchLogsAsync(
        string tenantId,
        Guid packRunId,
        string pattern,
        long afterSequence,
        int limit,
        CancellationToken cancellationToken);

    /// <summary>
    /// Deletes all logs for a pack run (for cleanup/retention).
    /// </summary>
    Task<long> DeleteLogsAsync(
        string tenantId,
        Guid packRunId,
        CancellationToken cancellationToken);
}
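A hedged sketch of how a task runner might drive the lease methods above; the loop shape, the PackRun.PackRunId property, and the PackRunStatus.Failed member are assumptions not confirmed by this hunk:

    // Hypothetical runner-side lease handling (sketch only).
    static async Task RunOnceAsync(IPackRunRepository repo, CancellationToken ct)
    {
        var leaseId = Guid.NewGuid();
        var run = await repo.LeaseNextAsync(
            "tenant-1", packId: null, leaseId, "runner-01",
            leaseUntil: DateTimeOffset.UtcNow.AddMinutes(10), ct);
        if (run is null) return; // nothing available to lease

        try
        {
            // Heartbeat: keep extending the lease while the pack executes.
            var stillOwned = await repo.ExtendLeaseAsync(
                "tenant-1", run.PackRunId, leaseId, DateTimeOffset.UtcNow.AddMinutes(10), ct);
            if (!stillOwned) return; // lease expired or no longer matches
        }
        catch (Exception ex)
        {
            // On failure, hand the run back with a terminal status and reason.
            await repo.ReleaseLeaseAsync(
                "tenant-1", run.PackRunId, leaseId, PackRunStatus.Failed, ex.Message, ct);
        }
    }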
@@ -0,0 +1,911 @@
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Orchestrator.Core.Domain.Events;
using StellaOps.Orchestrator.Infrastructure.Events;

namespace StellaOps.Orchestrator.Tests.Events;

/// <summary>
/// Tests for event envelope and publishing infrastructure.
/// </summary>
public class EventPublishingTests
{
    private static readonly CancellationToken CT = CancellationToken.None;

    #region EventEnvelope Tests

    [Fact]
    public void EventEnvelope_Create_GeneratesIdAndTimestamp()
    {
        var actor = EventActor.Service("test-service", "orch:read");

        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.JobCreated,
            tenantId: "tenant-1",
            actor: actor);

        Assert.NotNull(envelope.EventId);
        Assert.StartsWith("urn:orch:event:", envelope.EventId);
        Assert.Equal(EventEnvelope.CurrentSchemaVersion, envelope.SchemaVersion);
        Assert.Equal(OrchestratorEventType.JobCreated, envelope.EventType);
        Assert.Equal("tenant-1", envelope.TenantId);
        Assert.True(envelope.OccurredAt <= DateTimeOffset.UtcNow);
        Assert.NotNull(envelope.IdempotencyKey);
    }

    [Fact]
    public void EventEnvelope_ForJob_IncludesJobMetadata()
    {
        var actor = EventActor.Worker("worker-1", "go-sdk");
        var job = EventJob.Completed("job-123", "pack-run", 1);

        var envelope = EventEnvelope.ForJob(
            eventType: OrchestratorEventType.JobCompleted,
            tenantId: "tenant-1",
            actor: actor,
            job: job,
            correlationId: "corr-456",
            projectId: "proj-789");

        Assert.NotNull(envelope.Job);
        Assert.Equal("job-123", envelope.Job!.Id);
        Assert.Equal("pack-run", envelope.Job.Type);
        Assert.Equal("completed", envelope.Job.Status);
        Assert.Equal(1, envelope.Job.Attempt);
        Assert.Equal("corr-456", envelope.CorrelationId);
        Assert.Equal("proj-789", envelope.ProjectId);
    }

    [Fact]
    public void EventEnvelope_ForExport_CreatesExportEnvelope()
    {
        var actor = EventActor.System("scheduler");
        var exportJob = EventJob.Create("exp-123", "export.sbom", "running", attempt: 1);

        var envelope = EventEnvelope.ForExport(
            eventType: OrchestratorEventType.ExportStarted,
            tenantId: "tenant-1",
            actor: actor,
            exportJob: exportJob);

        Assert.Equal(OrchestratorEventType.ExportStarted, envelope.EventType);
        Assert.NotNull(envelope.Job);
        Assert.Equal("exp-123", envelope.Job!.Id);
    }

    [Fact]
    public void EventEnvelope_ForPolicy_CreatesPolicyEnvelope()
    {
        var actor = EventActor.User("admin@example.com", "policy:write");

        var envelope = EventEnvelope.ForPolicy(
            eventType: OrchestratorEventType.PolicyUpdated,
            tenantId: "tenant-1",
            actor: actor,
            projectId: "proj-1");

        Assert.Equal(OrchestratorEventType.PolicyUpdated, envelope.EventType);
        Assert.Null(envelope.Job);
        Assert.Equal("proj-1", envelope.ProjectId);
    }

    [Fact]
    public void EventEnvelope_ToJson_SerializesCorrectly()
    {
        var actor = EventActor.Service("test-service");
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.AlertCreated,
            tenantId: "tenant-1",
            actor: actor);

        var json = envelope.ToJson();

        Assert.NotNull(json);
        Assert.Contains("schemaVersion", json);
        Assert.Contains("eventId", json);
        Assert.Contains("eventType", json);
        Assert.Contains("tenantId", json);
    }

    [Fact]
    public void EventEnvelope_FromJson_DeserializesCorrectly()
    {
        var actor = EventActor.Service("test-service");
        var original = EventEnvelope.Create(
            eventType: OrchestratorEventType.ScheduleTriggered,
            tenantId: "tenant-1",
            actor: actor,
            projectId: "proj-1");

        var json = original.ToJson();
        var restored = EventEnvelope.FromJson(json);

        Assert.NotNull(restored);
        Assert.Equal(original.EventId, restored!.EventId);
        Assert.Equal(original.EventType, restored.EventType);
        Assert.Equal(original.TenantId, restored.TenantId);
        Assert.Equal(original.ProjectId, restored.ProjectId);
        Assert.Equal(original.IdempotencyKey, restored.IdempotencyKey);
    }

    [Fact]
    public void EventEnvelope_FromJson_ReturnsNullForInvalidJson()
    {
        var result = EventEnvelope.FromJson("not valid json");
        Assert.Null(result);
    }

    [Fact]
    public void EventEnvelope_ComputeDigest_ReturnsSha256Hash()
    {
        var actor = EventActor.Service("test-service");
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.JobCreated,
            tenantId: "tenant-1",
            actor: actor);

        var digest = envelope.ComputeDigest();

        Assert.StartsWith("sha256:", digest);
        Assert.Equal(64 + 7, digest.Length); // "sha256:" + 64 hex chars
    }

    [Fact]
    public void EventEnvelope_GenerateIdempotencyKey_IsDeterministic()
    {
        var key1 = EventEnvelope.GenerateIdempotencyKey(OrchestratorEventType.JobCompleted, "job-123", 2);
        var key2 = EventEnvelope.GenerateIdempotencyKey(OrchestratorEventType.JobCompleted, "job-123", 2);

        Assert.Equal(key1, key2);
        Assert.Equal("orch-job.completed-job-123-2", key1);
    }

    [Fact]
    public void EventEnvelope_GenerateIdempotencyKey_DiffersForDifferentAttempts()
    {
        var key1 = EventEnvelope.GenerateIdempotencyKey(OrchestratorEventType.JobCompleted, "job-123", 1);
        var key2 = EventEnvelope.GenerateIdempotencyKey(OrchestratorEventType.JobCompleted, "job-123", 2);

        Assert.NotEqual(key1, key2);
    }

    #endregion

    #region EventActor Tests

    [Fact]
    public void EventActor_Service_CreatesServiceActor()
    {
        var actor = EventActor.Service("orchestrator", "orch:admin", "orch:write");

        Assert.Equal("service/orchestrator", actor.Subject);
        Assert.NotNull(actor.Scopes);
        Assert.Equal(2, actor.Scopes!.Count);
    }

    [Fact]
    public void EventActor_User_CreatesUserActor()
    {
        var actor = EventActor.User("admin@example.com", "export:create");

        Assert.Equal("user/admin@example.com", actor.Subject);
        Assert.Single(actor.Scopes!);
    }

    [Fact]
    public void EventActor_System_CreatesSystemActor()
    {
        var actor = EventActor.System("scheduler");

        Assert.Equal("system/scheduler", actor.Subject);
        Assert.Null(actor.Scopes);
    }

    [Fact]
    public void EventActor_Worker_CreatesWorkerActor()
    {
        var actor = EventActor.Worker("worker-abc", "python-sdk");

        Assert.Equal("worker/python-sdk/worker-abc", actor.Subject);
        Assert.Null(actor.Scopes);
    }

    #endregion

    #region EventJob Tests

    [Fact]
    public void EventJob_Create_CreatesJobMetadata()
    {
        var job = EventJob.Create(
            id: "job-123",
            type: "ingest",
            status: "running",
            attempt: 2,
            runId: "run-456",
            leaseId: "lease-789");

        Assert.Equal("job-123", job.Id);
        Assert.Equal("ingest", job.Type);
        Assert.Equal("running", job.Status);
        Assert.Equal(2, job.Attempt);
        Assert.Equal("run-456", job.RunId);
        Assert.Equal("lease-789", job.LeaseId);
    }

    [Fact]
    public void EventJob_Completed_SetsCompletedStatus()
    {
        var job = EventJob.Completed("job-123", "export", 1, payloadDigest: "sha256:abc");

        Assert.Equal("completed", job.Status);
        Assert.Equal("sha256:abc", job.PayloadDigest);
        Assert.Null(job.Reason);
    }

    [Fact]
    public void EventJob_Failed_SetsFailedStatusWithReason()
    {
        var job = EventJob.Failed("job-123", "export", 2, "Connection timeout");

        Assert.Equal("failed", job.Status);
        Assert.Equal("Connection timeout", job.Reason);
    }

    [Fact]
    public void EventJob_Canceled_SetsCanceledStatusWithReason()
    {
        var job = EventJob.Canceled("job-123", "export", 1, "User requested cancellation");

        Assert.Equal("canceled", job.Status);
        Assert.Equal("User requested cancellation", job.Reason);
    }

    #endregion

    #region EventMetrics Tests

    [Fact]
    public void EventMetrics_WithDuration_CreatesDurationMetrics()
    {
        var metrics = EventMetrics.WithDuration(45.5);

        Assert.Equal(45.5, metrics.DurationSeconds);
        Assert.Null(metrics.QueueWaitSeconds);
        Assert.Null(metrics.ProcessingSeconds);
    }

    [Fact]
    public void EventMetrics_WithBreakdown_CreatesDetailedMetrics()
    {
        var metrics = EventMetrics.WithBreakdown(total: 100.0, queueWait: 20.0, processing: 80.0);

        Assert.Equal(100.0, metrics.DurationSeconds);
        Assert.Equal(20.0, metrics.QueueWaitSeconds);
        Assert.Equal(80.0, metrics.ProcessingSeconds);
    }

    #endregion

    #region EventNotifier Tests

    [Fact]
    public void EventNotifier_JobsChannel_CreatesJobsNotifier()
    {
        var notifier = EventNotifier.JobsChannel();

        Assert.Equal("orch.jobs", notifier.Channel);
        Assert.Equal("dsse", notifier.Delivery);
        Assert.Null(notifier.Replay);
    }

    [Fact]
    public void EventNotifier_ExportsChannel_CreatesExportsNotifier()
    {
        var notifier = EventNotifier.ExportsChannel("raw");

        Assert.Equal("orch.exports", notifier.Channel);
        Assert.Equal("raw", notifier.Delivery);
    }

    [Fact]
    public void EventNotifier_PolicyChannel_CreatesPolicyNotifier()
    {
        var notifier = EventNotifier.PolicyChannel();

        Assert.Equal("orch.policy", notifier.Channel);
    }

    [Fact]
    public void EventNotifier_WithReplay_AddsReplayMetadata()
    {
        var notifier = EventNotifier.JobsChannel().WithReplay(5, 10);

        Assert.NotNull(notifier.Replay);
        Assert.Equal(5, notifier.Replay!.Ordinal);
        Assert.Equal(10, notifier.Replay.Total);
    }

    #endregion

    #region OrchestratorEventType Tests

    [Theory]
    [InlineData(OrchestratorEventType.JobCreated, "job.created")]
    [InlineData(OrchestratorEventType.JobCompleted, "job.completed")]
    [InlineData(OrchestratorEventType.JobFailed, "job.failed")]
    [InlineData(OrchestratorEventType.ExportCreated, "export.created")]
    [InlineData(OrchestratorEventType.ExportCompleted, "export.completed")]
    [InlineData(OrchestratorEventType.ScheduleTriggered, "schedule.triggered")]
    [InlineData(OrchestratorEventType.PolicyUpdated, "policy.updated")]
    [InlineData(OrchestratorEventType.PackRunCompleted, "pack_run.completed")]
    public void OrchestratorEventType_ToEventTypeName_ReturnsCanonicalName(
        OrchestratorEventType eventType, string expectedName)
    {
        Assert.Equal(expectedName, eventType.ToEventTypeName());
    }

    [Theory]
    [InlineData("job.created", OrchestratorEventType.JobCreated)]
    [InlineData("job.completed", OrchestratorEventType.JobCompleted)]
    [InlineData("export.failed", OrchestratorEventType.ExportFailed)]
    [InlineData("schedule.enabled", OrchestratorEventType.ScheduleEnabled)]
    [InlineData("pack_run.started", OrchestratorEventType.PackRunStarted)]
    public void OrchestratorEventType_FromEventTypeName_ParsesCanonicalName(
        string name, OrchestratorEventType expected)
    {
        Assert.Equal(expected, OrchestratorEventTypeExtensions.FromEventTypeName(name));
    }

    [Fact]
    public void OrchestratorEventType_FromEventTypeName_ReturnsNullForUnknown()
    {
        Assert.Null(OrchestratorEventTypeExtensions.FromEventTypeName("unknown.event"));
    }

    [Theory]
    [InlineData(OrchestratorEventType.JobFailed, true)]
    [InlineData(OrchestratorEventType.ExportFailed, true)]
    [InlineData(OrchestratorEventType.PackRunFailed, true)]
    [InlineData(OrchestratorEventType.JobCompleted, false)]
    [InlineData(OrchestratorEventType.ExportCreated, false)]
    public void OrchestratorEventType_IsFailure_IdentifiesFailures(
        OrchestratorEventType eventType, bool isFailure)
    {
        Assert.Equal(isFailure, eventType.IsFailure());
    }

    [Theory]
    [InlineData(OrchestratorEventType.JobCompleted, true)]
    [InlineData(OrchestratorEventType.ExportCompleted, true)]
    [InlineData(OrchestratorEventType.PackRunCompleted, true)]
    [InlineData(OrchestratorEventType.RetentionPruneCompleted, true)]
    [InlineData(OrchestratorEventType.JobFailed, false)]
    [InlineData(OrchestratorEventType.JobCreated, false)]
    public void OrchestratorEventType_IsCompletion_IdentifiesCompletions(
        OrchestratorEventType eventType, bool isCompletion)
    {
        Assert.Equal(isCompletion, eventType.IsCompletion());
    }

    [Theory]
    [InlineData(OrchestratorEventType.JobCompleted, true)]
    [InlineData(OrchestratorEventType.JobFailed, true)]
    [InlineData(OrchestratorEventType.JobCanceled, true)]
    [InlineData(OrchestratorEventType.ExportDeleted, true)]
    [InlineData(OrchestratorEventType.AlertResolved, true)]
    [InlineData(OrchestratorEventType.JobCreated, false)]
    [InlineData(OrchestratorEventType.JobStarted, false)]
    [InlineData(OrchestratorEventType.AlertCreated, false)]
    public void OrchestratorEventType_IsTerminal_IdentifiesTerminalEvents(
        OrchestratorEventType eventType, bool isTerminal)
    {
        Assert.Equal(isTerminal, eventType.IsTerminal());
    }

    #endregion

    #region InMemoryIdempotencyStore Tests

    [Fact]
    public async Task InMemoryIdempotencyStore_TryMark_ReturnsTrueForNewKey()
    {
        var store = new InMemoryIdempotencyStore();

        var result = await store.TryMarkAsync("key-1", TimeSpan.FromMinutes(5), CT);

        Assert.True(result);
        Assert.Equal(1, store.Count);
    }

    [Fact]
    public async Task InMemoryIdempotencyStore_TryMark_ReturnsFalseForExistingKey()
    {
        var store = new InMemoryIdempotencyStore();
        await store.TryMarkAsync("key-1", TimeSpan.FromMinutes(5), CT);

        var result = await store.TryMarkAsync("key-1", TimeSpan.FromMinutes(5), CT);

        Assert.False(result);
        Assert.Equal(1, store.Count);
    }

    [Fact]
    public async Task InMemoryIdempotencyStore_Exists_ReturnsTrueForExisting()
    {
        var store = new InMemoryIdempotencyStore();
        await store.TryMarkAsync("key-1", TimeSpan.FromMinutes(5), CT);

        Assert.True(await store.ExistsAsync("key-1", CT));
        Assert.False(await store.ExistsAsync("key-2", CT));
    }

    [Fact]
    public async Task InMemoryIdempotencyStore_Remove_RemovesKey()
    {
        var store = new InMemoryIdempotencyStore();
        await store.TryMarkAsync("key-1", TimeSpan.FromMinutes(5), CT);

        await store.RemoveAsync("key-1", CT);

        Assert.False(await store.ExistsAsync("key-1", CT));
        Assert.Equal(0, store.Count);
    }

    [Fact]
    public async Task InMemoryIdempotencyStore_Clear_RemovesAllKeys()
    {
        var store = new InMemoryIdempotencyStore();
        await store.TryMarkAsync("key-1", TimeSpan.FromMinutes(5), CT);
        await store.TryMarkAsync("key-2", TimeSpan.FromMinutes(5), CT);

        store.Clear();

        Assert.Equal(0, store.Count);
    }

    [Fact]
    public async Task InMemoryIdempotencyStore_ExpiresKeys()
    {
        var store = new InMemoryIdempotencyStore();
        await store.TryMarkAsync("key-1", TimeSpan.FromMilliseconds(1), CT);

        await Task.Delay(10, CT);

        // Key should be cleaned up on next operation
        Assert.False(await store.ExistsAsync("key-1", CT));
    }

    #endregion

    #region NullNotifierBus Tests

    [Fact]
    public async Task NullNotifierBus_Send_RecordsMessage()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();

        await bus.SendAsync("orch.jobs", "test-message", CT);

        var messages = bus.GetMessages();
        Assert.Single(messages);
        Assert.Equal("orch.jobs", messages[0].Channel);
        Assert.Equal("test-message", messages[0].Message);
    }

    [Fact]
    public async Task NullNotifierBus_SendBatch_RecordsAllMessages()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();

        await bus.SendBatchAsync("orch.exports", new[] { "msg1", "msg2", "msg3" }, CT);

        var messages = bus.GetMessages("orch.exports");
        Assert.Equal(3, messages.Count);
    }

    [Fact]
    public async Task NullNotifierBus_GetMessages_FiltersByChannel()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();

        await bus.SendAsync("orch.jobs", "job-msg", CT);
        await bus.SendAsync("orch.exports", "export-msg", CT);

        var jobMessages = bus.GetMessages("orch.jobs");
        var exportMessages = bus.GetMessages("orch.exports");

        Assert.Single(jobMessages);
        Assert.Single(exportMessages);
        Assert.Equal("job-msg", jobMessages[0]);
        Assert.Equal("export-msg", exportMessages[0]);
    }

    #endregion

    #region NullEventSigner Tests

    [Fact]
    public async Task NullEventSigner_Sign_ReturnsDsseFormat()
    {
        var signer = NullEventSigner.Instance;
        var actor = EventActor.Service("test");
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.JobCreated,
            tenantId: "tenant-1",
            actor: actor);

        var signed = await signer.SignAsync(envelope, CT);

        Assert.Contains("payloadType", signed);
        Assert.Contains("application/vnd.orch.event+json", signed);
        Assert.Contains("payload", signed);
        Assert.Contains("signatures", signed);
    }

    [Fact]
    public async Task NullEventSigner_Verify_ExtractsEnvelope()
    {
        var signer = NullEventSigner.Instance;
        var actor = EventActor.Service("test");
        var original = EventEnvelope.Create(
            eventType: OrchestratorEventType.ExportCompleted,
            tenantId: "tenant-1",
            actor: actor,
            projectId: "proj-1");

        var signed = await signer.SignAsync(original, CT);
        var verified = await signer.VerifyAsync(signed, CT);

        Assert.NotNull(verified);
        Assert.Equal(original.EventType, verified!.EventType);
        Assert.Equal(original.TenantId, verified.TenantId);
    }

    [Fact]
    public async Task NullEventSigner_Verify_ReturnsNullForInvalidPayload()
    {
        var signer = NullEventSigner.Instance;

        var result = await signer.VerifyAsync("invalid json", CT);

        Assert.Null(result);
    }

    #endregion

    #region NullEventPublisher Tests

    [Fact]
    public async Task NullEventPublisher_Publish_ReturnsTrue()
    {
        var publisher = NullEventPublisher.Instance;
        var actor = EventActor.Service("test");
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.JobCreated,
            tenantId: "tenant-1",
            actor: actor);

        var result = await publisher.PublishAsync(envelope, CT);

        Assert.True(result);
    }

    [Fact]
    public async Task NullEventPublisher_PublishBatch_ReturnsCorrectCount()
    {
        var publisher = NullEventPublisher.Instance;
        var actor = EventActor.Service("test");
        var envelopes = Enumerable.Range(1, 5).Select(i =>
            EventEnvelope.Create(
                eventType: OrchestratorEventType.JobCreated,
                tenantId: "tenant-1",
                actor: actor));

        var result = await publisher.PublishBatchAsync(envelopes, CT);

        Assert.Equal(5, result.Published);
        Assert.Equal(0, result.Deduplicated);
        Assert.Equal(0, result.Failed);
        Assert.False(result.HasErrors);
    }

    [Fact]
    public async Task NullEventPublisher_IsPublished_ReturnsFalse()
    {
        var publisher = NullEventPublisher.Instance;

        var result = await publisher.IsPublishedAsync("any-key", CT);

        Assert.False(result);
    }

    #endregion

    #region OrchestratorEventPublisher Tests

    [Fact]
    public async Task OrchestratorEventPublisher_Publish_PublishesToBus()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();
        var store = new InMemoryIdempotencyStore();
        var options = Options.Create(EventPublishOptions.Default with { SignWithDsse = false });
        var publisher = new OrchestratorEventPublisher(
            store, bus, options, NullLogger<OrchestratorEventPublisher>.Instance);

        var actor = EventActor.Service("test");
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.JobCompleted,
            tenantId: "tenant-1",
            actor: actor);

        var result = await publisher.PublishAsync(envelope, CT);

        Assert.True(result);
        var messages = bus.GetMessages("orch.jobs");
        Assert.Single(messages);
    }

    [Fact]
    public async Task OrchestratorEventPublisher_Publish_DeduplicatesByIdempotencyKey()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();
        var store = new InMemoryIdempotencyStore();
        var options = Options.Create(EventPublishOptions.Default with { SignWithDsse = false });
        var publisher = new OrchestratorEventPublisher(
            store, bus, options, NullLogger<OrchestratorEventPublisher>.Instance);

        var actor = EventActor.Service("test");
        var job = EventJob.Completed("job-1", "test", 1);
        var envelope = EventEnvelope.ForJob(
            eventType: OrchestratorEventType.JobCompleted,
            tenantId: "tenant-1",
            actor: actor,
            job: job);

        var result1 = await publisher.PublishAsync(envelope, CT);
        var result2 = await publisher.PublishAsync(envelope, CT);

        Assert.True(result1);
        Assert.False(result2);
        Assert.Single(bus.GetMessages("orch.jobs"));
    }

    [Fact]
    public async Task OrchestratorEventPublisher_Publish_SignsWithDsse()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();
        var store = new InMemoryIdempotencyStore();
        var signer = NullEventSigner.Instance;
        var options = Options.Create(EventPublishOptions.Default with { SignWithDsse = true });
        var publisher = new OrchestratorEventPublisher(
            store, bus, options, NullLogger<OrchestratorEventPublisher>.Instance, signer);

        var actor = EventActor.Service("test");
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.PolicyUpdated,
            tenantId: "tenant-1",
            actor: actor);

        await publisher.PublishAsync(envelope, CT);

        var messages = bus.GetMessages("orch.policy");
        Assert.Single(messages);
        Assert.Contains("payloadType", messages[0]);
    }

    [Fact]
    public async Task OrchestratorEventPublisher_Publish_RoutesToCorrectChannel()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();
        var store = new InMemoryIdempotencyStore();
        var options = Options.Create(EventPublishOptions.Default with { SignWithDsse = false });
        var publisher = new OrchestratorEventPublisher(
            store, bus, options, NullLogger<OrchestratorEventPublisher>.Instance);

        var actor = EventActor.Service("test");

        // Export event
        await publisher.PublishAsync(EventEnvelope.Create(
            eventType: OrchestratorEventType.ExportCreated,
            tenantId: "t1",
            actor: actor), CT);

        // Policy event
        await publisher.PublishAsync(EventEnvelope.Create(
            eventType: OrchestratorEventType.PolicyUpdated,
            tenantId: "t1",
            actor: actor), CT);

        // Schedule event
        await publisher.PublishAsync(EventEnvelope.Create(
            eventType: OrchestratorEventType.ScheduleTriggered,
            tenantId: "t1",
            actor: actor), CT);

        // Alert event
        await publisher.PublishAsync(EventEnvelope.Create(
            eventType: OrchestratorEventType.AlertCreated,
            tenantId: "t1",
            actor: actor), CT);

        // Pack run event
        await publisher.PublishAsync(EventEnvelope.Create(
            eventType: OrchestratorEventType.PackRunStarted,
            tenantId: "t1",
            actor: actor), CT);

        Assert.Single(bus.GetMessages("orch.exports"));
        Assert.Single(bus.GetMessages("orch.policy"));
        Assert.Single(bus.GetMessages("orch.schedules"));
        Assert.Single(bus.GetMessages("orch.alerts"));
        Assert.Single(bus.GetMessages("orch.pack_runs"));
    }

    [Fact]
    public async Task OrchestratorEventPublisher_Publish_UsesCustomChannel()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();
        var store = new InMemoryIdempotencyStore();
        var options = Options.Create(EventPublishOptions.Default with { SignWithDsse = false });
        var publisher = new OrchestratorEventPublisher(
            store, bus, options, NullLogger<OrchestratorEventPublisher>.Instance);

        var actor = EventActor.Service("test");
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.JobCompleted,
            tenantId: "tenant-1",
            actor: actor,
            notifier: new EventNotifier("custom.channel", "raw", null));

        await publisher.PublishAsync(envelope, CT);

        Assert.Single(bus.GetMessages("custom.channel"));
        Assert.Empty(bus.GetMessages("orch.jobs"));
    }

    [Fact]
    public async Task OrchestratorEventPublisher_IsPublished_ChecksIdempotencyStore()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();
        var store = new InMemoryIdempotencyStore();
        var options = Options.Create(EventPublishOptions.Default with { SignWithDsse = false });
        var publisher = new OrchestratorEventPublisher(
            store, bus, options, NullLogger<OrchestratorEventPublisher>.Instance);

        var actor = EventActor.Service("test");
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.JobCompleted,
            tenantId: "tenant-1",
            actor: actor);

        Assert.False(await publisher.IsPublishedAsync(envelope.IdempotencyKey, CT));

        await publisher.PublishAsync(envelope, CT);

        Assert.True(await publisher.IsPublishedAsync(envelope.IdempotencyKey, CT));
    }

    [Fact]
    public async Task OrchestratorEventPublisher_PublishBatch_ReturnsCorrectCounts()
    {
        var bus = NullNotifierBus.Instance;
        bus.Clear();
        var store = new InMemoryIdempotencyStore();
        var options = Options.Create(EventPublishOptions.Default with { SignWithDsse = false });
        var publisher = new OrchestratorEventPublisher(
            store, bus, options, NullLogger<OrchestratorEventPublisher>.Instance);

        var actor = EventActor.Service("test");
        var job = EventJob.Completed("job-1", "test", 1);
        var envelope = EventEnvelope.ForJob(
            eventType: OrchestratorEventType.JobCompleted,
            tenantId: "tenant-1",
            actor: actor,
            job: job);

        // First batch - all new
        var result1 = await publisher.PublishBatchAsync(new[] { envelope }, CT);
        Assert.Equal(1, result1.Published);
        Assert.Equal(0, result1.Deduplicated);

        // Second batch - all duplicates
        var result2 = await publisher.PublishBatchAsync(new[] { envelope }, CT);
        Assert.Equal(0, result2.Published);
        Assert.Equal(1, result2.Deduplicated);
    }

    #endregion

    #region BatchPublishResult Tests

    [Fact]
    public void BatchPublishResult_Total_ReturnsSum()
    {
        var result = new BatchPublishResult(10, 5, 2, new List<string>());

        Assert.Equal(17, result.Total);
    }

    [Fact]
    public void BatchPublishResult_HasPublished_TrueWhenPublished()
    {
        var result1 = new BatchPublishResult(1, 0, 0, new List<string>());
        var result2 = new BatchPublishResult(0, 1, 0, new List<string>());

        Assert.True(result1.HasPublished);
        Assert.False(result2.HasPublished);
    }

    [Fact]
    public void BatchPublishResult_HasErrors_TrueWhenFailedOrErrors()
    {
        var result1 = new BatchPublishResult(0, 0, 1, new List<string>());
        var result2 = new BatchPublishResult(0, 0, 0, new List<string> { "error" });
        var result3 = new BatchPublishResult(1, 0, 0, new List<string>());

        Assert.True(result1.HasErrors);
        Assert.True(result2.HasErrors);
        Assert.False(result3.HasErrors);
    }

    [Fact]
    public void BatchPublishResult_Empty_ReturnsZeros()
    {
        var result = BatchPublishResult.Empty;

        Assert.Equal(0, result.Published);
        Assert.Equal(0, result.Deduplicated);
        Assert.Equal(0, result.Failed);
        Assert.Empty(result.Errors);
    }

    [Fact]
    public void BatchPublishResult_SingleSuccess_ReturnsOne()
    {
        var result = BatchPublishResult.SingleSuccess;

        Assert.Equal(1, result.Published);
        Assert.Equal(0, result.Deduplicated);
        Assert.False(result.HasErrors);
    }

    [Fact]
    public void BatchPublishResult_SingleDeduplicated_ReturnsOneDeduplicated()
    {
        var result = BatchPublishResult.SingleDeduplicated;

        Assert.Equal(0, result.Published);
        Assert.Equal(1, result.Deduplicated);
        Assert.False(result.HasErrors);
    }

    #endregion

    #region EventPublishOptions Tests

    [Fact]
    public void EventPublishOptions_Default_HasExpectedValues()
    {
        var options = EventPublishOptions.Default;

        Assert.True(options.SignWithDsse);
        Assert.Equal(3, options.MaxRetries);
        Assert.Equal(TimeSpan.FromSeconds(1), options.RetryDelay);
        Assert.Equal(TimeSpan.FromHours(24), options.IdempotencyTtl);
        Assert.True(options.IncludeProvenance);
        Assert.True(options.CompressLargePayloads);
        Assert.Equal(64 * 1024, options.CompressionThreshold);
    }

    #endregion
}
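Reading note (not part of the commit): the idempotency tests above pin the key to the pattern `orch-{eventTypeName}-{jobId}-{attempt}`, which is why retries of the same attempt deduplicate while a new attempt publishes. A minimal sketch of an equivalent generator, assuming only the ToEventTypeName mapping exercised above:

    // Hypothetical re-derivation of the key shape the tests assert.
    static string GenerateIdempotencyKey(OrchestratorEventType type, string jobId, int attempt)
        => $"orch-{type.ToEventTypeName()}-{jobId}-{attempt}";

    // GenerateIdempotencyKey(OrchestratorEventType.JobCompleted, "job-123", 2)
    //   => "orch-job.completed-job-123-2"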
@@ -0,0 +1,159 @@
using StellaOps.Orchestrator.Core.Domain.Export;

namespace StellaOps.Orchestrator.Tests.Export;

/// <summary>
/// Tests for ExportDistribution metadata.
/// </summary>
public sealed class ExportDistributionTests
{
    [Fact]
    public void ToJson_SerializesCorrectly()
    {
        var distribution = new ExportDistribution(
            PrimaryUri: "s3://bucket/exports/export-001.json.gz",
            DownloadUrl: "https://cdn.example.com/exports/export-001.json.gz?token=abc",
            DownloadUrlExpiresAt: new DateTimeOffset(2024, 12, 31, 23, 59, 59, TimeSpan.Zero),
            StorageProvider: "s3",
            Region: "us-east-1",
            StorageTier: "hot",
            Replicas: null,
            ReplicationStatus: null,
            ContentType: "application/gzip",
            AccessList: null,
            IsPublic: false,
            CreatedAt: new DateTimeOffset(2024, 1, 1, 0, 0, 0, TimeSpan.Zero));

        var json = distribution.ToJson();

        Assert.Contains("\"primaryUri\":\"s3://bucket/exports/export-001.json.gz\"", json);
        Assert.Contains("\"storageProvider\":\"s3\"", json);
        Assert.Contains("\"storageTier\":\"hot\"", json);
    }

    [Fact]
    public void FromJson_DeserializesCorrectly()
    {
        var original = new ExportDistribution(
            PrimaryUri: "s3://bucket/test.json",
            DownloadUrl: null,
            DownloadUrlExpiresAt: null,
            StorageProvider: "s3",
            Region: "eu-west-1",
            StorageTier: "cool",
            Replicas: null,
            ReplicationStatus: null,
            ContentType: "application/json",
            AccessList: null,
            IsPublic: true,
            CreatedAt: DateTimeOffset.UtcNow);

        var json = original.ToJson();
        var deserialized = ExportDistribution.FromJson(json);

        Assert.NotNull(deserialized);
        Assert.Equal(original.PrimaryUri, deserialized.PrimaryUri);
        Assert.Equal(original.StorageProvider, deserialized.StorageProvider);
        Assert.Equal(original.Region, deserialized.Region);
        Assert.Equal(original.IsPublic, deserialized.IsPublic);
    }

    [Fact]
    public void FromJson_ReturnsNullForInvalidJson()
    {
        var result = ExportDistribution.FromJson("not valid json");
        Assert.Null(result);
    }

    [Fact]
    public void WithDownloadUrl_SetsUrlAndExpiration()
    {
        var distribution = new ExportDistribution(
            PrimaryUri: "s3://bucket/test.json",
            DownloadUrl: null,
            DownloadUrlExpiresAt: null,
            StorageProvider: "s3",
            Region: null,
            StorageTier: "hot",
            Replicas: null,
            ReplicationStatus: null,
            ContentType: "application/json",
            AccessList: null,
            IsPublic: false,
            CreatedAt: DateTimeOffset.UtcNow);

        var beforeUpdate = DateTimeOffset.UtcNow;
        var updated = distribution.WithDownloadUrl("https://download.example.com/test.json", TimeSpan.FromHours(1));
        var afterUpdate = DateTimeOffset.UtcNow.AddHours(1);

        Assert.Equal("https://download.example.com/test.json", updated.DownloadUrl);
        Assert.NotNull(updated.DownloadUrlExpiresAt);
        Assert.True(updated.DownloadUrlExpiresAt >= beforeUpdate.AddHours(1).AddSeconds(-1));
        Assert.True(updated.DownloadUrlExpiresAt <= afterUpdate);
    }

    [Fact]
    public void WithReplica_AddsReplicaToEmptyDistribution()
    {
        var distribution = new ExportDistribution(
            PrimaryUri: "s3://primary/test.json",
            DownloadUrl: null,
            DownloadUrlExpiresAt: null,
            StorageProvider: "s3",
            Region: null,
            StorageTier: "hot",
            Replicas: null,
            ReplicationStatus: null,
            ContentType: "application/json",
            AccessList: null,
            IsPublic: false,
            CreatedAt: DateTimeOffset.UtcNow);

        var updated = distribution.WithReplica("backup", "s3://backup/test.json", ReplicationStatus.Completed);

        Assert.NotNull(updated.Replicas);
        Assert.Single(updated.Replicas);
        Assert.Equal("s3://backup/test.json", updated.Replicas["backup"]);
        Assert.NotNull(updated.ReplicationStatus);
        Assert.Equal(ReplicationStatus.Completed, updated.ReplicationStatus["backup"]);
    }

    [Fact]
    public void WithReplica_AddsMultipleReplicas()
    {
        var distribution = new ExportDistribution(
            PrimaryUri: "s3://primary/test.json",
            DownloadUrl: null,
            DownloadUrlExpiresAt: null,
            StorageProvider: "s3",
            Region: null,
            StorageTier: "hot",
            Replicas: null,
            ReplicationStatus: null,
            ContentType: "application/json",
            AccessList: null,
            IsPublic: false,
            CreatedAt: DateTimeOffset.UtcNow);

        var updated = distribution
            .WithReplica("backup1", "s3://backup1/test.json", ReplicationStatus.Completed)
            .WithReplica("backup2", "s3://backup2/test.json", ReplicationStatus.InProgress);

        Assert.NotNull(updated.Replicas);
        Assert.Equal(2, updated.Replicas.Count);
        Assert.Equal("s3://backup1/test.json", updated.Replicas["backup1"]);
        Assert.Equal("s3://backup2/test.json", updated.Replicas["backup2"]);
        Assert.Equal(ReplicationStatus.InProgress, updated.ReplicationStatus!["backup2"]);
    }

    [Theory]
    [InlineData(ReplicationStatus.Pending)]
    [InlineData(ReplicationStatus.InProgress)]
    [InlineData(ReplicationStatus.Completed)]
    [InlineData(ReplicationStatus.Failed)]
    [InlineData(ReplicationStatus.Skipped)]
    public void ReplicationStatus_AllValuesAreValid(ReplicationStatus status)
    {
        Assert.True(Enum.IsDefined(status));
    }
}
@@ -0,0 +1,133 @@
|
||||
using StellaOps.Orchestrator.Core.Domain.Export;
|
||||
|
||||
namespace StellaOps.Orchestrator.Tests.Export;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for ExportJobPayload serialization and validation.
|
||||
/// </summary>
|
||||
public sealed class ExportJobPayloadTests
|
||||
{
|
||||
[Fact]
|
||||
public void Default_CreatesPayloadWithFormat()
|
||||
{
|
||||
var payload = ExportJobPayload.Default("json");
|
||||
|
||||
Assert.Equal("json", payload.Format);
|
||||
Assert.Null(payload.StartTime);
|
||||
        Assert.Null(payload.EndTime);
        Assert.Null(payload.SourceId);
        Assert.Null(payload.ProjectId);
        Assert.Null(payload.EntityIds);
        Assert.Null(payload.MaxEntries);
        Assert.True(payload.IncludeProvenance);
        Assert.True(payload.SignOutput);
        Assert.Null(payload.Compression);
        Assert.Null(payload.DestinationUri);
        Assert.Null(payload.CallbackUrl);
        Assert.Null(payload.Options);
    }

    [Theory]
    [InlineData("json")]
    [InlineData("ndjson")]
    [InlineData("csv")]
    [InlineData("spdx")]
    [InlineData("cyclonedx")]
    public void Default_SupportsDifferentFormats(string format)
    {
        var payload = ExportJobPayload.Default(format);
        Assert.Equal(format, payload.Format);
    }

    [Fact]
    public void ToJson_SerializesCorrectly()
    {
        var payload = new ExportJobPayload(
            Format: "json",
            StartTime: new DateTimeOffset(2024, 1, 1, 0, 0, 0, TimeSpan.Zero),
            EndTime: new DateTimeOffset(2024, 1, 31, 23, 59, 59, TimeSpan.Zero),
            SourceId: Guid.Parse("12345678-1234-1234-1234-123456789abc"),
            ProjectId: "project-1",
            EntityIds: [Guid.Parse("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa")],
            MaxEntries: 1000,
            IncludeProvenance: true,
            SignOutput: false,
            Compression: "gzip",
            DestinationUri: "s3://bucket/exports/file.json.gz",
            CallbackUrl: "https://webhook.example.com/export-complete",
            Options: new Dictionary<string, string> { ["key"] = "value" });

        var json = payload.ToJson();

        Assert.Contains("\"format\":\"json\"", json);
        Assert.Contains("\"maxEntries\":1000", json);
        Assert.Contains("\"compression\":\"gzip\"", json);
        Assert.Contains("\"signOutput\":false", json);
    }

    [Fact]
    public void FromJson_DeserializesCorrectly()
    {
        var original = new ExportJobPayload(
            Format: "ndjson",
            StartTime: new DateTimeOffset(2024, 1, 1, 0, 0, 0, TimeSpan.Zero),
            EndTime: null,
            SourceId: Guid.Parse("12345678-1234-1234-1234-123456789abc"),
            ProjectId: null,
            EntityIds: null,
            MaxEntries: 500,
            IncludeProvenance: false,
            SignOutput: true,
            Compression: null,
            DestinationUri: null,
            CallbackUrl: null,
            Options: null);

        var json = original.ToJson();
        var deserialized = ExportJobPayload.FromJson(json);

        Assert.NotNull(deserialized);
        Assert.Equal(original.Format, deserialized.Format);
        Assert.Equal(original.StartTime, deserialized.StartTime);
        Assert.Equal(original.SourceId, deserialized.SourceId);
        Assert.Equal(original.MaxEntries, deserialized.MaxEntries);
        Assert.Equal(original.IncludeProvenance, deserialized.IncludeProvenance);
        Assert.Equal(original.SignOutput, deserialized.SignOutput);
    }

    [Fact]
    public void ComputeDigest_ReturnsSha256Prefixed()
    {
        var payload = ExportJobPayload.Default("json");
        var digest = payload.ComputeDigest();

        Assert.StartsWith("sha256:", digest);
        Assert.Equal(71, digest.Length); // "sha256:" (7) + 64 hex chars
    }

    [Fact]
    public void ComputeDigest_IsDeterministic()
    {
        var payload = ExportJobPayload.Default("json");
        var digest1 = payload.ComputeDigest();
        var digest2 = payload.ComputeDigest();

        Assert.Equal(digest1, digest2);
    }

    [Fact]
    public void ComputeDigest_DifferentPayloadsHaveDifferentDigests()
    {
        var payload1 = ExportJobPayload.Default("json");
        var payload2 = ExportJobPayload.Default("ndjson");

        Assert.NotEqual(payload1.ComputeDigest(), payload2.ComputeDigest());
    }

    [Fact]
    public void FromJson_ReturnsNullForInvalidJson()
    {
        var result = ExportJobPayload.FromJson("invalid json");
        Assert.Null(result);
    }
}
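
// --- Editor's note: the sketch below is not part of this commit. It illustrates
// one ComputeDigest shape that would satisfy the three digest tests above
// (deterministic, "sha256:"-prefixed, 71 characters = 7 + 64 hex). The use of
// System.Text.Json and the exact serializer settings are assumptions; the shipped
// ExportJobPayload.ComputeDigest may differ.
internal static class PayloadDigestSketch
{
    public static string ComputeDigest<T>(T payload)
    {
        // Record properties serialize in declaration order, so the JSON form
        // (and therefore the hash) is stable across calls.
        var json = System.Text.Json.JsonSerializer.Serialize(payload);
        var hash = System.Security.Cryptography.SHA256.HashData(
            System.Text.Encoding.UTF8.GetBytes(json));
        return "sha256:" + Convert.ToHexString(hash).ToLowerInvariant();
    }
}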
@@ -0,0 +1,151 @@
using StellaOps.Orchestrator.Core.Domain.Export;

namespace StellaOps.Orchestrator.Tests.Export;

/// <summary>
/// Tests for ExportJobPolicy defaults and rate limits.
/// </summary>
public sealed class ExportJobPolicyTests
{
    [Fact]
    public void QuotaDefaults_HaveReasonableValues()
    {
        Assert.Equal(5, ExportJobPolicy.QuotaDefaults.MaxActive);
        Assert.Equal(50, ExportJobPolicy.QuotaDefaults.MaxPerHour);
        Assert.Equal(10, ExportJobPolicy.QuotaDefaults.BurstCapacity);
        Assert.Equal(0.5, ExportJobPolicy.QuotaDefaults.RefillRate);
        Assert.Equal(-10, ExportJobPolicy.QuotaDefaults.DefaultPriority);
        Assert.Equal(3, ExportJobPolicy.QuotaDefaults.MaxAttempts);
        Assert.Equal(600, ExportJobPolicy.QuotaDefaults.DefaultLeaseSeconds);
        Assert.Equal(3600, ExportJobPolicy.QuotaDefaults.MaxLeaseSeconds);
        Assert.Equal(60, ExportJobPolicy.QuotaDefaults.RecommendedHeartbeatInterval);
    }

    [Theory]
    [InlineData(ExportJobTypes.Ledger, 3, 30, 120)]
    [InlineData(ExportJobTypes.Sbom, 5, 100, 30)]
    [InlineData(ExportJobTypes.Vex, 5, 100, 30)]
    [InlineData(ExportJobTypes.ScanResults, 3, 50, 60)]
    [InlineData(ExportJobTypes.PolicyEvaluation, 3, 50, 60)]
    [InlineData(ExportJobTypes.Attestation, 2, 20, 180)]
    [InlineData(ExportJobTypes.PortableBundle, 1, 10, 600)]
    public void RateLimits_GetForJobType_ReturnsExpectedValues(
        string jobType,
        int expectedMaxConcurrent,
        int expectedMaxPerHour,
        int expectedDuration)
    {
        var rateLimit = ExportJobPolicy.RateLimits.GetForJobType(jobType);

        Assert.Equal(expectedMaxConcurrent, rateLimit.MaxConcurrent);
        Assert.Equal(expectedMaxPerHour, rateLimit.MaxPerHour);
        Assert.Equal(expectedDuration, rateLimit.EstimatedDurationSeconds);
    }

    [Fact]
    public void RateLimits_GetForJobType_ReturnsDefaultForUnknownType()
    {
        var rateLimit = ExportJobPolicy.RateLimits.GetForJobType("export.unknown");

        Assert.Equal(3, rateLimit.MaxConcurrent);
        Assert.Equal(30, rateLimit.MaxPerHour);
        Assert.Equal(120, rateLimit.EstimatedDurationSeconds);
    }

    [Fact]
    public void Timeouts_HaveReasonableValues()
    {
        Assert.Equal(TimeSpan.FromHours(2), ExportJobPolicy.Timeouts.MaxJobDuration);
        Assert.Equal(TimeSpan.FromMinutes(5), ExportJobPolicy.Timeouts.HeartbeatTimeout);
        Assert.Equal(TimeSpan.FromMinutes(1), ExportJobPolicy.Timeouts.RetryBackoff);
        Assert.Equal(TimeSpan.FromMinutes(30), ExportJobPolicy.Timeouts.MaxRetryBackoff);
    }

    [Fact]
    public void CreateDefaultQuota_CreatesValidQuota()
    {
        var quota = ExportJobPolicy.CreateDefaultQuota("tenant-1", ExportJobTypes.Ledger, "test-user");

        Assert.NotEqual(Guid.Empty, quota.QuotaId);
        Assert.Equal("tenant-1", quota.TenantId);
        Assert.Equal(ExportJobTypes.Ledger, quota.JobType);
        Assert.Equal(3, quota.MaxActive); // Ledger specific
        Assert.Equal(30, quota.MaxPerHour); // Ledger specific
        Assert.Equal(ExportJobPolicy.QuotaDefaults.BurstCapacity, quota.BurstCapacity);
        Assert.Equal(ExportJobPolicy.QuotaDefaults.RefillRate, quota.RefillRate);
        Assert.Equal(quota.BurstCapacity, quota.CurrentTokens);
        Assert.Equal(0, quota.CurrentActive);
        Assert.Equal(0, quota.CurrentHourCount);
        Assert.False(quota.Paused);
        Assert.Null(quota.PauseReason);
        Assert.Null(quota.QuotaTicket);
        Assert.Equal("test-user", quota.UpdatedBy);
    }

    [Fact]
    public void CreateDefaultQuota_WithoutJobType_UsesGlobalDefaults()
    {
        var quota = ExportJobPolicy.CreateDefaultQuota("tenant-1", jobType: null, "test-user");

        Assert.Equal("tenant-1", quota.TenantId);
        Assert.Null(quota.JobType);
        Assert.Equal(ExportJobPolicy.QuotaDefaults.MaxActive, quota.MaxActive);
        Assert.Equal(ExportJobPolicy.QuotaDefaults.MaxPerHour, quota.MaxPerHour);
    }

    [Fact]
    public void CreateDefaultQuota_SetsCurrentTimeFields()
    {
        var before = DateTimeOffset.UtcNow;
        var quota = ExportJobPolicy.CreateDefaultQuota("tenant-1", ExportJobTypes.Sbom, "test-user");
        var after = DateTimeOffset.UtcNow;

        Assert.InRange(quota.CreatedAt, before, after);
        Assert.InRange(quota.UpdatedAt, before, after);
        Assert.InRange(quota.LastRefillAt, before, after);
        Assert.InRange(quota.CurrentHourStart, before, after);
    }

    [Theory]
    [InlineData(ExportJobTypes.Ledger)]
    [InlineData(ExportJobTypes.Sbom)]
    [InlineData(ExportJobTypes.Attestation)]
    [InlineData(ExportJobTypes.PortableBundle)]
    public void CreateDefaultQuota_UsesTypeSpecificLimits(string jobType)
    {
        var expectedLimit = ExportJobPolicy.RateLimits.GetForJobType(jobType);
        var quota = ExportJobPolicy.CreateDefaultQuota("tenant-1", jobType, "test-user");

        Assert.Equal(expectedLimit.MaxConcurrent, quota.MaxActive);
        Assert.Equal(expectedLimit.MaxPerHour, quota.MaxPerHour);
    }

    [Fact]
    public void RateLimits_PortableBundle_HasLowestLimits()
    {
        var portableBundle = ExportJobPolicy.RateLimits.PortableBundle;
        var ledger = ExportJobPolicy.RateLimits.Ledger;
        var sbom = ExportJobPolicy.RateLimits.Sbom;

        // Portable bundle should have the most restrictive limits
        Assert.True(portableBundle.MaxConcurrent <= ledger.MaxConcurrent);
        Assert.True(portableBundle.MaxConcurrent <= sbom.MaxConcurrent);
        Assert.True(portableBundle.MaxPerHour <= ledger.MaxPerHour);
        Assert.True(portableBundle.MaxPerHour <= sbom.MaxPerHour);
        Assert.True(portableBundle.EstimatedDurationSeconds >= ledger.EstimatedDurationSeconds);
        Assert.True(portableBundle.EstimatedDurationSeconds >= sbom.EstimatedDurationSeconds);
    }

    [Fact]
    public void RateLimits_AllDefinedTypesHaveLimits()
    {
        foreach (var jobType in ExportJobTypes.All)
        {
            var rateLimit = ExportJobPolicy.RateLimits.GetForJobType(jobType);

            Assert.True(rateLimit.MaxConcurrent > 0, $"MaxConcurrent for {jobType} should be positive");
            Assert.True(rateLimit.MaxPerHour > 0, $"MaxPerHour for {jobType} should be positive");
            Assert.True(rateLimit.EstimatedDurationSeconds > 0, $"EstimatedDurationSeconds for {jobType} should be positive");
        }
    }
}
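
// --- Editor's note: sketch, not part of this commit. The theory above pins the
// per-type limits and the unknown-type fallback (3 concurrent, 30/hour, 120s);
// a GetForJobType lookup consistent with those assertions could be a simple
// switch with a default arm. The record name and shape are assumptions.
internal sealed record RateLimitSketch(int MaxConcurrent, int MaxPerHour, int EstimatedDurationSeconds)
{
    public static RateLimitSketch GetForJobType(string jobType) => jobType switch
    {
        "export.ledger" => new(3, 30, 120),
        "export.sbom" or "export.vex" => new(5, 100, 30),
        "export.scan-results" or "export.policy-evaluation" => new(3, 50, 60),
        "export.attestation" => new(2, 20, 180),
        "export.portable-bundle" => new(1, 10, 600),
        _ => new(3, 30, 120), // fallback exercised by ReturnsDefaultForUnknownType
    };
}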
@@ -0,0 +1,97 @@
using StellaOps.Orchestrator.Core.Domain.Export;

namespace StellaOps.Orchestrator.Tests.Export;

/// <summary>
/// Tests for ExportJobTypes constants and helpers.
/// </summary>
public sealed class ExportJobTypesTests
{
    [Fact]
    public void Prefix_HasExpectedValue()
    {
        Assert.Equal("export.", ExportJobTypes.Prefix);
    }

    [Fact]
    public void All_ContainsAllDefinedTypes()
    {
        Assert.Contains(ExportJobTypes.Ledger, ExportJobTypes.All);
        Assert.Contains(ExportJobTypes.Sbom, ExportJobTypes.All);
        Assert.Contains(ExportJobTypes.Vex, ExportJobTypes.All);
        Assert.Contains(ExportJobTypes.ScanResults, ExportJobTypes.All);
        Assert.Contains(ExportJobTypes.PolicyEvaluation, ExportJobTypes.All);
        Assert.Contains(ExportJobTypes.Attestation, ExportJobTypes.All);
        Assert.Contains(ExportJobTypes.PortableBundle, ExportJobTypes.All);
    }

    [Fact]
    public void All_TypesStartWithPrefix()
    {
        foreach (var jobType in ExportJobTypes.All)
        {
            Assert.StartsWith(ExportJobTypes.Prefix, jobType);
        }
    }

    [Theory]
    [InlineData("export.ledger", true)]
    [InlineData("export.sbom", true)]
    [InlineData("export.vex", true)]
    [InlineData("export.scan-results", true)]
    [InlineData("export.policy-evaluation", true)]
    [InlineData("export.attestation", true)]
    [InlineData("export.portable-bundle", true)]
    [InlineData("export.custom", true)]
    [InlineData("EXPORT.LEDGER", true)]
    [InlineData("scan.image", false)]
    [InlineData("advisory.nvd", false)]
    [InlineData("", false)]
    [InlineData(null, false)]
    public void IsExportJob_ReturnsCorrectResult(string? jobType, bool expected)
    {
        Assert.Equal(expected, ExportJobTypes.IsExportJob(jobType));
    }

    [Theory]
    [InlineData("export.ledger", "ledger")]
    [InlineData("export.sbom", "sbom")]
    [InlineData("export.vex", "vex")]
    [InlineData("export.scan-results", "scan-results")]
    [InlineData("export.policy-evaluation", "policy-evaluation")]
    [InlineData("export.attestation", "attestation")]
    [InlineData("export.portable-bundle", "portable-bundle")]
    [InlineData("export.custom-format", "custom-format")]
    public void GetExportTarget_ReturnsTargetForExportJob(string jobType, string expectedTarget)
    {
        Assert.Equal(expectedTarget, ExportJobTypes.GetExportTarget(jobType));
    }

    [Theory]
    [InlineData("scan.image")]
    [InlineData("advisory.nvd")]
    [InlineData("")]
    [InlineData(null)]
    public void GetExportTarget_ReturnsNullForNonExportJob(string? jobType)
    {
        Assert.Null(ExportJobTypes.GetExportTarget(jobType));
    }

    [Fact]
    public void GetExportTarget_ReturnsNullForPrefixOnly()
    {
        Assert.Null(ExportJobTypes.GetExportTarget("export."));
    }

    [Fact]
    public void JobTypes_HaveExpectedValues()
    {
        Assert.Equal("export.ledger", ExportJobTypes.Ledger);
        Assert.Equal("export.sbom", ExportJobTypes.Sbom);
        Assert.Equal("export.vex", ExportJobTypes.Vex);
        Assert.Equal("export.scan-results", ExportJobTypes.ScanResults);
        Assert.Equal("export.policy-evaluation", ExportJobTypes.PolicyEvaluation);
        Assert.Equal("export.attestation", ExportJobTypes.Attestation);
        Assert.Equal("export.portable-bundle", ExportJobTypes.PortableBundle);
    }
}
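
// --- Editor's note: sketch, not part of this commit. The theories above fully
// specify the helper behaviour: the prefix check is case-insensitive
// ("EXPORT.LEDGER" counts), null/empty inputs are rejected, and the target is
// whatever follows "export." (null when nothing does). One implementation:
internal static class ExportJobTypesSketch
{
    private const string Prefix = "export.";

    public static bool IsExportJob(string? jobType) =>
        !string.IsNullOrEmpty(jobType)
        && jobType.StartsWith(Prefix, StringComparison.OrdinalIgnoreCase);

    public static string? GetExportTarget(string? jobType)
    {
        if (!IsExportJob(jobType))
        {
            return null;
        }

        var target = jobType![Prefix.Length..];
        return target.Length == 0 ? null : target; // "export." alone yields null
    }
}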
@@ -0,0 +1,338 @@
using StellaOps.Orchestrator.Core.Domain.Export;

namespace StellaOps.Orchestrator.Tests.Export;

/// <summary>
/// Tests for ExportRetention policy.
/// </summary>
public sealed class ExportRetentionTests
{
    [Fact]
    public void Default_CreatesDefaultPolicy()
    {
        var now = DateTimeOffset.UtcNow;
        var retention = ExportRetention.Default(now);

        Assert.Equal(ExportRetention.PolicyNames.Default, retention.PolicyName);
        Assert.Equal(now, retention.AvailableAt);
        Assert.NotNull(retention.ArchiveAt);
        Assert.NotNull(retention.ExpiresAt);
        Assert.Null(retention.ArchivedAt);
        Assert.Null(retention.DeletedAt);
        Assert.False(retention.LegalHold);
        Assert.False(retention.RequiresRelease);
        Assert.Equal(0, retention.ExtensionCount);
    }

    [Fact]
    public void Default_SetsCorrectPeriods()
    {
        var now = DateTimeOffset.UtcNow;
        var retention = ExportRetention.Default(now);

        var archiveAt = retention.ArchiveAt!.Value;
        var expiresAt = retention.ExpiresAt!.Value;

        Assert.Equal(now.Add(ExportRetention.DefaultPeriods.ArchiveDelay), archiveAt);
        Assert.Equal(now.Add(ExportRetention.DefaultPeriods.Default), expiresAt);
    }

    [Fact]
    public void Temporary_CreatesShorterRetention()
    {
        var now = DateTimeOffset.UtcNow;
        var retention = ExportRetention.Temporary(now);

        Assert.Equal(ExportRetention.PolicyNames.Temporary, retention.PolicyName);
        Assert.Null(retention.ArchiveAt); // No archive for temporary
        Assert.Equal(now.Add(ExportRetention.DefaultPeriods.Temporary), retention.ExpiresAt);
    }

    [Fact]
    public void Compliance_RequiresRelease()
    {
        var now = DateTimeOffset.UtcNow;
        var retention = ExportRetention.Compliance(now, TimeSpan.FromDays(365));

        Assert.Equal(ExportRetention.PolicyNames.Compliance, retention.PolicyName);
        Assert.True(retention.RequiresRelease);
        Assert.Equal(now.Add(TimeSpan.FromDays(365)), retention.ExpiresAt);
    }

    [Fact]
    public void IsExpired_ReturnsTrueWhenExpired()
    {
        var past = DateTimeOffset.UtcNow.AddDays(-1);
        var retention = new ExportRetention(
            PolicyName: "test",
            AvailableAt: past.AddDays(-2),
            ArchiveAt: null,
            ExpiresAt: past,
            ArchivedAt: null,
            DeletedAt: null,
            LegalHold: false,
            LegalHoldReason: null,
            RequiresRelease: false,
            ReleasedBy: null,
            ReleasedAt: null,
            ExtensionCount: 0,
            Metadata: null);

        Assert.True(retention.IsExpired);
    }

    [Fact]
    public void IsExpired_ReturnsFalseWhenNotExpired()
    {
        var future = DateTimeOffset.UtcNow.AddDays(1);
        var retention = new ExportRetention(
            PolicyName: "test",
            AvailableAt: DateTimeOffset.UtcNow,
            ArchiveAt: null,
            ExpiresAt: future,
            ArchivedAt: null,
            DeletedAt: null,
            LegalHold: false,
            LegalHoldReason: null,
            RequiresRelease: false,
            ReleasedBy: null,
            ReleasedAt: null,
            ExtensionCount: 0,
            Metadata: null);

        Assert.False(retention.IsExpired);
    }

    [Fact]
    public void IsExpired_ReturnsFalseWhenLegalHold()
    {
        var past = DateTimeOffset.UtcNow.AddDays(-1);
        var retention = new ExportRetention(
            PolicyName: "test",
            AvailableAt: past.AddDays(-2),
            ArchiveAt: null,
            ExpiresAt: past,
            ArchivedAt: null,
            DeletedAt: null,
            LegalHold: true,
            LegalHoldReason: "Investigation",
            RequiresRelease: false,
            ReleasedBy: null,
            ReleasedAt: null,
            ExtensionCount: 0,
            Metadata: null);

        Assert.False(retention.IsExpired); // Legal hold prevents expiration
    }

    [Fact]
    public void ShouldArchive_ReturnsTrueWhenArchiveTimePassed()
    {
        var past = DateTimeOffset.UtcNow.AddDays(-1);
        var retention = new ExportRetention(
            PolicyName: "test",
            AvailableAt: past.AddDays(-2),
            ArchiveAt: past,
            ExpiresAt: DateTimeOffset.UtcNow.AddDays(30),
            ArchivedAt: null,
            DeletedAt: null,
            LegalHold: false,
            LegalHoldReason: null,
            RequiresRelease: false,
            ReleasedBy: null,
            ReleasedAt: null,
            ExtensionCount: 0,
            Metadata: null);

        Assert.True(retention.ShouldArchive);
    }

    [Fact]
    public void ShouldArchive_ReturnsFalseWhenAlreadyArchived()
    {
        var past = DateTimeOffset.UtcNow.AddDays(-1);
        var retention = new ExportRetention(
            PolicyName: "test",
            AvailableAt: past.AddDays(-2),
            ArchiveAt: past,
            ExpiresAt: DateTimeOffset.UtcNow.AddDays(30),
            ArchivedAt: past.AddHours(-1), // Already archived
            DeletedAt: null,
            LegalHold: false,
            LegalHoldReason: null,
            RequiresRelease: false,
            ReleasedBy: null,
            ReleasedAt: null,
            ExtensionCount: 0,
            Metadata: null);

        Assert.False(retention.ShouldArchive);
    }

    [Fact]
    public void CanDelete_RequiresExpirationAndRelease()
    {
        var past = DateTimeOffset.UtcNow.AddDays(-1);

        // Expired but requires release
        var retention = new ExportRetention(
            PolicyName: "test",
            AvailableAt: past.AddDays(-2),
            ArchiveAt: null,
            ExpiresAt: past,
            ArchivedAt: null,
            DeletedAt: null,
            LegalHold: false,
            LegalHoldReason: null,
            RequiresRelease: true,
            ReleasedBy: null,
            ReleasedAt: null,
            ExtensionCount: 0,
            Metadata: null);

        Assert.False(retention.CanDelete); // Not released

        // Now release
        var released = retention.Release("admin@example.com");
        Assert.True(released.CanDelete);
    }

    [Fact]
    public void ExtendRetention_ExtendsExpiration()
    {
        var now = DateTimeOffset.UtcNow;
        var retention = ExportRetention.Default(now);

        var extended = retention.ExtendRetention(TimeSpan.FromDays(30), "Customer request");

        Assert.Equal(1, extended.ExtensionCount);
        Assert.Equal(retention.ExpiresAt!.Value.AddDays(30), extended.ExpiresAt);
        Assert.NotNull(extended.Metadata);
        Assert.Contains("extension_1_reason", extended.Metadata.Keys);
    }

    [Fact]
    public void ExtendRetention_CanExtendMultipleTimes()
    {
        var now = DateTimeOffset.UtcNow;
        var retention = ExportRetention.Default(now);

        var extended = retention
            .ExtendRetention(TimeSpan.FromDays(10), "First extension")
            .ExtendRetention(TimeSpan.FromDays(20), "Second extension");

        Assert.Equal(2, extended.ExtensionCount);
        Assert.Equal(retention.ExpiresAt!.Value.AddDays(30), extended.ExpiresAt);
    }

    [Fact]
    public void PlaceLegalHold_SetsHoldAndReason()
    {
        var retention = ExportRetention.Default(DateTimeOffset.UtcNow);

        var held = retention.PlaceLegalHold("Legal investigation pending");

        Assert.True(held.LegalHold);
        Assert.Equal("Legal investigation pending", held.LegalHoldReason);
    }

    [Fact]
    public void ReleaseLegalHold_ClearsHold()
    {
        var retention = ExportRetention.Default(DateTimeOffset.UtcNow)
            .PlaceLegalHold("Investigation");

        var released = retention.ReleaseLegalHold();

        Assert.False(released.LegalHold);
        Assert.Null(released.LegalHoldReason);
    }

    [Fact]
    public void Release_SetsReleasedByAndAt()
    {
        var retention = ExportRetention.Compliance(DateTimeOffset.UtcNow, TimeSpan.FromDays(365));

        var before = DateTimeOffset.UtcNow;
        var released = retention.Release("admin@example.com");
        var after = DateTimeOffset.UtcNow;

        Assert.Equal("admin@example.com", released.ReleasedBy);
        Assert.NotNull(released.ReleasedAt);
        Assert.InRange(released.ReleasedAt.Value, before, after);
    }

    [Fact]
    public void MarkArchived_SetsArchivedAt()
    {
        var retention = ExportRetention.Default(DateTimeOffset.UtcNow);

        var before = DateTimeOffset.UtcNow;
        var archived = retention.MarkArchived();
        var after = DateTimeOffset.UtcNow;

        Assert.NotNull(archived.ArchivedAt);
        Assert.InRange(archived.ArchivedAt.Value, before, after);
    }

    [Fact]
    public void MarkDeleted_SetsDeletedAt()
    {
        var retention = ExportRetention.Temporary(DateTimeOffset.UtcNow);

        var before = DateTimeOffset.UtcNow;
        var deleted = retention.MarkDeleted();
        var after = DateTimeOffset.UtcNow;

        Assert.NotNull(deleted.DeletedAt);
        Assert.InRange(deleted.DeletedAt.Value, before, after);
    }

    [Fact]
    public void ToJson_SerializesCorrectly()
    {
        var retention = ExportRetention.Default(DateTimeOffset.UtcNow);
        var json = retention.ToJson();

        Assert.Contains("\"policyName\":\"default\"", json);
        Assert.Contains("\"legalHold\":false", json);
    }

    [Fact]
    public void FromJson_DeserializesCorrectly()
    {
        var original = ExportRetention.Default(DateTimeOffset.UtcNow);
        var json = original.ToJson();
        var deserialized = ExportRetention.FromJson(json);

        Assert.NotNull(deserialized);
        Assert.Equal(original.PolicyName, deserialized.PolicyName);
        Assert.Equal(original.LegalHold, deserialized.LegalHold);
    }

    [Fact]
    public void FromJson_ReturnsNullForInvalidJson()
    {
        var result = ExportRetention.FromJson("not valid json");
        Assert.Null(result);
    }

    [Fact]
    public void PolicyNames_ContainsExpectedValues()
    {
        Assert.Equal("default", ExportRetention.PolicyNames.Default);
        Assert.Equal("compliance", ExportRetention.PolicyNames.Compliance);
        Assert.Equal("temporary", ExportRetention.PolicyNames.Temporary);
        Assert.Equal("long-term", ExportRetention.PolicyNames.LongTerm);
        Assert.Equal("permanent", ExportRetention.PolicyNames.Permanent);
    }

    [Fact]
    public void DefaultPeriods_HaveReasonableValues()
    {
        Assert.Equal(TimeSpan.FromDays(7), ExportRetention.DefaultPeriods.Temporary);
        Assert.Equal(TimeSpan.FromDays(30), ExportRetention.DefaultPeriods.Default);
        Assert.Equal(TimeSpan.FromDays(365), ExportRetention.DefaultPeriods.LongTerm);
        Assert.Equal(TimeSpan.FromDays(90), ExportRetention.DefaultPeriods.ArchiveDelay);
    }
}
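
// --- Editor's note: sketch, not part of this commit. Read together, the tests
// above imply three lifecycle guards; writing them out makes the precedence
// explicit (legal hold beats expiry, release gates deletion). The direct use of
// DateTimeOffset.UtcNow is an assumption about the clock source.
internal static class RetentionGuardsSketch
{
    public static bool IsExpired(DateTimeOffset? expiresAt, bool legalHold) =>
        !legalHold && expiresAt is { } e && e <= DateTimeOffset.UtcNow;

    public static bool ShouldArchive(DateTimeOffset? archiveAt, DateTimeOffset? archivedAt) =>
        archivedAt is null && archiveAt is { } a && a <= DateTimeOffset.UtcNow;

    public static bool CanDelete(bool isExpired, bool requiresRelease, DateTimeOffset? releasedAt) =>
        isExpired && (!requiresRelease || releasedAt is not null);
}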
@@ -0,0 +1,711 @@
using StellaOps.Orchestrator.Core.Domain.Export;

namespace StellaOps.Orchestrator.Tests.Export;

/// <summary>
/// Tests for ExportSchedule and related scheduling types.
/// </summary>
public sealed class ExportScheduleTests
{
    [Fact]
    public void Create_CreatesScheduleWithDefaults()
    {
        var before = DateTimeOffset.UtcNow;
        var payload = ExportJobPayload.Default("json");

        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Daily SBOM Export",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        var after = DateTimeOffset.UtcNow;

        Assert.NotEqual(Guid.Empty, schedule.ScheduleId);
        Assert.Equal("tenant-1", schedule.TenantId);
        Assert.Equal("Daily SBOM Export", schedule.Name);
        Assert.Equal("export.sbom", schedule.ExportType);
        Assert.Equal("0 0 * * *", schedule.CronExpression);
        Assert.Equal("UTC", schedule.Timezone);
        Assert.True(schedule.Enabled);
        Assert.Equal("default", schedule.RetentionPolicy);
        Assert.Null(schedule.ProjectId);
        Assert.Equal(1, schedule.MaxConcurrent);
        Assert.True(schedule.SkipIfRunning);
        Assert.Null(schedule.LastRunAt);
        Assert.Null(schedule.LastJobId);
        Assert.Null(schedule.LastRunStatus);
        Assert.Null(schedule.NextRunAt);
        Assert.Equal(0, schedule.TotalRuns);
        Assert.Equal(0, schedule.SuccessfulRuns);
        Assert.Equal(0, schedule.FailedRuns);
        Assert.InRange(schedule.CreatedAt, before, after);
        Assert.Equal(schedule.CreatedAt, schedule.UpdatedAt);
        Assert.Equal("admin@example.com", schedule.CreatedBy);
        Assert.Equal("admin@example.com", schedule.UpdatedBy);
    }

    [Fact]
    public void Create_AcceptsOptionalParameters()
    {
        var payload = ExportJobPayload.Default("json");

        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Weekly Report",
            exportType: "export.report",
            cronExpression: "0 0 * * SUN",
            payloadTemplate: payload,
            createdBy: "admin@example.com",
            description: "Weekly compliance report",
            timezone: "America/New_York",
            retentionPolicy: "compliance",
            projectId: "project-123",
            maxConcurrent: 3,
            skipIfRunning: false);

        Assert.Equal("Weekly compliance report", schedule.Description);
        Assert.Equal("America/New_York", schedule.Timezone);
        Assert.Equal("compliance", schedule.RetentionPolicy);
        Assert.Equal("project-123", schedule.ProjectId);
        Assert.Equal(3, schedule.MaxConcurrent);
        Assert.False(schedule.SkipIfRunning);
    }

    [Fact]
    public void Enable_EnablesSchedule()
    {
        var payload = ExportJobPayload.Default("json");
        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Test",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        var disabled = schedule.Disable();
        Assert.False(disabled.Enabled);

        var enabled = disabled.Enable();
        Assert.True(enabled.Enabled);
        Assert.True(enabled.UpdatedAt > disabled.UpdatedAt);
    }

    [Fact]
    public void Disable_DisablesSchedule()
    {
        var payload = ExportJobPayload.Default("json");
        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Test",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        var disabled = schedule.Disable();

        Assert.False(disabled.Enabled);
        Assert.True(disabled.UpdatedAt >= schedule.UpdatedAt);
    }

    [Fact]
    public void RecordSuccess_UpdatesRunStatistics()
    {
        var payload = ExportJobPayload.Default("json");
        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Test",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        var jobId = Guid.NewGuid();
        var nextRun = DateTimeOffset.UtcNow.AddDays(1);
        var before = DateTimeOffset.UtcNow;

        var updated = schedule.RecordSuccess(jobId, nextRun);

        Assert.NotNull(updated.LastRunAt);
        Assert.True(updated.LastRunAt >= before);
        Assert.Equal(jobId, updated.LastJobId);
        Assert.Equal("completed", updated.LastRunStatus);
        Assert.Equal(nextRun, updated.NextRunAt);
        Assert.Equal(1, updated.TotalRuns);
        Assert.Equal(1, updated.SuccessfulRuns);
        Assert.Equal(0, updated.FailedRuns);
    }

    [Fact]
    public void RecordFailure_UpdatesRunStatistics()
    {
        var payload = ExportJobPayload.Default("json");
        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Test",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        var jobId = Guid.NewGuid();
        var nextRun = DateTimeOffset.UtcNow.AddDays(1);

        var updated = schedule.RecordFailure(jobId, "Database connection failed", nextRun);

        Assert.NotNull(updated.LastRunAt);
        Assert.Equal(jobId, updated.LastJobId);
        Assert.Equal("failed: Database connection failed", updated.LastRunStatus);
        Assert.Equal(nextRun, updated.NextRunAt);
        Assert.Equal(1, updated.TotalRuns);
        Assert.Equal(0, updated.SuccessfulRuns);
        Assert.Equal(1, updated.FailedRuns);
    }

    [Fact]
    public void RecordFailure_UsesUnknownWhenNoReason()
    {
        var payload = ExportJobPayload.Default("json");
        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Test",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        var updated = schedule.RecordFailure(Guid.NewGuid());

        Assert.Equal("failed: unknown", updated.LastRunStatus);
    }

    [Fact]
    public void SuccessRate_CalculatesCorrectly()
    {
        var payload = ExportJobPayload.Default("json");
        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Test",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        Assert.Equal(0, schedule.SuccessRate); // No runs

        var updated = schedule
            .RecordSuccess(Guid.NewGuid())
            .RecordSuccess(Guid.NewGuid())
            .RecordSuccess(Guid.NewGuid())
            .RecordFailure(Guid.NewGuid());

        Assert.Equal(75.0, updated.SuccessRate);
    }

    [Fact]
    public void WithNextRun_SetsNextRunTime()
    {
        var payload = ExportJobPayload.Default("json");
        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Test",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        var nextRun = DateTimeOffset.UtcNow.AddHours(6);
        var updated = schedule.WithNextRun(nextRun);

        Assert.Equal(nextRun, updated.NextRunAt);
    }

    [Fact]
    public void WithCron_UpdatesCronExpression()
    {
        var payload = ExportJobPayload.Default("json");
        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Test",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        var updated = schedule.WithCron("0 */6 * * *", "scheduler@example.com");

        Assert.Equal("0 */6 * * *", updated.CronExpression);
        Assert.Equal("scheduler@example.com", updated.UpdatedBy);
    }

    [Fact]
    public void WithPayload_UpdatesPayloadTemplate()
    {
        var payload = ExportJobPayload.Default("json");
        var schedule = ExportSchedule.Create(
            tenantId: "tenant-1",
            name: "Test",
            exportType: "export.sbom",
            cronExpression: "0 0 * * *",
            payloadTemplate: payload,
            createdBy: "admin@example.com");

        var newPayload = ExportJobPayload.Default("ndjson") with { ProjectId = "project-2" };

        var updated = schedule.WithPayload(newPayload, "editor@example.com");

        Assert.Equal("project-2", updated.PayloadTemplate.ProjectId);
        Assert.Equal("ndjson", updated.PayloadTemplate.Format);
        Assert.Equal("editor@example.com", updated.UpdatedBy);
    }
}
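
// --- Editor's note: sketch, not part of this commit. SuccessRate_CalculatesCorrectly
// implies a percentage guarded against division by zero (no runs => 0, not NaN):
internal static class SuccessRateSketch
{
    public static double SuccessRate(long successfulRuns, long totalRuns) =>
        totalRuns == 0 ? 0.0 : successfulRuns * 100.0 / totalRuns;
    // Example: 3 successes out of 4 runs => 75.0, as asserted above.
}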

/// <summary>
/// Tests for RetentionPruneConfig.
/// </summary>
public sealed class RetentionPruneConfigTests
{
    [Fact]
    public void Create_CreatesConfigWithDefaults()
    {
        var before = DateTimeOffset.UtcNow;
        var config = RetentionPruneConfig.Create();
        var after = DateTimeOffset.UtcNow;

        Assert.NotEqual(Guid.Empty, config.PruneId);
        Assert.Null(config.TenantId);
        Assert.Null(config.ExportType);
        Assert.True(config.Enabled);
        Assert.Equal(RetentionPruneConfig.DefaultCronExpression, config.CronExpression);
        Assert.Equal(RetentionPruneConfig.DefaultBatchSize, config.BatchSize);
        Assert.True(config.ArchiveBeforeDelete);
        Assert.Null(config.ArchiveProvider);
        Assert.False(config.NotifyOnComplete);
        Assert.Null(config.NotificationChannel);
        Assert.Null(config.LastPruneAt);
        Assert.Equal(0, config.LastPruneCount);
        Assert.Equal(0, config.TotalPruned);
        Assert.InRange(config.CreatedAt, before, after);
    }

    [Fact]
    public void Create_AcceptsOptionalParameters()
    {
        var config = RetentionPruneConfig.Create(
            tenantId: "tenant-1",
            exportType: "export.sbom",
            cronExpression: "0 3 * * *",
            batchSize: 50);

        Assert.Equal("tenant-1", config.TenantId);
        Assert.Equal("export.sbom", config.ExportType);
        Assert.Equal("0 3 * * *", config.CronExpression);
        Assert.Equal(50, config.BatchSize);
    }

    [Fact]
    public void DefaultBatchSize_IsReasonable()
    {
        Assert.Equal(100, RetentionPruneConfig.DefaultBatchSize);
    }

    [Fact]
    public void DefaultCronExpression_IsDailyAt2AM()
    {
        Assert.Equal("0 2 * * *", RetentionPruneConfig.DefaultCronExpression);
    }

    [Fact]
    public void RecordPrune_UpdatesStatistics()
    {
        var config = RetentionPruneConfig.Create();
        var before = DateTimeOffset.UtcNow;

        var updated = config.RecordPrune(25);

        Assert.NotNull(updated.LastPruneAt);
        Assert.True(updated.LastPruneAt >= before);
        Assert.Equal(25, updated.LastPruneCount);
        Assert.Equal(25, updated.TotalPruned);
    }

    [Fact]
    public void RecordPrune_AccumulatesTotal()
    {
        var config = RetentionPruneConfig.Create();

        var updated = config
            .RecordPrune(10)
            .RecordPrune(15)
            .RecordPrune(20);

        Assert.Equal(20, updated.LastPruneCount);
        Assert.Equal(45, updated.TotalPruned);
    }
}

/// <summary>
/// Tests for ExportAlertConfig.
/// </summary>
public sealed class ExportAlertConfigTests
{
    [Fact]
    public void Create_CreatesConfigWithDefaults()
    {
        var before = DateTimeOffset.UtcNow;

        var config = ExportAlertConfig.Create(
            tenantId: "tenant-1",
            name: "SBOM Export Failures");

        var after = DateTimeOffset.UtcNow;

        Assert.NotEqual(Guid.Empty, config.AlertConfigId);
        Assert.Equal("tenant-1", config.TenantId);
        Assert.Equal("SBOM Export Failures", config.Name);
        Assert.Null(config.ExportType);
        Assert.True(config.Enabled);
        Assert.Equal(3, config.ConsecutiveFailuresThreshold);
        Assert.Equal(50.0, config.FailureRateThreshold);
        Assert.Equal(TimeSpan.FromHours(1), config.FailureRateWindow);
        Assert.Equal(ExportAlertSeverity.Warning, config.Severity);
        Assert.Equal("email", config.NotificationChannels);
        Assert.Equal(TimeSpan.FromMinutes(15), config.Cooldown);
        Assert.Null(config.LastAlertAt);
        Assert.Equal(0, config.TotalAlerts);
        Assert.InRange(config.CreatedAt, before, after);
    }

    [Fact]
    public void Create_AcceptsOptionalParameters()
    {
        var config = ExportAlertConfig.Create(
            tenantId: "tenant-1",
            name: "Critical Export Failures",
            exportType: "export.report",
            consecutiveFailuresThreshold: 5,
            failureRateThreshold: 25.0,
            severity: ExportAlertSeverity.Critical);

        Assert.Equal("export.report", config.ExportType);
        Assert.Equal(5, config.ConsecutiveFailuresThreshold);
        Assert.Equal(25.0, config.FailureRateThreshold);
        Assert.Equal(ExportAlertSeverity.Critical, config.Severity);
    }

    [Fact]
    public void CanAlert_ReturnsTrueWhenNoLastAlert()
    {
        var config = ExportAlertConfig.Create(
            tenantId: "tenant-1",
            name: "Test Alert");

        Assert.True(config.CanAlert);
    }

    [Fact]
    public void CanAlert_ReturnsFalseWithinCooldown()
    {
        var config = ExportAlertConfig.Create(
            tenantId: "tenant-1",
            name: "Test Alert");

        var alerted = config.RecordAlert();

        Assert.False(alerted.CanAlert);
    }

    [Fact]
    public void CanAlert_ReturnsTrueAfterCooldown()
    {
        var config = new ExportAlertConfig(
            AlertConfigId: Guid.NewGuid(),
            TenantId: "tenant-1",
            Name: "Test Alert",
            ExportType: null,
            Enabled: true,
            ConsecutiveFailuresThreshold: 3,
            FailureRateThreshold: 50.0,
            FailureRateWindow: TimeSpan.FromHours(1),
            Severity: ExportAlertSeverity.Warning,
            NotificationChannels: "email",
            Cooldown: TimeSpan.FromMinutes(15),
            LastAlertAt: DateTimeOffset.UtcNow.AddMinutes(-20), // Past cooldown
            TotalAlerts: 1,
            CreatedAt: DateTimeOffset.UtcNow.AddDays(-1),
            UpdatedAt: DateTimeOffset.UtcNow.AddMinutes(-20));

        Assert.True(config.CanAlert);
    }

    [Fact]
    public void RecordAlert_UpdatesTimestampAndCount()
    {
        var config = ExportAlertConfig.Create(
            tenantId: "tenant-1",
            name: "Test Alert");

        var before = DateTimeOffset.UtcNow;
        var updated = config.RecordAlert();
        var after = DateTimeOffset.UtcNow;

        Assert.NotNull(updated.LastAlertAt);
        Assert.InRange(updated.LastAlertAt.Value, before, after);
        Assert.Equal(1, updated.TotalAlerts);
    }

    [Fact]
    public void RecordAlert_AccumulatesAlertCount()
    {
        var config = ExportAlertConfig.Create(
            tenantId: "tenant-1",
            name: "Test Alert");

        // Simulate multiple alerts with cooldown passage
        var updated = config with
        {
            LastAlertAt = DateTimeOffset.UtcNow.AddMinutes(-20),
            TotalAlerts = 5
        };

        var alerted = updated.RecordAlert();
        Assert.Equal(6, alerted.TotalAlerts);
    }
}
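
// --- Editor's note: sketch, not part of this commit. The three CanAlert tests
// pin the cooldown gate: fire freely when no alert has been recorded, suppress
// inside the cooldown window, allow again once it has elapsed. UtcNow as the
// clock is an assumption.
internal static class AlertCooldownSketch
{
    public static bool CanAlert(DateTimeOffset? lastAlertAt, TimeSpan cooldown) =>
        lastAlertAt is null || DateTimeOffset.UtcNow - lastAlertAt.Value >= cooldown;
}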

/// <summary>
/// Tests for ExportAlert.
/// </summary>
public sealed class ExportAlertTests
{
    [Fact]
    public void CreateForConsecutiveFailures_CreatesAlert()
    {
        var configId = Guid.NewGuid();
        var failedJobs = new List<Guid> { Guid.NewGuid(), Guid.NewGuid(), Guid.NewGuid() };
        var before = DateTimeOffset.UtcNow;

        var alert = ExportAlert.CreateForConsecutiveFailures(
            alertConfigId: configId,
            tenantId: "tenant-1",
            exportType: "export.sbom",
            severity: ExportAlertSeverity.Error,
            failedJobIds: failedJobs,
            consecutiveFailures: 3);

        var after = DateTimeOffset.UtcNow;

        Assert.NotEqual(Guid.Empty, alert.AlertId);
        Assert.Equal(configId, alert.AlertConfigId);
        Assert.Equal("tenant-1", alert.TenantId);
        Assert.Equal("export.sbom", alert.ExportType);
        Assert.Equal(ExportAlertSeverity.Error, alert.Severity);
        Assert.Contains("failed 3 consecutive times", alert.Message);
        Assert.Equal(3, alert.FailedJobIds.Count);
        Assert.Equal(3, alert.ConsecutiveFailures);
        Assert.Equal(0, alert.FailureRate);
        Assert.InRange(alert.TriggeredAt, before, after);
        Assert.Null(alert.AcknowledgedAt);
        Assert.Null(alert.AcknowledgedBy);
        Assert.Null(alert.ResolvedAt);
        Assert.Null(alert.ResolutionNotes);
    }

    [Fact]
    public void CreateForHighFailureRate_CreatesAlert()
    {
        var configId = Guid.NewGuid();
        var failedJobs = new List<Guid> { Guid.NewGuid(), Guid.NewGuid() };

        var alert = ExportAlert.CreateForHighFailureRate(
            alertConfigId: configId,
            tenantId: "tenant-1",
            exportType: "export.report",
            severity: ExportAlertSeverity.Warning,
            failureRate: 75.5,
            recentFailedJobIds: failedJobs);

        Assert.Contains("failure rate is 75.5%", alert.Message);
        Assert.Equal(0, alert.ConsecutiveFailures);
        Assert.Equal(75.5, alert.FailureRate);
    }

    [Fact]
    public void Acknowledge_SetsAcknowledgementInfo()
    {
        var alert = ExportAlert.CreateForConsecutiveFailures(
            alertConfigId: Guid.NewGuid(),
            tenantId: "tenant-1",
            exportType: "export.sbom",
            severity: ExportAlertSeverity.Error,
            failedJobIds: [Guid.NewGuid()],
            consecutiveFailures: 1);

        var before = DateTimeOffset.UtcNow;
        var acknowledged = alert.Acknowledge("operator@example.com");
        var after = DateTimeOffset.UtcNow;

        Assert.NotNull(acknowledged.AcknowledgedAt);
        Assert.InRange(acknowledged.AcknowledgedAt.Value, before, after);
        Assert.Equal("operator@example.com", acknowledged.AcknowledgedBy);
        Assert.True(acknowledged.IsActive); // Still active until resolved
    }

    [Fact]
    public void Resolve_SetsResolutionInfo()
    {
        var alert = ExportAlert.CreateForConsecutiveFailures(
            alertConfigId: Guid.NewGuid(),
            tenantId: "tenant-1",
            exportType: "export.sbom",
            severity: ExportAlertSeverity.Error,
            failedJobIds: [Guid.NewGuid()],
            consecutiveFailures: 1);

        var before = DateTimeOffset.UtcNow;
        var resolved = alert.Resolve("Fixed database connection issue");
        var after = DateTimeOffset.UtcNow;

        Assert.NotNull(resolved.ResolvedAt);
        Assert.InRange(resolved.ResolvedAt.Value, before, after);
        Assert.Equal("Fixed database connection issue", resolved.ResolutionNotes);
        Assert.False(resolved.IsActive);
    }

    [Fact]
    public void Resolve_WorksWithoutNotes()
    {
        var alert = ExportAlert.CreateForConsecutiveFailures(
            alertConfigId: Guid.NewGuid(),
            tenantId: "tenant-1",
            exportType: "export.sbom",
            severity: ExportAlertSeverity.Error,
            failedJobIds: [Guid.NewGuid()],
            consecutiveFailures: 1);

        var resolved = alert.Resolve();

        Assert.NotNull(resolved.ResolvedAt);
        Assert.Null(resolved.ResolutionNotes);
        Assert.False(resolved.IsActive);
    }

    [Fact]
    public void IsActive_ReturnsTrueWhenNotResolved()
    {
        var alert = ExportAlert.CreateForConsecutiveFailures(
            alertConfigId: Guid.NewGuid(),
            tenantId: "tenant-1",
            exportType: "export.sbom",
            severity: ExportAlertSeverity.Error,
            failedJobIds: [Guid.NewGuid()],
            consecutiveFailures: 1);

        Assert.True(alert.IsActive);

        var acknowledged = alert.Acknowledge("user@example.com");
        Assert.True(acknowledged.IsActive);
    }

    [Fact]
    public void IsActive_ReturnsFalseWhenResolved()
    {
        var alert = ExportAlert.CreateForConsecutiveFailures(
            alertConfigId: Guid.NewGuid(),
            tenantId: "tenant-1",
            exportType: "export.sbom",
            severity: ExportAlertSeverity.Error,
            failedJobIds: [Guid.NewGuid()],
            consecutiveFailures: 1);

        var resolved = alert.Resolve();
        Assert.False(resolved.IsActive);
    }
}

/// <summary>
/// Tests for ExportAlertSeverity.
/// </summary>
public sealed class ExportAlertSeverityTests
{
    [Theory]
    [InlineData(ExportAlertSeverity.Info, 0)]
    [InlineData(ExportAlertSeverity.Warning, 1)]
    [InlineData(ExportAlertSeverity.Error, 2)]
    [InlineData(ExportAlertSeverity.Critical, 3)]
    public void AllSeverityValues_HaveCorrectValue(ExportAlertSeverity severity, int expected)
    {
        Assert.Equal(expected, (int)severity);
    }

    [Fact]
    public void Severity_CanBeCompared()
    {
        Assert.True(ExportAlertSeverity.Critical > ExportAlertSeverity.Error);
        Assert.True(ExportAlertSeverity.Error > ExportAlertSeverity.Warning);
        Assert.True(ExportAlertSeverity.Warning > ExportAlertSeverity.Info);
    }
}

/// <summary>
/// Tests for RetentionPruneResult.
/// </summary>
public sealed class RetentionPruneResultTests
{
    [Fact]
    public void TotalProcessed_SumsAllCounts()
    {
        var result = new RetentionPruneResult(
            ArchivedCount: 10,
            DeletedCount: 20,
            SkippedCount: 5,
            Errors: [],
            Duration: TimeSpan.FromSeconds(30));

        Assert.Equal(35, result.TotalProcessed);
    }

    [Fact]
    public void HasErrors_ReturnsTrueWithErrors()
    {
        var result = new RetentionPruneResult(
            ArchivedCount: 10,
            DeletedCount: 20,
            SkippedCount: 5,
            Errors: ["Failed to delete export-123"],
            Duration: TimeSpan.FromSeconds(30));

        Assert.True(result.HasErrors);
    }

    [Fact]
    public void HasErrors_ReturnsFalseWithoutErrors()
    {
        var result = new RetentionPruneResult(
            ArchivedCount: 10,
            DeletedCount: 20,
            SkippedCount: 5,
            Errors: [],
            Duration: TimeSpan.FromSeconds(30));

        Assert.False(result.HasErrors);
    }

    [Fact]
    public void Empty_ReturnsZeroResult()
    {
        var empty = RetentionPruneResult.Empty;

        Assert.Equal(0, empty.ArchivedCount);
        Assert.Equal(0, empty.DeletedCount);
        Assert.Equal(0, empty.SkippedCount);
        Assert.Empty(empty.Errors);
        Assert.Equal(TimeSpan.Zero, empty.Duration);
        Assert.Equal(0, empty.TotalProcessed);
        Assert.False(empty.HasErrors);
    }
}
@@ -0,0 +1,249 @@
using StellaOps.Orchestrator.Core.Domain;
using StellaOps.Orchestrator.WebService.Contracts;

namespace StellaOps.Orchestrator.Tests.PackRun;

public sealed class PackRunContractTests
{
    [Fact]
    public void PackRunResponse_FromDomain_MapsAllFields()
    {
        var packRunId = Guid.NewGuid();
        var now = DateTimeOffset.UtcNow;
        var startedAt = now.AddMinutes(-5);
        var completedAt = now;

        var packRun = new Core.Domain.PackRun(
            PackRunId: packRunId,
            TenantId: "tenant-1",
            ProjectId: "proj-1",
            PackId: "pack-alpha",
            PackVersion: "1.2.3",
            Status: PackRunStatus.Succeeded,
            Priority: 5,
            Attempt: 2,
            MaxAttempts: 3,
            Parameters: "{\"key\":\"value\"}",
            ParametersDigest: "sha256:abc",
            IdempotencyKey: "idem-1",
            CorrelationId: "corr-1",
            LeaseId: null,
            TaskRunnerId: "runner-1",
            LeaseUntil: null,
            CreatedAt: now.AddMinutes(-10),
            ScheduledAt: now.AddMinutes(-8),
            LeasedAt: now.AddMinutes(-6),
            StartedAt: startedAt,
            CompletedAt: completedAt,
            NotBefore: null,
            Reason: "Completed successfully",
            ExitCode: 0,
            DurationMs: 300000,
            CreatedBy: "user@example.com",
            Metadata: null);

        var response = PackRunResponse.FromDomain(packRun);

        Assert.Equal(packRunId, response.PackRunId);
        Assert.Equal("pack-alpha", response.PackId);
        Assert.Equal("1.2.3", response.PackVersion);
        Assert.Equal("succeeded", response.Status);
        Assert.Equal(5, response.Priority);
        Assert.Equal(2, response.Attempt);
        Assert.Equal(3, response.MaxAttempts);
        Assert.Equal("corr-1", response.CorrelationId);
        Assert.Equal("runner-1", response.TaskRunnerId);
        Assert.Equal(now.AddMinutes(-10), response.CreatedAt);
        Assert.Equal(now.AddMinutes(-8), response.ScheduledAt);
        Assert.Equal(startedAt, response.StartedAt);
        Assert.Equal(completedAt, response.CompletedAt);
        Assert.Equal("Completed successfully", response.Reason);
        Assert.Equal(0, response.ExitCode);
        Assert.Equal(300000, response.DurationMs);
        Assert.Equal("user@example.com", response.CreatedBy);
    }

    [Theory]
    [InlineData(PackRunStatus.Pending, "pending")]
    [InlineData(PackRunStatus.Scheduled, "scheduled")]
    [InlineData(PackRunStatus.Leased, "leased")]
    [InlineData(PackRunStatus.Running, "running")]
    [InlineData(PackRunStatus.Succeeded, "succeeded")]
    [InlineData(PackRunStatus.Failed, "failed")]
    [InlineData(PackRunStatus.Canceled, "canceled")]
    [InlineData(PackRunStatus.TimedOut, "timedout")]
    public void PackRunResponse_FromDomain_StatusIsLowercase(PackRunStatus status, string expectedStatusString)
    {
        var packRun = CreatePackRunWithStatus(status);
        var response = PackRunResponse.FromDomain(packRun);

        Assert.Equal(expectedStatusString, response.Status);
    }

    [Fact]
    public void LogEntryResponse_FromDomain_MapsAllFields()
    {
        var logId = Guid.NewGuid();
        var packRunId = Guid.NewGuid();
        var now = DateTimeOffset.UtcNow;

        var log = new PackRunLog(
            LogId: logId,
            TenantId: "tenant-1",
            PackRunId: packRunId,
            Sequence: 42,
            Level: LogLevel.Warn,
            Source: "stderr",
            Message: "Warning: something happened",
            Timestamp: now,
            Data: "{\"details\":true}");

        var response = LogEntryResponse.FromDomain(log);

        Assert.Equal(logId, response.LogId);
        Assert.Equal(42, response.Sequence);
        Assert.Equal("warn", response.Level);
        Assert.Equal("stderr", response.Source);
        Assert.Equal("Warning: something happened", response.Message);
        Assert.Equal(now, response.Timestamp);
        Assert.Equal("{\"details\":true}", response.Data);
    }

    [Theory]
    [InlineData(LogLevel.Trace, "trace")]
    [InlineData(LogLevel.Debug, "debug")]
    [InlineData(LogLevel.Info, "info")]
    [InlineData(LogLevel.Warn, "warn")]
    [InlineData(LogLevel.Error, "error")]
    [InlineData(LogLevel.Fatal, "fatal")]
    public void LogEntryResponse_FromDomain_LevelIsLowercase(LogLevel level, string expectedLevelString)
    {
        var log = new PackRunLog(
            LogId: Guid.NewGuid(),
            TenantId: "t1",
            PackRunId: Guid.NewGuid(),
            Sequence: 0,
            Level: level,
            Source: "test",
            Message: "test",
            Timestamp: DateTimeOffset.UtcNow,
            Data: null);

        var response = LogEntryResponse.FromDomain(log);

        Assert.Equal(expectedLevelString, response.Level);
    }

    [Fact]
    public void SchedulePackRunRequest_AllFieldsAccessible()
    {
        var request = new SchedulePackRunRequest(
            PackId: "pack-1",
            PackVersion: "2.0.0",
            Parameters: "{\"param\":1}",
            ProjectId: "proj-1",
            IdempotencyKey: "key-1",
            CorrelationId: "corr-1",
            Priority: 10,
            MaxAttempts: 5,
            Metadata: "{\"source\":\"api\"}");

        Assert.Equal("pack-1", request.PackId);
        Assert.Equal("2.0.0", request.PackVersion);
        Assert.Equal("{\"param\":1}", request.Parameters);
        Assert.Equal("proj-1", request.ProjectId);
        Assert.Equal("key-1", request.IdempotencyKey);
        Assert.Equal("corr-1", request.CorrelationId);
        Assert.Equal(10, request.Priority);
        Assert.Equal(5, request.MaxAttempts);
        Assert.Equal("{\"source\":\"api\"}", request.Metadata);
    }

    [Fact]
    public void ClaimPackRunRequest_AllFieldsAccessible()
    {
        var request = new ClaimPackRunRequest(
            TaskRunnerId: "runner-1",
            PackId: "pack-filter",
            LeaseSeconds: 600,
            IdempotencyKey: "claim-key-1");

        Assert.Equal("runner-1", request.TaskRunnerId);
        Assert.Equal("pack-filter", request.PackId);
        Assert.Equal(600, request.LeaseSeconds);
        Assert.Equal("claim-key-1", request.IdempotencyKey);
    }

    [Fact]
    public void CompletePackRunRequest_AllFieldsAccessible()
    {
        var artifacts = new List<PackRunArtifactRequest>
        {
            new("report", "s3://bucket/report.json", "sha256:abc", "application/json", 1024, null),
            new("log", "s3://bucket/log.txt", "sha256:def", "text/plain", 2048, "{\"lines\":500}")
        };

        var request = new CompletePackRunRequest(
            LeaseId: Guid.NewGuid(),
            Success: true,
            ExitCode: 0,
            Reason: "All tests passed",
            Artifacts: artifacts);

        Assert.True(request.Success);
        Assert.Equal(0, request.ExitCode);
        Assert.Equal("All tests passed", request.Reason);
        Assert.NotNull(request.Artifacts);
        Assert.Equal(2, request.Artifacts.Count);
        Assert.Equal("report", request.Artifacts[0].ArtifactType);
        Assert.Equal("log", request.Artifacts[1].ArtifactType);
    }

    [Fact]
    public void PackRunErrorResponse_AllFieldsAccessible()
    {
        var packRunId = Guid.NewGuid();
        var error = new PackRunErrorResponse(
            Code: "lease_expired",
            Message: "The lease has expired",
            PackRunId: packRunId,
            RetryAfterSeconds: 30);

        Assert.Equal("lease_expired", error.Code);
        Assert.Equal("The lease has expired", error.Message);
        Assert.Equal(packRunId, error.PackRunId);
        Assert.Equal(30, error.RetryAfterSeconds);
    }

    private static Core.Domain.PackRun CreatePackRunWithStatus(PackRunStatus status)
    {
        return new Core.Domain.PackRun(
            PackRunId: Guid.NewGuid(),
            TenantId: "t1",
            ProjectId: null,
            PackId: "pack",
            PackVersion: "1.0.0",
            Status: status,
            Priority: 0,
            Attempt: 1,
            MaxAttempts: 3,
            Parameters: "{}",
            ParametersDigest: "abc",
            IdempotencyKey: "key",
            CorrelationId: null,
            LeaseId: null,
            TaskRunnerId: null,
            LeaseUntil: null,
            CreatedAt: DateTimeOffset.UtcNow,
            ScheduledAt: null,
            LeasedAt: null,
            StartedAt: null,
            CompletedAt: null,
            NotBefore: null,
            Reason: null,
            ExitCode: null,
            DurationMs: null,
            CreatedBy: "system",
            Metadata: null);
    }
}
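
// --- Editor's note: sketch, not part of this commit. The wire strings asserted
// above ("succeeded", "timedout", "warn", ...) are exactly the enum names
// lower-cased, so the mapping is plausibly a one-liner like this (assumption:
// the real contracts may use a serializer naming policy instead):
internal static class EnumWireFormatSketch
{
    public static string ToWire<TEnum>(TEnum value) where TEnum : struct, Enum =>
        value.ToString().ToLowerInvariant();
}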
@@ -0,0 +1,217 @@
using StellaOps.Orchestrator.Core.Domain;

namespace StellaOps.Orchestrator.Tests.PackRun;

public sealed class PackRunLogTests
{
    private const string TestTenantId = "tenant-test";
    private readonly Guid _packRunId = Guid.NewGuid();

    [Fact]
    public void Create_InitializesAllFields()
    {
        var now = DateTimeOffset.UtcNow;

        var log = PackRunLog.Create(
            packRunId: _packRunId,
            tenantId: TestTenantId,
            sequence: 5,
            level: LogLevel.Info,
            source: "stdout",
            message: "Test message",
            data: "{\"key\":\"value\"}",
            timestamp: now);

        Assert.NotEqual(Guid.Empty, log.LogId);
        Assert.Equal(TestTenantId, log.TenantId);
        Assert.Equal(_packRunId, log.PackRunId);
        Assert.Equal(5, log.Sequence);
        Assert.Equal(LogLevel.Info, log.Level);
        Assert.Equal("stdout", log.Source);
        Assert.Equal("Test message", log.Message);
        Assert.Equal(now, log.Timestamp);
        Assert.Equal("{\"key\":\"value\"}", log.Data);
    }

    [Fact]
    public void Create_WithNullTimestamp_UsesUtcNow()
    {
        var beforeCreate = DateTimeOffset.UtcNow;

        var log = PackRunLog.Create(
            packRunId: _packRunId,
            tenantId: TestTenantId,
            sequence: 0,
            level: LogLevel.Debug,
            source: "test",
            message: "Test");

        var afterCreate = DateTimeOffset.UtcNow;

        Assert.True(log.Timestamp >= beforeCreate);
        Assert.True(log.Timestamp <= afterCreate);
    }

    [Fact]
    public void Stdout_CreatesInfoLevelStdoutLog()
    {
        var now = DateTimeOffset.UtcNow;

        var log = PackRunLog.Stdout(_packRunId, TestTenantId, 10, "Hello stdout", now);

        Assert.Equal(LogLevel.Info, log.Level);
        Assert.Equal("stdout", log.Source);
        Assert.Equal("Hello stdout", log.Message);
        Assert.Equal(10, log.Sequence);
        Assert.Null(log.Data);
    }

    [Fact]
    public void Stderr_CreatesWarnLevelStderrLog()
    {
        var now = DateTimeOffset.UtcNow;

        var log = PackRunLog.Stderr(_packRunId, TestTenantId, 20, "Warning message", now);

        Assert.Equal(LogLevel.Warn, log.Level);
        Assert.Equal("stderr", log.Source);
        Assert.Equal("Warning message", log.Message);
        Assert.Equal(20, log.Sequence);
    }

    [Fact]
    public void System_CreatesSystemSourceLog()
    {
        var now = DateTimeOffset.UtcNow;

        var log = PackRunLog.System(_packRunId, TestTenantId, 30, LogLevel.Error, "System error", "{\"code\":500}", now);

        Assert.Equal(LogLevel.Error, log.Level);
        Assert.Equal("system", log.Source);
        Assert.Equal("System error", log.Message);
        Assert.Equal("{\"code\":500}", log.Data);
        Assert.Equal(30, log.Sequence);
    }

    [Theory]
    [InlineData(LogLevel.Trace, 0)]
    [InlineData(LogLevel.Debug, 1)]
    [InlineData(LogLevel.Info, 2)]
    [InlineData(LogLevel.Warn, 3)]
    [InlineData(LogLevel.Error, 4)]
    [InlineData(LogLevel.Fatal, 5)]
    public void LogLevel_HasCorrectOrdinalValues(LogLevel level, int expectedValue)
    {
        Assert.Equal(expectedValue, (int)level);
    }
}

public sealed class PackRunLogBatchTests
{
    private const string TestTenantId = "tenant-test";
    private readonly Guid _packRunId = Guid.NewGuid();

    [Fact]
    public void FromLogs_EmptyList_ReturnsEmptyBatch()
    {
        var batch = PackRunLogBatch.FromLogs(_packRunId, TestTenantId, []);

        Assert.Equal(_packRunId, batch.PackRunId);
        Assert.Equal(TestTenantId, batch.TenantId);
        Assert.Equal(0, batch.StartSequence);
        Assert.Empty(batch.Logs);
        Assert.Equal(0, batch.NextSequence);
    }

    [Fact]
    public void FromLogs_WithLogs_SetsCorrectStartSequence()
    {
        var logs = new List<PackRunLog>
        {
            PackRunLog.Create(_packRunId, TestTenantId, 5, LogLevel.Info, "src", "msg1"),
            PackRunLog.Create(_packRunId, TestTenantId, 6, LogLevel.Info, "src", "msg2"),
            PackRunLog.Create(_packRunId, TestTenantId, 7, LogLevel.Info, "src", "msg3")
        };

        var batch = PackRunLogBatch.FromLogs(_packRunId, TestTenantId, logs);

        Assert.Equal(5, batch.StartSequence);
        Assert.Equal(3, batch.Logs.Count);
        Assert.Equal(8, batch.NextSequence); // StartSequence + Count
    }

    [Fact]
    public void NextSequence_CalculatesCorrectly()
    {
        var batch = new PackRunLogBatch(
            PackRunId: _packRunId,
            TenantId: TestTenantId,
            StartSequence: 100,
            Logs:
            [
                PackRunLog.Create(_packRunId, TestTenantId, 100, LogLevel.Info, "src", "msg1"),
                PackRunLog.Create(_packRunId, TestTenantId, 101, LogLevel.Info, "src", "msg2")
            ]);

        Assert.Equal(102, batch.NextSequence);
    }
}
|
||||
|
||||
public sealed class PackRunLogCursorTests
|
||||
{
|
||||
private readonly Guid _packRunId = Guid.NewGuid();
|
||||
|
||||
[Fact]
|
||||
public void Start_CreatesInitialCursor()
|
||||
{
|
||||
var cursor = PackRunLogCursor.Start(_packRunId);
|
||||
|
||||
Assert.Equal(_packRunId, cursor.PackRunId);
|
||||
Assert.Equal(-1, cursor.LastSequence);
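        // -1 is the pre-read sentinel: a consumer resuming "after sequence -1" reads from the first log.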
        Assert.False(cursor.IsComplete);
    }

    [Fact]
    public void Resume_CreatesCursorFromSequence()
    {
        var cursor = PackRunLogCursor.Resume(_packRunId, 50);

        Assert.Equal(_packRunId, cursor.PackRunId);
        Assert.Equal(50, cursor.LastSequence);
        Assert.False(cursor.IsComplete);
    }

    [Fact]
    public void Complete_MarksCursorAsComplete()
    {
        var cursor = PackRunLogCursor.Start(_packRunId);
        var completed = cursor.Complete();

        Assert.True(completed.IsComplete);
        Assert.Equal(cursor.PackRunId, completed.PackRunId);
        Assert.Equal(cursor.LastSequence, completed.LastSequence);
    }

    [Fact]
    public void Advance_UpdatesLastSequence()
    {
        var cursor = PackRunLogCursor.Start(_packRunId);
        var advanced = cursor.Advance(100);

        Assert.Equal(100, advanced.LastSequence);
        Assert.False(advanced.IsComplete);
        Assert.Equal(cursor.PackRunId, advanced.PackRunId);
    }

    [Fact]
    public void Advance_ThenComplete_WorksCorrectly()
    {
        var cursor = PackRunLogCursor.Start(_packRunId)
            .Advance(50)
            .Advance(100)
            .Complete();

        Assert.Equal(100, cursor.LastSequence);
        Assert.True(cursor.IsComplete);
    }
}
@@ -0,0 +1,202 @@
using StellaOps.Orchestrator.Core.Domain;

namespace StellaOps.Orchestrator.Tests.PackRun;

public sealed class PackRunTests
{
    private const string TestTenantId = "tenant-test";
    private const string TestPackId = "pack-alpha";
    private const string TestPackVersion = "1.0.0";
    private const string TestParameters = "{\"key\":\"value\"}";
    private const string TestParametersDigest = "abc123def456";
    private const string TestIdempotencyKey = "idem-key-001";
    private const string TestCreatedBy = "system";

    [Fact]
    public void Create_InitializesWithCorrectDefaults()
    {
        var packRunId = Guid.NewGuid();
        var now = DateTimeOffset.UtcNow;

        var packRun = Core.Domain.PackRun.Create(
            packRunId: packRunId,
            tenantId: TestTenantId,
            projectId: "proj-1",
            packId: TestPackId,
            packVersion: TestPackVersion,
            parameters: TestParameters,
            parametersDigest: TestParametersDigest,
            idempotencyKey: TestIdempotencyKey,
            correlationId: "corr-123",
            createdBy: TestCreatedBy,
            priority: 5,
            maxAttempts: 3,
            metadata: "{\"source\":\"test\"}",
            createdAt: now);

        Assert.Equal(packRunId, packRun.PackRunId);
        Assert.Equal(TestTenantId, packRun.TenantId);
        Assert.Equal("proj-1", packRun.ProjectId);
        Assert.Equal(TestPackId, packRun.PackId);
        Assert.Equal(TestPackVersion, packRun.PackVersion);
        Assert.Equal(PackRunStatus.Pending, packRun.Status);
        Assert.Equal(5, packRun.Priority);
        Assert.Equal(1, packRun.Attempt);
        Assert.Equal(3, packRun.MaxAttempts);
        Assert.Equal(TestParameters, packRun.Parameters);
        Assert.Equal(TestParametersDigest, packRun.ParametersDigest);
        Assert.Equal(TestIdempotencyKey, packRun.IdempotencyKey);
        Assert.Equal("corr-123", packRun.CorrelationId);
        Assert.Null(packRun.LeaseId);
        Assert.Null(packRun.TaskRunnerId);
        Assert.Null(packRun.LeaseUntil);
        Assert.Equal(now, packRun.CreatedAt);
        Assert.Null(packRun.ScheduledAt);
        Assert.Null(packRun.LeasedAt);
        Assert.Null(packRun.StartedAt);
        Assert.Null(packRun.CompletedAt);
        Assert.Null(packRun.NotBefore);
        Assert.Null(packRun.Reason);
        Assert.Null(packRun.ExitCode);
        Assert.Null(packRun.DurationMs);
        Assert.Equal(TestCreatedBy, packRun.CreatedBy);
        Assert.Equal("{\"source\":\"test\"}", packRun.Metadata);
    }

    [Fact]
    public void Create_WithDefaultPriorityAndMaxAttempts()
    {
        var packRun = Core.Domain.PackRun.Create(
            packRunId: Guid.NewGuid(),
            tenantId: TestTenantId,
            projectId: null,
            packId: TestPackId,
            packVersion: TestPackVersion,
            parameters: TestParameters,
            parametersDigest: TestParametersDigest,
            idempotencyKey: TestIdempotencyKey,
            correlationId: null,
            createdBy: TestCreatedBy);

        Assert.Equal(0, packRun.Priority);
        Assert.Equal(3, packRun.MaxAttempts);
        Assert.Null(packRun.ProjectId);
        Assert.Null(packRun.CorrelationId);
        Assert.Null(packRun.Metadata);
    }

    [Theory]
    [InlineData(PackRunStatus.Succeeded, true)]
    [InlineData(PackRunStatus.Failed, true)]
    [InlineData(PackRunStatus.Canceled, true)]
    [InlineData(PackRunStatus.TimedOut, true)]
    [InlineData(PackRunStatus.Pending, false)]
    [InlineData(PackRunStatus.Scheduled, false)]
    [InlineData(PackRunStatus.Leased, false)]
    [InlineData(PackRunStatus.Running, false)]
    public void IsTerminal_ReturnsCorrectValue(PackRunStatus status, bool expectedIsTerminal)
    {
        var packRun = CreatePackRunWithStatus(status);
        Assert.Equal(expectedIsTerminal, packRun.IsTerminal);
    }

    [Theory]
    [InlineData(PackRunStatus.Failed, 1, 3, true)]  // First attempt, can retry
    [InlineData(PackRunStatus.Failed, 2, 3, true)]  // Second attempt, can retry
    [InlineData(PackRunStatus.Failed, 3, 3, false)] // Third attempt, max reached
    [InlineData(PackRunStatus.Succeeded, 1, 3, false)] // Succeeded, no retry
    [InlineData(PackRunStatus.Canceled, 1, 3, false)]  // Canceled, no retry
    [InlineData(PackRunStatus.Running, 1, 3, false)]   // Not failed, no retry
    public void CanRetry_ReturnsCorrectValue(PackRunStatus status, int attempt, int maxAttempts, bool expectedCanRetry)
    {
        var packRun = CreatePackRunWithStatusAndAttempts(status, attempt, maxAttempts);
        Assert.Equal(expectedCanRetry, packRun.CanRetry);
    }

    [Fact]
    public void Create_WithNullCreatedAt_UsesUtcNow()
    {
        var beforeCreate = DateTimeOffset.UtcNow;

        var packRun = Core.Domain.PackRun.Create(
            packRunId: Guid.NewGuid(),
            tenantId: TestTenantId,
            projectId: null,
            packId: TestPackId,
            packVersion: TestPackVersion,
            parameters: TestParameters,
            parametersDigest: TestParametersDigest,
            idempotencyKey: TestIdempotencyKey,
            correlationId: null,
            createdBy: TestCreatedBy);

        var afterCreate = DateTimeOffset.UtcNow;

        Assert.True(packRun.CreatedAt >= beforeCreate);
        Assert.True(packRun.CreatedAt <= afterCreate);
    }

    private static Core.Domain.PackRun CreatePackRunWithStatus(PackRunStatus status)
    {
        return new Core.Domain.PackRun(
            PackRunId: Guid.NewGuid(),
            TenantId: TestTenantId,
            ProjectId: null,
            PackId: TestPackId,
            PackVersion: TestPackVersion,
            Status: status,
            Priority: 0,
            Attempt: 1,
            MaxAttempts: 3,
            Parameters: TestParameters,
            ParametersDigest: TestParametersDigest,
            IdempotencyKey: TestIdempotencyKey,
            CorrelationId: null,
            LeaseId: null,
            TaskRunnerId: null,
            LeaseUntil: null,
            CreatedAt: DateTimeOffset.UtcNow,
            ScheduledAt: null,
            LeasedAt: null,
            StartedAt: null,
            CompletedAt: null,
            NotBefore: null,
            Reason: null,
            ExitCode: null,
            DurationMs: null,
            CreatedBy: TestCreatedBy,
            Metadata: null);
    }

    private static Core.Domain.PackRun CreatePackRunWithStatusAndAttempts(PackRunStatus status, int attempt, int maxAttempts)
    {
        return new Core.Domain.PackRun(
            PackRunId: Guid.NewGuid(),
            TenantId: TestTenantId,
            ProjectId: null,
            PackId: TestPackId,
            PackVersion: TestPackVersion,
            Status: status,
            Priority: 0,
            Attempt: attempt,
            MaxAttempts: maxAttempts,
            Parameters: TestParameters,
            ParametersDigest: TestParametersDigest,
            IdempotencyKey: TestIdempotencyKey,
            CorrelationId: null,
            LeaseId: null,
            TaskRunnerId: null,
            LeaseUntil: null,
            CreatedAt: DateTimeOffset.UtcNow,
            ScheduledAt: null,
            LeasedAt: null,
            StartedAt: null,
            CompletedAt: null,
            NotBefore: null,
            Reason: null,
            ExitCode: null,
            DurationMs: null,
            CreatedBy: TestCreatedBy,
            Metadata: null);
    }
}
@@ -122,9 +122,14 @@

    <ProjectReference Include="..\StellaOps.Orchestrator.Infrastructure\StellaOps.Orchestrator.Infrastructure.csproj"/>
    <ProjectReference Include="..\StellaOps.Orchestrator.WebService\StellaOps.Orchestrator.WebService.csproj"/>
  </ItemGroup>
@@ -0,0 +1,338 @@
using StellaOps.Orchestrator.Core.Domain;

namespace StellaOps.Orchestrator.WebService.Contracts;

// ========== Scheduling Requests/Responses ==========

/// <summary>
/// Request to schedule a new pack run.
/// </summary>
public sealed record SchedulePackRunRequest(
    /// <summary>Authority pack ID to execute.</summary>
    string PackId,

    /// <summary>Pack version (e.g., "1.2.3", "latest").</summary>
    string PackVersion,

    /// <summary>Pack input parameters JSON.</summary>
    string? Parameters,

    /// <summary>Optional project scope.</summary>
    string? ProjectId,

    /// <summary>Idempotency key for deduplication.</summary>
    string? IdempotencyKey,

    /// <summary>Correlation ID for tracing.</summary>
    string? CorrelationId,

    /// <summary>Priority (higher = more urgent).</summary>
    int? Priority,

    /// <summary>Maximum retry attempts.</summary>
    int? MaxAttempts,

    /// <summary>Optional metadata JSON.</summary>
    string? Metadata);
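
// Illustrative request shape (field names follow the record above; values are examples only):
// POST /api/v1/orchestrator/pack-runs
// {
//   "packId": "pack-alpha",
//   "packVersion": "1.2.3",
//   "parameters": "{\"target\":\"prod\"}",
//   "priority": 5,
//   "maxAttempts": 3
// }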

/// <summary>
/// Response for a scheduled pack run.
/// </summary>
public sealed record SchedulePackRunResponse(
    Guid PackRunId,
    string PackId,
    string PackVersion,
    string Status,
    string IdempotencyKey,
    DateTimeOffset CreatedAt,
    bool WasAlreadyScheduled);

/// <summary>
/// Response representing a pack run.
/// </summary>
public sealed record PackRunResponse(
    Guid PackRunId,
    string PackId,
    string PackVersion,
    string Status,
    int Priority,
    int Attempt,
    int MaxAttempts,
    string? CorrelationId,
    string? TaskRunnerId,
    DateTimeOffset CreatedAt,
    DateTimeOffset? ScheduledAt,
    DateTimeOffset? StartedAt,
    DateTimeOffset? CompletedAt,
    string? Reason,
    int? ExitCode,
    long? DurationMs,
    string CreatedBy)
{
    public static PackRunResponse FromDomain(PackRun packRun) => new(
        packRun.PackRunId,
        packRun.PackId,
        packRun.PackVersion,
        packRun.Status.ToString().ToLowerInvariant(),
        packRun.Priority,
        packRun.Attempt,
        packRun.MaxAttempts,
        packRun.CorrelationId,
        packRun.TaskRunnerId,
        packRun.CreatedAt,
        packRun.ScheduledAt,
        packRun.StartedAt,
        packRun.CompletedAt,
        packRun.Reason,
        packRun.ExitCode,
        packRun.DurationMs,
        packRun.CreatedBy);
}

/// <summary>
/// Response containing a list of pack runs.
/// </summary>
public sealed record PackRunListResponse(
    IReadOnlyList<PackRunResponse> PackRuns,
    int TotalCount,
    string? NextCursor);

// ========== Task Runner (Worker) Requests/Responses ==========
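//
// Expected worker lifecycle against the endpoints in PackRunEndpoints:
//   claim -> start -> heartbeat (repeated) -> append logs -> complete
// Every call after claim must present the LeaseId returned by the claim;
// a mismatched or expired lease is rejected with 409 Conflict.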

/// <summary>
/// Request to claim a pack run for execution.
/// </summary>
public sealed record ClaimPackRunRequest(
    /// <summary>Task runner ID claiming the pack run.</summary>
    string TaskRunnerId,

    /// <summary>Optional pack ID filter (only claim runs for this pack).</summary>
    string? PackId,

    /// <summary>Requested lease duration in seconds.</summary>
    int? LeaseSeconds,

    /// <summary>Idempotency key for claim deduplication.</summary>
    string? IdempotencyKey);

/// <summary>
/// Response for a claimed pack run.
/// </summary>
public sealed record ClaimPackRunResponse(
    Guid PackRunId,
    Guid LeaseId,
    string PackId,
    string PackVersion,
    string Parameters,
    string ParametersDigest,
    int Attempt,
    int MaxAttempts,
    DateTimeOffset LeaseUntil,
    string IdempotencyKey,
    string? CorrelationId,
    string? ProjectId,
    string? Metadata);

/// <summary>
/// Request to extend a pack run lease (heartbeat).
/// </summary>
public sealed record PackRunHeartbeatRequest(
    /// <summary>Current lease ID.</summary>
    Guid LeaseId,

    /// <summary>Lease extension in seconds.</summary>
    int? ExtendSeconds);

/// <summary>
/// Response for a pack run heartbeat.
/// </summary>
public sealed record PackRunHeartbeatResponse(
    Guid PackRunId,
    Guid LeaseId,
    DateTimeOffset LeaseUntil,
    bool Acknowledged);

/// <summary>
/// Request to report pack run start.
/// </summary>
public sealed record PackRunStartRequest(
    /// <summary>Current lease ID.</summary>
    Guid LeaseId);

/// <summary>
/// Response for pack run start.
/// </summary>
public sealed record PackRunStartResponse(
    Guid PackRunId,
    bool Acknowledged,
    DateTimeOffset StartedAt);

/// <summary>
/// Request to complete a pack run.
/// </summary>
public sealed record CompletePackRunRequest(
    /// <summary>Current lease ID.</summary>
    Guid LeaseId,

    /// <summary>Whether the pack run succeeded (exit code 0).</summary>
    bool Success,

    /// <summary>Exit code from pack execution.</summary>
    int ExitCode,

    /// <summary>Reason for failure/success.</summary>
    string? Reason,

    /// <summary>Artifacts produced by the pack run.</summary>
    IReadOnlyList<PackRunArtifactRequest>? Artifacts);

/// <summary>
/// Artifact metadata for pack run completion.
/// </summary>
public sealed record PackRunArtifactRequest(
    /// <summary>Artifact type (e.g., "report", "log", "manifest").</summary>
    string ArtifactType,

    /// <summary>Storage URI.</summary>
    string Uri,

    /// <summary>Content digest (SHA-256).</summary>
    string Digest,

    /// <summary>MIME type.</summary>
    string? MimeType,

    /// <summary>Size in bytes.</summary>
    long? SizeBytes,

    /// <summary>Optional metadata JSON.</summary>
    string? Metadata);

/// <summary>
/// Response for pack run completion.
/// </summary>
public sealed record CompletePackRunResponse(
    Guid PackRunId,
    string Status,
    DateTimeOffset CompletedAt,
    IReadOnlyList<Guid> ArtifactIds,
    long DurationMs);

// ========== Log Requests/Responses ==========

/// <summary>
/// Request to append logs to a pack run.
/// </summary>
public sealed record AppendLogsRequest(
    /// <summary>Current lease ID.</summary>
    Guid LeaseId,

    /// <summary>Log entries to append.</summary>
    IReadOnlyList<LogEntryRequest> Logs);

/// <summary>
/// A single log entry to append.
/// </summary>
public sealed record LogEntryRequest(
    /// <summary>Log level (trace, debug, info, warn, error, fatal).</summary>
    string Level,

    /// <summary>Log source (stdout, stderr, system, pack).</summary>
    string Source,

    /// <summary>Log message.</summary>
    string Message,

    /// <summary>Timestamp (defaults to server time if not provided).</summary>
    DateTimeOffset? Timestamp,

    /// <summary>Optional structured data JSON.</summary>
    string? Data);

/// <summary>
/// Response for appending logs.
/// </summary>
public sealed record AppendLogsResponse(
    Guid PackRunId,
    int LogsAppended,
    long LatestSequence);

/// <summary>
/// Response for a log entry.
/// </summary>
public sealed record LogEntryResponse(
    Guid LogId,
    long Sequence,
    string Level,
    string Source,
    string Message,
    DateTimeOffset Timestamp,
    string? Data)
{
    public static LogEntryResponse FromDomain(PackRunLog log) => new(
        log.LogId,
        log.Sequence,
        log.Level.ToString().ToLowerInvariant(),
        log.Source,
        log.Message,
        log.Timestamp,
        log.Data);
}

/// <summary>
/// Response containing a batch of logs.
/// </summary>
public sealed record LogBatchResponse(
    Guid PackRunId,
    IReadOnlyList<LogEntryResponse> Logs,
    long StartSequence,
    long? NextSequence,
    bool HasMore);
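
// Consumers page by feeding NextSequence back as the "afterSequence" query parameter;
// HasMore=false on a terminal run means the log stream is exhausted.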

// ========== Cancel/Retry Requests ==========

/// <summary>
/// Request to cancel a pack run.
/// </summary>
public sealed record CancelPackRunRequest(
    /// <summary>Reason for cancellation.</summary>
    string Reason);

/// <summary>
/// Response for cancel operation.
/// </summary>
public sealed record CancelPackRunResponse(
    Guid PackRunId,
    string Status,
    string Reason,
    DateTimeOffset CanceledAt);

/// <summary>
/// Request to retry a failed pack run.
/// </summary>
public sealed record RetryPackRunRequest(
    /// <summary>Override parameters for retry (optional).</summary>
    string? Parameters,

    /// <summary>New idempotency key for the retry.</summary>
    string? IdempotencyKey);

/// <summary>
/// Response for retry operation.
/// </summary>
public sealed record RetryPackRunResponse(
    Guid OriginalPackRunId,
    Guid NewPackRunId,
    string Status,
    DateTimeOffset CreatedAt);

// ========== Error Response ==========

/// <summary>
/// Error response for pack run operations.
/// </summary>
public sealed record PackRunErrorResponse(
    string Code,
    string Message,
    Guid? PackRunId,
    int? RetryAfterSeconds);
@@ -0,0 +1,381 @@
using Microsoft.AspNetCore.Http.HttpResults;
using StellaOps.Orchestrator.Core.Domain;
using StellaOps.Orchestrator.Core.Domain.Export;
using StellaOps.Orchestrator.Core.Services;
using StellaOps.Orchestrator.WebService.Contracts;

namespace StellaOps.Orchestrator.WebService.Endpoints;

/// <summary>
/// REST API endpoints for export job management.
/// </summary>
public static class ExportJobEndpoints
{
    /// <summary>
    /// Maps export job endpoints to the route builder.
    /// </summary>
    public static void MapExportJobEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/v1/orchestrator/export")
            .WithTags("Export Jobs");

        group.MapPost("jobs", CreateExportJob)
            .WithName("Orchestrator_CreateExportJob")
            .WithDescription("Create a new export job");

        group.MapGet("jobs", ListExportJobs)
            .WithName("Orchestrator_ListExportJobs")
            .WithDescription("List export jobs with optional filters");

        group.MapGet("jobs/{jobId:guid}", GetExportJob)
            .WithName("Orchestrator_GetExportJob")
            .WithDescription("Get a specific export job");

        group.MapPost("jobs/{jobId:guid}/cancel", CancelExportJob)
            .WithName("Orchestrator_CancelExportJob")
            .WithDescription("Cancel a pending or running export job");

        group.MapGet("quota", GetQuotaStatus)
            .WithName("Orchestrator_GetExportQuotaStatus")
            .WithDescription("Get export job quota status for the tenant");

        group.MapPost("quota", EnsureQuota)
            .WithName("Orchestrator_EnsureExportQuota")
            .WithDescription("Ensure quota exists for an export type (creates with defaults if needed)");

        group.MapGet("types", GetExportTypes)
            .WithName("Orchestrator_GetExportTypes")
            .WithDescription("Get available export job types and their rate limits");
    }

    private static async Task<Results<Created<ExportJobResponse>, BadRequest<ErrorResponse>, Conflict<ErrorResponse>>> CreateExportJob(
        CreateExportJobRequest request,
        IExportJobService exportJobService,
        HttpContext context,
        CancellationToken cancellationToken)
    {
        var tenantId = GetTenantId(context);

        if (string.IsNullOrWhiteSpace(request.ExportType))
        {
            return TypedResults.BadRequest(new ErrorResponse("invalid_export_type", "Export type is required"));
        }

        if (!ExportJobTypes.IsExportJob(request.ExportType) && !ExportJobTypes.All.Contains(request.ExportType))
        {
            return TypedResults.BadRequest(new ErrorResponse("invalid_export_type", $"Unknown export type: {request.ExportType}"));
        }

        var payload = new ExportJobPayload(
            Format: request.Format ?? "json",
            StartTime: request.StartTime,
            EndTime: request.EndTime,
            SourceId: request.SourceId,
            ProjectId: request.ProjectId,
            EntityIds: request.EntityIds,
            MaxEntries: request.MaxEntries,
            IncludeProvenance: request.IncludeProvenance ?? true,
            SignOutput: request.SignOutput ?? true,
            Compression: request.Compression,
            DestinationUri: request.DestinationUri,
            CallbackUrl: request.CallbackUrl,
            Options: request.Options);

        try
        {
            var job = await exportJobService.CreateExportJobAsync(
                tenantId,
                request.ExportType,
                payload,
                GetActorId(context),
                request.ProjectId,
                request.CorrelationId,
                request.Priority,
                cancellationToken);

            var response = MapToResponse(job);
            return TypedResults.Created($"/api/v1/orchestrator/export/jobs/{job.JobId}", response);
        }
        catch (InvalidOperationException ex)
        {
            return TypedResults.Conflict(new ErrorResponse("quota_exceeded", ex.Message));
        }
    }

    private static async Task<Ok<ExportJobListResponse>> ListExportJobs(
        IExportJobService exportJobService,
        HttpContext context,
        string? exportType = null,
        string? status = null,
        string? projectId = null,
        DateTimeOffset? createdAfter = null,
        DateTimeOffset? createdBefore = null,
        int limit = 50,
        int offset = 0,
        CancellationToken cancellationToken = default)
    {
        var tenantId = GetTenantId(context);

        JobStatus? statusFilter = null;
        if (!string.IsNullOrEmpty(status) && Enum.TryParse<JobStatus>(status, true, out var parsed))
        {
            statusFilter = parsed;
        }

        var jobs = await exportJobService.ListExportJobsAsync(
            tenantId,
            exportType,
            statusFilter,
            projectId,
            createdAfter,
            createdBefore,
            limit,
            offset,
            cancellationToken);

        var response = new ExportJobListResponse(
            Items: jobs.Select(MapToResponse).ToList(),
            Limit: limit,
            Offset: offset,
            HasMore: jobs.Count == limit);

        return TypedResults.Ok(response);
    }

    private static async Task<Results<Ok<ExportJobResponse>, NotFound>> GetExportJob(
        Guid jobId,
        IExportJobService exportJobService,
        HttpContext context,
        CancellationToken cancellationToken)
    {
        var tenantId = GetTenantId(context);

        var job = await exportJobService.GetExportJobAsync(tenantId, jobId, cancellationToken);
        if (job is null)
        {
            return TypedResults.NotFound();
        }

        return TypedResults.Ok(MapToResponse(job));
    }

    private static async Task<Results<Ok<CancelExportJobResponse>, NotFound, BadRequest<ErrorResponse>>> CancelExportJob(
        Guid jobId,
        CancelExportJobRequest request,
        IExportJobService exportJobService,
        HttpContext context,
        CancellationToken cancellationToken)
    {
        var tenantId = GetTenantId(context);

        var success = await exportJobService.CancelExportJobAsync(
            tenantId,
            jobId,
            request.Reason ?? "Canceled by user",
            GetActorId(context),
            cancellationToken);

        if (!success)
        {
            var job = await exportJobService.GetExportJobAsync(tenantId, jobId, cancellationToken);
            if (job is null)
            {
                return TypedResults.NotFound();
            }

            return TypedResults.BadRequest(new ErrorResponse(
                "cannot_cancel",
                $"Cannot cancel job in status: {job.Status}"));
        }

        return TypedResults.Ok(new CancelExportJobResponse(jobId, true, DateTimeOffset.UtcNow));
    }

    private static async Task<Ok<ExportQuotaStatusResponse>> GetQuotaStatus(
        IExportJobService exportJobService,
        HttpContext context,
        string? exportType = null,
        CancellationToken cancellationToken = default)
    {
        var tenantId = GetTenantId(context);

        var status = await exportJobService.GetQuotaStatusAsync(tenantId, exportType, cancellationToken);

        var response = new ExportQuotaStatusResponse(
            MaxActive: status.MaxActive,
            CurrentActive: status.CurrentActive,
            MaxPerHour: status.MaxPerHour,
            CurrentHourCount: status.CurrentHourCount,
            AvailableTokens: status.AvailableTokens,
            Paused: status.Paused,
            PauseReason: status.PauseReason,
            CanCreateJob: status.CanCreateJob,
            EstimatedWaitSeconds: status.EstimatedWaitTime?.TotalSeconds);

        return TypedResults.Ok(response);
    }

    private static async Task<Created<QuotaResponse>> EnsureQuota(
        EnsureExportQuotaRequest request,
        IExportJobService exportJobService,
        HttpContext context,
        CancellationToken cancellationToken)
    {
        var tenantId = GetTenantId(context);

        var quota = await exportJobService.EnsureQuotaAsync(
            tenantId,
            request.ExportType,
            GetActorId(context),
            cancellationToken);

        var response = QuotaResponse.FromDomain(quota);

        return TypedResults.Created($"/api/v1/orchestrator/quotas/{quota.QuotaId}", response);
    }

    private static Ok<ExportTypesResponse> GetExportTypes()
    {
        var types = ExportJobTypes.All.Select(jobType =>
        {
            var rateLimit = ExportJobPolicy.RateLimits.GetForJobType(jobType);
            var target = ExportJobTypes.GetExportTarget(jobType) ?? "unknown";

            return new ExportTypeInfo(
                JobType: jobType,
                Target: target,
                MaxConcurrent: rateLimit.MaxConcurrent,
                MaxPerHour: rateLimit.MaxPerHour,
                EstimatedDurationSeconds: rateLimit.EstimatedDurationSeconds);
        }).ToList();

        return TypedResults.Ok(new ExportTypesResponse(
            Types: types,
            DefaultQuota: new DefaultQuotaInfo(
                MaxActive: ExportJobPolicy.QuotaDefaults.MaxActive,
                MaxPerHour: ExportJobPolicy.QuotaDefaults.MaxPerHour,
                BurstCapacity: ExportJobPolicy.QuotaDefaults.BurstCapacity,
                RefillRate: ExportJobPolicy.QuotaDefaults.RefillRate,
                DefaultPriority: ExportJobPolicy.QuotaDefaults.DefaultPriority,
                MaxAttempts: ExportJobPolicy.QuotaDefaults.MaxAttempts,
                DefaultLeaseSeconds: ExportJobPolicy.QuotaDefaults.DefaultLeaseSeconds,
                RecommendedHeartbeatInterval: ExportJobPolicy.QuotaDefaults.RecommendedHeartbeatInterval)));
    }

    private static string GetTenantId(HttpContext context) =>
        context.Request.Headers["X-StellaOps-Tenant"].FirstOrDefault() ?? "default";

    private static string GetActorId(HttpContext context) =>
        context.User.Identity?.Name ?? "anonymous";

    private static ExportJobResponse MapToResponse(Job job) => new(
        JobId: job.JobId,
        TenantId: job.TenantId,
        ProjectId: job.ProjectId,
        ExportType: job.JobType,
        Status: job.Status.ToString(),
        Priority: job.Priority,
        Attempt: job.Attempt,
        MaxAttempts: job.MaxAttempts,
        PayloadDigest: job.PayloadDigest,
        IdempotencyKey: job.IdempotencyKey,
        CorrelationId: job.CorrelationId,
        WorkerId: job.WorkerId,
        LeaseUntil: job.LeaseUntil,
        CreatedAt: job.CreatedAt,
        ScheduledAt: job.ScheduledAt,
        LeasedAt: job.LeasedAt,
        CompletedAt: job.CompletedAt,
        Reason: job.Reason,
        CreatedBy: job.CreatedBy);
}

// Request/Response records

public sealed record CreateExportJobRequest(
    string ExportType,
    string? Format,
    DateTimeOffset? StartTime,
    DateTimeOffset? EndTime,
    Guid? SourceId,
    string? ProjectId,
    IReadOnlyList<Guid>? EntityIds,
    int? MaxEntries,
    bool? IncludeProvenance,
    bool? SignOutput,
    string? Compression,
    string? DestinationUri,
    string? CallbackUrl,
    string? CorrelationId,
    int? Priority,
    IReadOnlyDictionary<string, string>? Options);

public sealed record ExportJobResponse(
    Guid JobId,
    string TenantId,
    string? ProjectId,
    string ExportType,
    string Status,
    int Priority,
    int Attempt,
    int MaxAttempts,
    string PayloadDigest,
    string IdempotencyKey,
    string? CorrelationId,
    string? WorkerId,
    DateTimeOffset? LeaseUntil,
    DateTimeOffset CreatedAt,
    DateTimeOffset? ScheduledAt,
    DateTimeOffset? LeasedAt,
    DateTimeOffset? CompletedAt,
    string? Reason,
    string CreatedBy);

public sealed record ExportJobListResponse(
    IReadOnlyList<ExportJobResponse> Items,
    int Limit,
    int Offset,
    bool HasMore);

public sealed record CancelExportJobRequest(string? Reason);

public sealed record CancelExportJobResponse(
    Guid JobId,
    bool Canceled,
    DateTimeOffset CanceledAt);

public sealed record ExportQuotaStatusResponse(
    int MaxActive,
    int CurrentActive,
    int MaxPerHour,
    int CurrentHourCount,
    double AvailableTokens,
    bool Paused,
    string? PauseReason,
    bool CanCreateJob,
    double? EstimatedWaitSeconds);

public sealed record EnsureExportQuotaRequest(string ExportType);

public sealed record ExportTypesResponse(
    IReadOnlyList<ExportTypeInfo> Types,
    DefaultQuotaInfo DefaultQuota);

public sealed record ExportTypeInfo(
    string JobType,
    string Target,
    int MaxConcurrent,
    int MaxPerHour,
    int EstimatedDurationSeconds);

public sealed record DefaultQuotaInfo(
    int MaxActive,
    int MaxPerHour,
    int BurstCapacity,
    double RefillRate,
    int DefaultPriority,
    int MaxAttempts,
    int DefaultLeaseSeconds,
    int RecommendedHeartbeatInterval);

public sealed record ErrorResponse(string Error, string Message);
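
// Illustrative call sequence (values are examples only): discover types and limits via
// GET /api/v1/orchestrator/export/types, then POST /api/v1/orchestrator/export/jobs with e.g.
// { "exportType": "<one of the advertised types>", "format": "json", "projectId": "proj-1" }
// and poll GET /api/v1/orchestrator/export/jobs/{jobId} until Status is terminal.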
@@ -0,0 +1,837 @@
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using Microsoft.AspNetCore.Mvc;
using StellaOps.Orchestrator.Core.Domain;
using StellaOps.Orchestrator.Core.Domain.Events;
using StellaOps.Orchestrator.Infrastructure;
using StellaOps.Orchestrator.Infrastructure.Repositories;
using StellaOps.Orchestrator.WebService.Contracts;
using StellaOps.Orchestrator.WebService.Services;
using PackLogLevel = StellaOps.Orchestrator.Core.Domain.LogLevel;

namespace StellaOps.Orchestrator.WebService.Endpoints;

/// <summary>
/// Pack run endpoints for scheduling, execution, and log management.
/// </summary>
public static class PackRunEndpoints
{
    private const int DefaultLeaseSeconds = 300;  // 5 minutes
    private const int MaxLeaseSeconds = 3600;     // 1 hour
    private const int DefaultExtendSeconds = 300;
    private const int MaxExtendSeconds = 1800;    // 30 minutes
    private const int DefaultLogLimit = 100;
    private const int MaxLogLimit = 1000;

    /// <summary>
    /// Maps pack run endpoints to the route builder.
    /// </summary>
    public static RouteGroupBuilder MapPackRunEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/v1/orchestrator/pack-runs")
            .WithTags("Orchestrator Pack Runs");

        // Scheduling endpoints
        group.MapPost("", SchedulePackRun)
            .WithName("Orchestrator_SchedulePackRun")
            .WithDescription("Schedule a new pack run");

        group.MapGet("{packRunId:guid}", GetPackRun)
            .WithName("Orchestrator_GetPackRun")
            .WithDescription("Get pack run details");

        group.MapGet("", ListPackRuns)
            .WithName("Orchestrator_ListPackRuns")
            .WithDescription("List pack runs with filters");

        // Task runner (worker) endpoints
        group.MapPost("claim", ClaimPackRun)
            .WithName("Orchestrator_ClaimPackRun")
            .WithDescription("Claim a pack run for execution");

        group.MapPost("{packRunId:guid}/heartbeat", Heartbeat)
            .WithName("Orchestrator_PackRunHeartbeat")
            .WithDescription("Extend pack run lease");

        group.MapPost("{packRunId:guid}/start", StartPackRun)
            .WithName("Orchestrator_StartPackRun")
            .WithDescription("Mark pack run as started");

        group.MapPost("{packRunId:guid}/complete", CompletePackRun)
            .WithName("Orchestrator_CompletePackRun")
            .WithDescription("Complete a pack run");

        // Log endpoints
        group.MapPost("{packRunId:guid}/logs", AppendLogs)
            .WithName("Orchestrator_AppendPackRunLogs")
            .WithDescription("Append logs to a pack run");

        group.MapGet("{packRunId:guid}/logs", GetLogs)
            .WithName("Orchestrator_GetPackRunLogs")
            .WithDescription("Get pack run logs with cursor pagination");

        // Cancel/retry endpoints
        group.MapPost("{packRunId:guid}/cancel", CancelPackRun)
            .WithName("Orchestrator_CancelPackRun")
            .WithDescription("Cancel a pack run");

        group.MapPost("{packRunId:guid}/retry", RetryPackRun)
            .WithName("Orchestrator_RetryPackRun")
            .WithDescription("Retry a failed pack run");

        return group;
    }

    // ========== Scheduling Endpoints ==========

    private static async Task<IResult> SchedulePackRun(
        HttpContext context,
        [FromBody] SchedulePackRunRequest request,
        [FromServices] TenantResolver tenantResolver,
        [FromServices] IPackRunRepository packRunRepository,
        [FromServices] IEventPublisher eventPublisher,
        [FromServices] TimeProvider timeProvider,
        CancellationToken cancellationToken)
    {
        // Validate request
        if (string.IsNullOrWhiteSpace(request.PackId))
        {
            return Results.BadRequest(new PackRunErrorResponse(
                "invalid_request", "PackId is required", null, null));
        }

        if (string.IsNullOrWhiteSpace(request.PackVersion))
        {
            return Results.BadRequest(new PackRunErrorResponse(
                "invalid_request", "PackVersion is required", null, null));
        }

        var tenantId = tenantResolver.Resolve(context);
        var now = timeProvider.GetUtcNow();
        var parameters = request.Parameters ?? "{}";
        var parametersDigest = ComputeDigest(parameters);
        var idempotencyKey = request.IdempotencyKey ?? $"pack-run:{request.PackId}:{parametersDigest}:{now:yyyyMMddHHmm}";
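
        // Note: the fallback key is bucketed to the minute (yyyyMMddHHmm), so identical
        // requests without an explicit key dedupe only within the same minute window.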

        // Check for existing pack run with same idempotency key
        var existing = await packRunRepository.GetByIdempotencyKeyAsync(tenantId, idempotencyKey, cancellationToken);
        if (existing is not null)
        {
            return Results.Ok(new SchedulePackRunResponse(
                existing.PackRunId,
                existing.PackId,
                existing.PackVersion,
                existing.Status.ToString().ToLowerInvariant(),
                existing.IdempotencyKey,
                existing.CreatedAt,
                WasAlreadyScheduled: true));
        }

        // Create new pack run
        var packRunId = Guid.NewGuid();
        var packRun = PackRun.Create(
            packRunId: packRunId,
            tenantId: tenantId,
            projectId: request.ProjectId,
            packId: request.PackId,
            packVersion: request.PackVersion,
            parameters: parameters,
            parametersDigest: parametersDigest,
            idempotencyKey: idempotencyKey,
            correlationId: request.CorrelationId,
            createdBy: context.User?.Identity?.Name ?? "system",
            priority: request.Priority ?? 0,
            maxAttempts: request.MaxAttempts ?? 3,
            metadata: request.Metadata,
            createdAt: now);

        await packRunRepository.CreateAsync(packRun, cancellationToken);

        OrchestratorMetrics.PackRunCreated(tenantId, request.PackId);

        // Publish event
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.PackRunCreated,
            tenantId: tenantId,
            actor: EventActor.User(context.User?.Identity?.Name ?? "system", "webservice"),
            correlationId: request.CorrelationId,
            projectId: request.ProjectId,
            payload: ToPayload(new { packRunId, packId = request.PackId, packVersion = request.PackVersion }));
        await eventPublisher.PublishAsync(envelope, cancellationToken);

        return Results.Created($"/api/v1/orchestrator/pack-runs/{packRunId}", new SchedulePackRunResponse(
            packRunId,
            request.PackId,
            request.PackVersion,
            packRun.Status.ToString().ToLowerInvariant(),
            idempotencyKey,
            now,
            WasAlreadyScheduled: false));
    }

    private static async Task<IResult> GetPackRun(
        HttpContext context,
        [FromRoute] Guid packRunId,
        [FromServices] TenantResolver tenantResolver,
        [FromServices] IPackRunRepository packRunRepository,
        CancellationToken cancellationToken)
    {
        var tenantId = tenantResolver.Resolve(context);
        var packRun = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken);

        if (packRun is null)
        {
            return Results.NotFound(new PackRunErrorResponse(
                "not_found", $"Pack run {packRunId} not found", packRunId, null));
        }

        return Results.Ok(PackRunResponse.FromDomain(packRun));
    }

    private static async Task<IResult> ListPackRuns(
        HttpContext context,
        [FromQuery] string? packId,
        [FromQuery] string? status,
        [FromQuery] string? projectId,
        [FromQuery] DateTimeOffset? createdAfter,
        [FromQuery] DateTimeOffset? createdBefore,
        [FromQuery] int? limit,
        [FromQuery] int? offset,
        [FromServices] TenantResolver tenantResolver,
        [FromServices] IPackRunRepository packRunRepository,
        CancellationToken cancellationToken)
    {
        var tenantId = tenantResolver.Resolve(context);
        var effectiveLimit = Math.Min(limit ?? 50, 100);
        var effectiveOffset = offset ?? 0;

        PackRunStatus? statusFilter = null;
        if (!string.IsNullOrEmpty(status) && Enum.TryParse<PackRunStatus>(status, true, out var parsed))
        {
            statusFilter = parsed;
        }

        var packRuns = await packRunRepository.ListAsync(
            tenantId, packId, statusFilter, projectId,
            createdAfter, createdBefore,
            effectiveLimit, effectiveOffset, cancellationToken);

        var totalCount = await packRunRepository.CountAsync(
            tenantId, packId, statusFilter, projectId, cancellationToken);

        var responses = packRuns.Select(PackRunResponse.FromDomain).ToList();
        var nextCursor = responses.Count == effectiveLimit
            ? (effectiveOffset + effectiveLimit).ToString()
            : null;

        return Results.Ok(new PackRunListResponse(responses, totalCount, nextCursor));
    }

    // ========== Task Runner Endpoints ==========

    private static async Task<IResult> ClaimPackRun(
        HttpContext context,
        [FromBody] ClaimPackRunRequest request,
        [FromServices] TenantResolver tenantResolver,
        [FromServices] IPackRunRepository packRunRepository,
        [FromServices] TimeProvider timeProvider,
        CancellationToken cancellationToken)
    {
        if (string.IsNullOrWhiteSpace(request.TaskRunnerId))
        {
            return Results.BadRequest(new PackRunErrorResponse(
                "invalid_request", "TaskRunnerId is required", null, null));
        }

        var tenantId = tenantResolver.Resolve(context);
        var leaseSeconds = Math.Min(request.LeaseSeconds ?? DefaultLeaseSeconds, MaxLeaseSeconds);
        var now = timeProvider.GetUtcNow();
        var leaseUntil = now.AddSeconds(leaseSeconds);
        var leaseId = Guid.NewGuid();

        // Idempotency check
        if (!string.IsNullOrEmpty(request.IdempotencyKey))
        {
            var existingRun = await packRunRepository.GetByIdempotencyKeyAsync(
                tenantId, $"claim:{request.IdempotencyKey}", cancellationToken);

            if (existingRun is not null && existingRun.Status == PackRunStatus.Leased &&
                existingRun.TaskRunnerId == request.TaskRunnerId)
            {
                return Results.Ok(CreateClaimResponse(existingRun));
            }
        }

        var packRun = await packRunRepository.LeaseNextAsync(
            tenantId, request.PackId, leaseId, request.TaskRunnerId, leaseUntil, cancellationToken);

        if (packRun is null)
        {
            // A 204 response must not carry a body, so the retry hint moves to a Retry-After header.
            context.Response.Headers["Retry-After"] = "5";
            return Results.StatusCode(StatusCodes.Status204NoContent);
        }

        OrchestratorMetrics.PackRunLeased(tenantId, packRun.PackId);

        return Results.Ok(CreateClaimResponse(packRun));
    }

    private static async Task<IResult> Heartbeat(
        HttpContext context,
        [FromRoute] Guid packRunId,
        [FromBody] PackRunHeartbeatRequest request,
        [FromServices] TenantResolver tenantResolver,
        [FromServices] IPackRunRepository packRunRepository,
        [FromServices] TimeProvider timeProvider,
        CancellationToken cancellationToken)
    {
        var tenantId = tenantResolver.Resolve(context);

        var packRun = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken);
        if (packRun is null)
        {
            return Results.NotFound(new PackRunErrorResponse(
                "not_found", $"Pack run {packRunId} not found", packRunId, null));
        }

        if (packRun.LeaseId != request.LeaseId)
        {
            return Results.Json(
                new PackRunErrorResponse("invalid_lease", "Lease ID does not match", packRunId, null),
                statusCode: StatusCodes.Status409Conflict);
        }

        if (packRun.Status != PackRunStatus.Leased && packRun.Status != PackRunStatus.Running)
        {
            return Results.Json(
                new PackRunErrorResponse("invalid_status", $"Pack run is not in leased/running status: {packRun.Status}", packRunId, null),
                statusCode: StatusCodes.Status409Conflict);
        }

        var extendSeconds = Math.Min(request.ExtendSeconds ?? DefaultExtendSeconds, MaxExtendSeconds);
        var now = timeProvider.GetUtcNow();
        var newLeaseUntil = now.AddSeconds(extendSeconds);

        var extended = await packRunRepository.ExtendLeaseAsync(
            tenantId, packRunId, request.LeaseId, newLeaseUntil, cancellationToken);

        if (!extended)
        {
            return Results.Json(
                new PackRunErrorResponse("lease_expired", "Lease has expired", packRunId, null),
                statusCode: StatusCodes.Status409Conflict);
        }

        OrchestratorMetrics.PackRunHeartbeatReceived(tenantId, packRun.PackId);

        return Results.Ok(new PackRunHeartbeatResponse(packRunId, request.LeaseId, newLeaseUntil, Acknowledged: true));
    }

    private static async Task<IResult> StartPackRun(
        HttpContext context,
        [FromRoute] Guid packRunId,
        [FromBody] PackRunStartRequest request,
        [FromServices] TenantResolver tenantResolver,
        [FromServices] IPackRunRepository packRunRepository,
        [FromServices] IPackRunLogRepository logRepository,
        [FromServices] IEventPublisher eventPublisher,
        [FromServices] TimeProvider timeProvider,
        CancellationToken cancellationToken)
    {
        var tenantId = tenantResolver.Resolve(context);

        var packRun = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken);
        if (packRun is null)
        {
            return Results.NotFound(new PackRunErrorResponse(
                "not_found", $"Pack run {packRunId} not found", packRunId, null));
        }

        if (packRun.LeaseId != request.LeaseId)
        {
            return Results.Json(
                new PackRunErrorResponse("invalid_lease", "Lease ID does not match", packRunId, null),
                statusCode: StatusCodes.Status409Conflict);
        }

        if (packRun.Status != PackRunStatus.Leased)
        {
            return Results.Json(
                new PackRunErrorResponse("invalid_status", $"Pack run is not in leased status: {packRun.Status}", packRunId, null),
                statusCode: StatusCodes.Status409Conflict);
        }

        var now = timeProvider.GetUtcNow();

        await packRunRepository.UpdateStatusAsync(
            tenantId, packRunId,
            PackRunStatus.Running,
            packRun.Attempt,
            packRun.LeaseId,
            packRun.TaskRunnerId,
            packRun.LeaseUntil,
            packRun.ScheduledAt,
            packRun.LeasedAt,
            now, // startedAt
            null, null, null, null, null,
            cancellationToken);

        // Append system log entry
        var log = PackRunLog.System(packRunId, tenantId, 0, PackLogLevel.Info, "Pack run started", null, now);
        await logRepository.AppendAsync(log, cancellationToken);

        OrchestratorMetrics.PackRunStarted(tenantId, packRun.PackId);

        // Publish event
        var envelope = EventEnvelope.Create(
            eventType: OrchestratorEventType.PackRunStarted,
            tenantId: tenantId,
            actor: EventActor.System("task-runner", packRun.TaskRunnerId ?? "unknown"),
            correlationId: packRun.CorrelationId,
            projectId: packRun.ProjectId,
            payload: ToPayload(new { packRunId, packId = packRun.PackId, packVersion = packRun.PackVersion }));
        await eventPublisher.PublishAsync(envelope, cancellationToken);

        return Results.Ok(new PackRunStartResponse(packRunId, Acknowledged: true, StartedAt: now));
    }

    private static async Task<IResult> CompletePackRun(
        HttpContext context,
        [FromRoute] Guid packRunId,
        [FromBody] CompletePackRunRequest request,
        [FromServices] TenantResolver tenantResolver,
        [FromServices] IPackRunRepository packRunRepository,
        [FromServices] IPackRunLogRepository logRepository,
        [FromServices] IArtifactRepository artifactRepository,
        [FromServices] IEventPublisher eventPublisher,
        [FromServices] TimeProvider timeProvider,
        CancellationToken cancellationToken)
    {
        var tenantId = tenantResolver.Resolve(context);

        var packRun = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken);
        if (packRun is null)
        {
            return Results.NotFound(new PackRunErrorResponse(
                "not_found", $"Pack run {packRunId} not found", packRunId, null));
        }

        if (packRun.LeaseId != request.LeaseId)
        {
            return Results.Json(
                new PackRunErrorResponse("invalid_lease", "Lease ID does not match", packRunId, null),
                statusCode: StatusCodes.Status409Conflict);
        }

        if (packRun.Status != PackRunStatus.Leased && packRun.Status != PackRunStatus.Running)
        {
            return Results.Json(
                new PackRunErrorResponse("invalid_status", $"Pack run is not in leased/running status: {packRun.Status}", packRunId, null),
                statusCode: StatusCodes.Status409Conflict);
        }

        var now = timeProvider.GetUtcNow();
        var newStatus = request.Success ? PackRunStatus.Succeeded : PackRunStatus.Failed;
        // Prefer wall-clock from StartedAt; if the runner never reported start, fall back to lease time.
        var durationMs = packRun.StartedAt.HasValue
            ? (long)(now - packRun.StartedAt.Value).TotalMilliseconds
            : (packRun.LeasedAt.HasValue ? (long)(now - packRun.LeasedAt.Value).TotalMilliseconds : 0);

        // Create artifacts
        var artifactIds = new List<Guid>();
        if (request.Artifacts is { Count: > 0 })
        {
            var artifacts = request.Artifacts.Select(a => new Artifact(
                ArtifactId: Guid.NewGuid(),
                TenantId: tenantId,
                JobId: Guid.Empty, // Pack runs don't have a job ID
                RunId: null, // Pack runs are not part of a run
                ArtifactType: a.ArtifactType,
                Uri: a.Uri,
                Digest: a.Digest,
                MimeType: a.MimeType,
                SizeBytes: a.SizeBytes,
                CreatedAt: now,
                Metadata: $"{{\"packRunId\":\"{packRunId}\",\"packId\":\"{packRun.PackId}\"{(a.Metadata != null ? "," + a.Metadata.TrimStart('{').TrimEnd('}') : "")}}}")).ToList();
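
            // Note: caller metadata is spliced into the envelope as raw JSON text, so it must be a
            // well-formed object literal; a serializer-based merge would be more robust.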

            await artifactRepository.CreateBatchAsync(artifacts, cancellationToken);
            artifactIds.AddRange(artifacts.Select(a => a.ArtifactId));

            foreach (var artifact in artifacts)
            {
                OrchestratorMetrics.ArtifactCreated(tenantId, artifact.ArtifactType);
            }
        }

        // Update status
        await packRunRepository.UpdateStatusAsync(
            tenantId, packRunId,
            newStatus,
            packRun.Attempt,
            null, // clear lease
            null, // clear task runner
            null, // clear lease until
            packRun.ScheduledAt,
            packRun.LeasedAt,
            packRun.StartedAt,
            now, // completedAt
            null,
            request.Reason,
            request.ExitCode,
            durationMs,
            cancellationToken);

        // Append system log entry
        var (logCount, latestSeq) = await logRepository.GetLogStatsAsync(tenantId, packRunId, cancellationToken);
        var completionLog = PackRunLog.System(
            packRunId, tenantId, latestSeq + 1,
            request.Success ? PackLogLevel.Info : PackLogLevel.Error,
            $"Pack run {(request.Success ? "succeeded" : "failed")} with exit code {request.ExitCode}",
            null, now);
        await logRepository.AppendAsync(completionLog, cancellationToken);

        // Record metrics
        var durationSeconds = durationMs / 1000.0;
        if (request.Success)
        {
            OrchestratorMetrics.PackRunCompleted(tenantId, packRun.PackId, "succeeded");
        }
        else
        {
            OrchestratorMetrics.PackRunFailed(tenantId, packRun.PackId);
        }
        OrchestratorMetrics.RecordPackRunDuration(tenantId, packRun.PackId, durationSeconds);
        OrchestratorMetrics.RecordPackRunLogCount(tenantId, packRun.PackId, logCount + 1);

        // Publish event
        var eventType = request.Success
            ? OrchestratorEventType.PackRunCompleted
            : OrchestratorEventType.PackRunFailed;
        var envelope = EventEnvelope.Create(
            eventType: eventType,
            tenantId: tenantId,
            actor: EventActor.System("task-runner", packRun.TaskRunnerId ?? "unknown"),
            correlationId: packRun.CorrelationId,
            projectId: packRun.ProjectId,
            payload: ToPayload(new
            {
                packRunId,
                packId = packRun.PackId,
                packVersion = packRun.PackVersion,
                exitCode = request.ExitCode,
                durationMs,
                artifactCount = artifactIds.Count
            }));
        await eventPublisher.PublishAsync(envelope, cancellationToken);

        return Results.Ok(new CompletePackRunResponse(
            packRunId,
            newStatus.ToString().ToLowerInvariant(),
            now,
            artifactIds,
            durationMs));
    }

    // ========== Log Endpoints ==========

    private static async Task<IResult> AppendLogs(
        HttpContext context,
        [FromRoute] Guid packRunId,
        [FromBody] AppendLogsRequest request,
        [FromServices] TenantResolver tenantResolver,
        [FromServices] IPackRunRepository packRunRepository,
        [FromServices] IPackRunLogRepository logRepository,
        [FromServices] IEventPublisher eventPublisher,
        [FromServices] TimeProvider timeProvider,
        CancellationToken cancellationToken)
    {
        var tenantId = tenantResolver.Resolve(context);

        var packRun = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken);
        if (packRun is null)
        {
            return Results.NotFound(new PackRunErrorResponse(
                "not_found", $"Pack run {packRunId} not found", packRunId, null));
        }

        if (packRun.LeaseId != request.LeaseId)
        {
            return Results.Json(
                new PackRunErrorResponse("invalid_lease", "Lease ID does not match", packRunId, null),
                statusCode: StatusCodes.Status409Conflict);
        }

        if (request.Logs.Count == 0)
        {
            return Results.Ok(new AppendLogsResponse(packRunId, 0, 0));
        }

        // Get current sequence
        var (_, currentSeq) = await logRepository.GetLogStatsAsync(tenantId, packRunId, cancellationToken);
|
||||
var now = timeProvider.GetUtcNow();
|
||||
|
||||
var logs = new List<PackRunLog>();
|
||||
var seq = currentSeq;
|
||||
foreach (var entry in request.Logs)
|
||||
{
|
||||
seq++;
|
||||
var level = Enum.TryParse<PackLogLevel>(entry.Level, true, out var parsedLevel)
|
||||
? parsedLevel
|
||||
: PackLogLevel.Info;
|
||||
|
||||
logs.Add(PackRunLog.Create(
|
||||
packRunId, tenantId, seq, level,
|
||||
entry.Source,
|
||||
entry.Message,
|
||||
entry.Data,
|
||||
entry.Timestamp ?? now));
|
||||
}
|
||||
|
||||
await logRepository.AppendBatchAsync(logs, cancellationToken);
|
||||
|
||||
OrchestratorMetrics.PackRunLogAppended(tenantId, packRun.PackId, logs.Count);
|
||||
|
||||
// Publish log event for streaming
|
||||
var envelope = EventEnvelope.Create(
|
||||
eventType: OrchestratorEventType.PackRunLog,
|
||||
tenantId: tenantId,
|
||||
actor: EventActor.System("task-runner", packRun.TaskRunnerId ?? "unknown"),
|
||||
correlationId: packRun.CorrelationId,
|
||||
projectId: packRun.ProjectId,
|
||||
payload: ToPayload(new { packRunId, logCount = logs.Count, latestSequence = seq }));
|
||||
await eventPublisher.PublishAsync(envelope, cancellationToken);
|
||||
|
||||
return Results.Ok(new AppendLogsResponse(packRunId, logs.Count, seq));
|
||||
}
|
||||
|
||||
private static async Task<IResult> GetLogs(
|
||||
HttpContext context,
|
||||
[FromRoute] Guid packRunId,
|
||||
[FromQuery] long? afterSequence,
|
||||
[FromQuery] string? level,
|
||||
[FromQuery] string? search,
|
||||
[FromQuery] int? limit,
|
||||
[FromServices] TenantResolver tenantResolver,
|
||||
[FromServices] IPackRunRepository packRunRepository,
|
||||
[FromServices] IPackRunLogRepository logRepository,
|
||||
CancellationToken cancellationToken)
|
||||
{
|
||||
var tenantId = tenantResolver.Resolve(context);
|
||||
|
||||
var packRun = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken);
|
||||
if (packRun is null)
|
||||
{
|
||||
return Results.NotFound(new PackRunErrorResponse(
|
||||
"not_found", $"Pack run {packRunId} not found", packRunId, null));
|
||||
}
|
||||
|
||||
var effectiveLimit = Math.Min(limit ?? DefaultLogLimit, MaxLogLimit);
|
||||
var after = afterSequence ?? -1;
|
||||
|
||||
PackRunLogBatch batch;
|
||||
|
||||
if (!string.IsNullOrEmpty(search))
|
||||
{
|
||||
batch = await logRepository.SearchLogsAsync(tenantId, packRunId, search, after, effectiveLimit, cancellationToken);
|
||||
}
|
||||
else if (!string.IsNullOrEmpty(level) && Enum.TryParse<PackLogLevel>(level, true, out var minLevel))
|
||||
{
|
||||
batch = await logRepository.GetLogsByLevelAsync(tenantId, packRunId, minLevel, after, effectiveLimit, cancellationToken);
|
||||
}
|
||||
else
|
||||
{
|
||||
batch = await logRepository.GetLogsAsync(tenantId, packRunId, after, effectiveLimit, cancellationToken);
|
||||
}
|
||||
|
||||
var responses = batch.Logs.Select(LogEntryResponse.FromDomain).ToList();
|
||||
var hasMore = responses.Count == effectiveLimit;
|
||||
long? nextSeq = hasMore && responses.Count > 0 ? responses[^1].Sequence : null;
|
||||
|
||||
return Results.Ok(new LogBatchResponse(
|
||||
packRunId,
|
||||
responses,
|
||||
batch.StartSequence,
|
||||
nextSeq,
|
||||
hasMore));
|
||||
}
|
||||
|
||||
// ========== Cancel/Retry Endpoints ==========
|
||||
|
||||
private static async Task<IResult> CancelPackRun(
|
||||
HttpContext context,
|
||||
[FromRoute] Guid packRunId,
|
||||
[FromBody] CancelPackRunRequest request,
|
||||
[FromServices] TenantResolver tenantResolver,
|
||||
[FromServices] IPackRunRepository packRunRepository,
|
||||
[FromServices] IPackRunLogRepository logRepository,
|
||||
[FromServices] IEventPublisher eventPublisher,
|
||||
[FromServices] TimeProvider timeProvider,
|
||||
CancellationToken cancellationToken)
|
||||
{
|
||||
var tenantId = tenantResolver.Resolve(context);
|
||||
|
||||
var packRun = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken);
|
||||
if (packRun is null)
|
||||
{
|
||||
return Results.NotFound(new PackRunErrorResponse(
|
||||
"not_found", $"Pack run {packRunId} not found", packRunId, null));
|
||||
}
|
||||
|
||||
if (packRun.IsTerminal)
|
||||
{
|
||||
return Results.Json(
|
||||
new PackRunErrorResponse("already_terminal", $"Pack run is already in terminal status: {packRun.Status}", packRunId, null),
|
||||
statusCode: StatusCodes.Status409Conflict);
|
||||
}
|
||||
|
||||
var now = timeProvider.GetUtcNow();
|
||||
|
||||
await packRunRepository.UpdateStatusAsync(
|
||||
tenantId, packRunId,
|
||||
PackRunStatus.Canceled,
|
||||
packRun.Attempt,
|
||||
null, null, null,
|
||||
packRun.ScheduledAt,
|
||||
packRun.LeasedAt,
|
||||
packRun.StartedAt,
|
||||
now,
|
||||
null,
|
||||
request.Reason,
|
||||
null, null,
|
||||
cancellationToken);
|
||||
|
||||
// Append system log entry
|
||||
var (_, latestSeq) = await logRepository.GetLogStatsAsync(tenantId, packRunId, cancellationToken);
|
||||
var cancelLog = PackRunLog.System(
|
||||
packRunId, tenantId, latestSeq + 1,
|
||||
PackLogLevel.Warn, $"Pack run canceled: {request.Reason}", null, now);
|
||||
await logRepository.AppendAsync(cancelLog, cancellationToken);
|
||||
|
||||
OrchestratorMetrics.PackRunCanceled(tenantId, packRun.PackId);
|
||||
|
||||
// Publish event
|
||||
var envelope = EventEnvelope.Create(
|
||||
eventType: OrchestratorEventType.PackRunFailed, // Use Failed for canceled
|
||||
tenantId: tenantId,
|
||||
actor: EventActor.User(context.User?.Identity?.Name ?? "system", "webservice"),
|
||||
correlationId: packRun.CorrelationId,
|
||||
projectId: packRun.ProjectId,
|
||||
payload: ToPayload(new { packRunId, packId = packRun.PackId, status = "canceled", reason = request.Reason }));
|
||||
await eventPublisher.PublishAsync(envelope, cancellationToken);
|
||||
|
||||
return Results.Ok(new CancelPackRunResponse(packRunId, "canceled", request.Reason, now));
|
||||
}
|
||||
|
||||
private static async Task<IResult> RetryPackRun(
|
||||
HttpContext context,
|
||||
[FromRoute] Guid packRunId,
|
||||
[FromBody] RetryPackRunRequest request,
|
||||
[FromServices] TenantResolver tenantResolver,
|
||||
[FromServices] IPackRunRepository packRunRepository,
|
||||
[FromServices] IEventPublisher eventPublisher,
|
||||
[FromServices] TimeProvider timeProvider,
|
||||
CancellationToken cancellationToken)
|
||||
{
|
||||
var tenantId = tenantResolver.Resolve(context);
|
||||
|
||||
var packRun = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken);
|
||||
if (packRun is null)
|
||||
{
|
||||
return Results.NotFound(new PackRunErrorResponse(
|
||||
"not_found", $"Pack run {packRunId} not found", packRunId, null));
|
||||
}
|
||||
|
||||
if (!packRun.IsTerminal)
|
||||
{
|
||||
return Results.Json(
|
||||
new PackRunErrorResponse("not_terminal", $"Pack run is not in terminal status: {packRun.Status}", packRunId, null),
|
||||
statusCode: StatusCodes.Status409Conflict);
|
||||
}
|
||||
|
||||
if (packRun.Status == PackRunStatus.Succeeded)
|
||||
{
|
||||
return Results.Json(
|
||||
new PackRunErrorResponse("already_succeeded", "Cannot retry a successful pack run", packRunId, null),
|
||||
statusCode: StatusCodes.Status409Conflict);
|
||||
}
|
||||
|
||||
var now = timeProvider.GetUtcNow();
|
||||
var newPackRunId = Guid.NewGuid();
|
||||
var parameters = request.Parameters ?? packRun.Parameters;
|
||||
var parametersDigest = request.Parameters != null ? ComputeDigest(parameters) : packRun.ParametersDigest;
|
||||
var idempotencyKey = request.IdempotencyKey ?? $"retry:{packRunId}:{now:yyyyMMddHHmmss}";
|
||||
|
||||
var newPackRun = PackRun.Create(
|
||||
packRunId: newPackRunId,
|
||||
tenantId: tenantId,
|
||||
projectId: packRun.ProjectId,
|
||||
packId: packRun.PackId,
|
||||
packVersion: packRun.PackVersion,
|
||||
parameters: parameters,
|
||||
parametersDigest: parametersDigest,
|
||||
idempotencyKey: idempotencyKey,
|
||||
correlationId: packRun.CorrelationId,
|
||||
createdBy: context.User?.Identity?.Name ?? "system",
|
||||
priority: packRun.Priority,
|
||||
maxAttempts: packRun.MaxAttempts,
|
||||
metadata: $"{{\"retriedFrom\":\"{packRunId}\"}}",
|
||||
createdAt: now);
|
||||
|
||||
await packRunRepository.CreateAsync(newPackRun, cancellationToken);
|
||||
|
||||
OrchestratorMetrics.PackRunCreated(tenantId, packRun.PackId);
|
||||
|
||||
// Publish event
|
||||
var envelope = EventEnvelope.Create(
|
||||
eventType: OrchestratorEventType.PackRunCreated,
|
||||
tenantId: tenantId,
|
||||
actor: EventActor.User(context.User?.Identity?.Name ?? "system", "webservice"),
|
||||
correlationId: packRun.CorrelationId,
|
||||
projectId: packRun.ProjectId,
|
||||
payload: ToPayload(new { packRunId = newPackRunId, packId = packRun.PackId, retriedFrom = packRunId }));
|
||||
await eventPublisher.PublishAsync(envelope, cancellationToken);
|
||||
|
||||
return Results.Created($"/api/v1/orchestrator/pack-runs/{newPackRunId}", new RetryPackRunResponse(
|
||||
packRunId,
|
||||
newPackRunId,
|
||||
newPackRun.Status.ToString().ToLowerInvariant(),
|
||||
now));
|
||||
}
|
||||
|
||||
// ========== Helper Methods ==========
|
||||
|
||||
private static ClaimPackRunResponse CreateClaimResponse(PackRun packRun)
|
||||
{
|
||||
return new ClaimPackRunResponse(
|
||||
packRun.PackRunId,
|
||||
packRun.LeaseId!.Value,
|
||||
packRun.PackId,
|
||||
packRun.PackVersion,
|
||||
packRun.Parameters,
|
||||
packRun.ParametersDigest,
|
||||
packRun.Attempt,
|
||||
packRun.MaxAttempts,
|
||||
packRun.LeaseUntil!.Value,
|
||||
packRun.IdempotencyKey,
|
||||
packRun.CorrelationId,
|
||||
packRun.ProjectId,
|
||||
packRun.Metadata);
|
||||
}
|
||||
|
||||
private static string ComputeDigest(string content)
|
||||
{
|
||||
var bytes = Encoding.UTF8.GetBytes(content);
|
||||
var hash = SHA256.HashData(bytes);
|
||||
return Convert.ToHexStringLower(hash);
|
||||
}
|
||||
|
||||
private static JsonElement? ToPayload<T>(T value)
|
||||
{
|
||||
var json = JsonSerializer.SerializeToUtf8Bytes(value, new JsonSerializerOptions
|
||||
{
|
||||
PropertyNamingPolicy = JsonNamingPolicy.CamelCase
|
||||
});
|
||||
var doc = JsonDocument.Parse(json);
|
||||
return doc.RootElement.Clone();
|
||||
}
|
||||
}
|
||||
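A note on the digest contract above (illustrative sketch, not part of the diff): ComputeDigest hashes the raw parameters string, so two payloads that differ only in whitespace yield different digests. A minimal self-contained equivalent, assuming .NET 9 for Convert.ToHexStringLower:

using System;
using System.Security.Cryptography;
using System.Text;

static class DigestSketch
{
    // Hex-encodes SHA-256 of the exact input string; callers should canonicalize
    // JSON first if they want whitespace-insensitive idempotency keys.
    public static string Sha256HexLower(string content)
    {
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(content));
        return Convert.ToHexStringLower(hash);
    }
}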
@@ -0,0 +1,18 @@
namespace StellaOps.Policy.Storage.Postgres.Models;

/// <summary>
/// Entity representing an audit log entry for the policy module.
/// </summary>
public sealed class PolicyAuditEntity
{
    public long Id { get; init; }
    public required string TenantId { get; init; }
    public Guid? UserId { get; init; }
    public required string Action { get; init; }
    public required string ResourceType { get; init; }
    public string? ResourceId { get; init; }
    public string? OldValue { get; init; }
    public string? NewValue { get; init; }
    public string? CorrelationId { get; init; }
    public DateTimeOffset CreatedAt { get; init; }
}
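A usage sketch for the entity (hypothetical values; auditRepository and correlationId are assumed to be in scope): OldValue/NewValue carry raw JSON strings that the repository below casts to jsonb on insert, and Id is assigned by the database.

// Sketch: recording a config change as an audit row.
var audit = new PolicyAuditEntity
{
    TenantId = "tenant-a",
    UserId = Guid.NewGuid(),            // null for system actions
    Action = "profile.updated",
    ResourceType = "risk_profile",
    ResourceId = "default",
    OldValue = "{\"threshold\": 7.0}",  // serialized JSON; stored as jsonb
    NewValue = "{\"threshold\": 8.0}",
    CorrelationId = correlationId       // ties related entries together
};
long id = await auditRepository.CreateAsync(audit, cancellationToken);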
@@ -0,0 +1,162 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Policy.Storage.Postgres.Models;

namespace StellaOps.Policy.Storage.Postgres.Repositories;

/// <summary>
/// PostgreSQL repository for explanation operations.
/// </summary>
public sealed class ExplanationRepository : RepositoryBase<PolicyDataSource>, IExplanationRepository
{
    public ExplanationRepository(PolicyDataSource dataSource, ILogger<ExplanationRepository> logger)
        : base(dataSource, logger) { }

    public async Task<ExplanationEntity?> GetByIdAsync(Guid id, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, evaluation_run_id, rule_id, rule_name, result, severity, message, details, remediation, resource_path, line_number, created_at
            FROM policy.explanations WHERE id = @id
            """;
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "id", id);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapExplanation(reader) : null;
    }

    public async Task<IReadOnlyList<ExplanationEntity>> GetByEvaluationRunIdAsync(Guid evaluationRunId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, evaluation_run_id, rule_id, rule_name, result, severity, message, details, remediation, resource_path, line_number, created_at
            FROM policy.explanations WHERE evaluation_run_id = @evaluation_run_id
            ORDER BY created_at
            """;
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "evaluation_run_id", evaluationRunId);
        var results = new List<ExplanationEntity>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
            results.Add(MapExplanation(reader));
        return results;
    }

    public async Task<IReadOnlyList<ExplanationEntity>> GetByEvaluationRunIdAndResultAsync(Guid evaluationRunId, RuleResult result, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, evaluation_run_id, rule_id, rule_name, result, severity, message, details, remediation, resource_path, line_number, created_at
            FROM policy.explanations WHERE evaluation_run_id = @evaluation_run_id AND result = @result
            ORDER BY severity DESC, created_at
            """;
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "evaluation_run_id", evaluationRunId);
        AddParameter(command, "result", ResultToString(result));
        var results = new List<ExplanationEntity>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
            results.Add(MapExplanation(reader));
        return results;
    }

    public async Task<ExplanationEntity> CreateAsync(ExplanationEntity explanation, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO policy.explanations (id, evaluation_run_id, rule_id, rule_name, result, severity, message, details, remediation, resource_path, line_number)
            VALUES (@id, @evaluation_run_id, @rule_id, @rule_name, @result, @severity, @message, @details::jsonb, @remediation, @resource_path, @line_number)
            RETURNING *
            """;
        var id = explanation.Id == Guid.Empty ? Guid.NewGuid() : explanation.Id;
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "id", id);
        AddParameter(command, "evaluation_run_id", explanation.EvaluationRunId);
        AddParameter(command, "rule_id", explanation.RuleId);
        AddParameter(command, "rule_name", explanation.RuleName);
        AddParameter(command, "result", ResultToString(explanation.Result));
        AddParameter(command, "severity", explanation.Severity);
        AddParameter(command, "message", explanation.Message);
        AddJsonbParameter(command, "details", explanation.Details);
        AddParameter(command, "remediation", explanation.Remediation);
        AddParameter(command, "resource_path", explanation.ResourcePath);
        AddParameter(command, "line_number", explanation.LineNumber);

        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        await reader.ReadAsync(cancellationToken).ConfigureAwait(false);
        return MapExplanation(reader);
    }

    public async Task<int> CreateBatchAsync(IEnumerable<ExplanationEntity> explanations, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO policy.explanations (id, evaluation_run_id, rule_id, rule_name, result, severity, message, details, remediation, resource_path, line_number)
            VALUES (@id, @evaluation_run_id, @rule_id, @rule_name, @result, @severity, @message, @details::jsonb, @remediation, @resource_path, @line_number)
            """;
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        var count = 0;
        foreach (var explanation in explanations)
        {
            await using var command = CreateCommand(sql, connection);
            var id = explanation.Id == Guid.Empty ? Guid.NewGuid() : explanation.Id;
            AddParameter(command, "id", id);
            AddParameter(command, "evaluation_run_id", explanation.EvaluationRunId);
            AddParameter(command, "rule_id", explanation.RuleId);
            AddParameter(command, "rule_name", explanation.RuleName);
            AddParameter(command, "result", ResultToString(explanation.Result));
            AddParameter(command, "severity", explanation.Severity);
            AddParameter(command, "message", explanation.Message);
            AddJsonbParameter(command, "details", explanation.Details);
            AddParameter(command, "remediation", explanation.Remediation);
            AddParameter(command, "resource_path", explanation.ResourcePath);
            AddParameter(command, "line_number", explanation.LineNumber);
            count += await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        return count;
    }

    public async Task<bool> DeleteByEvaluationRunIdAsync(Guid evaluationRunId, CancellationToken cancellationToken = default)
    {
        const string sql = "DELETE FROM policy.explanations WHERE evaluation_run_id = @evaluation_run_id";
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "evaluation_run_id", evaluationRunId);
        var rows = await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return rows > 0;
    }

    private static ExplanationEntity MapExplanation(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        EvaluationRunId = reader.GetGuid(1),
        RuleId = GetNullableGuid(reader, 2),
        RuleName = reader.GetString(3),
        Result = ParseResult(reader.GetString(4)),
        Severity = reader.GetString(5),
        Message = GetNullableString(reader, 6),
        Details = reader.GetString(7),
        Remediation = GetNullableString(reader, 8),
        ResourcePath = GetNullableString(reader, 9),
        LineNumber = reader.IsDBNull(10) ? null : reader.GetInt32(10),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(11)
    };

    private static string ResultToString(RuleResult result) => result switch
    {
        RuleResult.Pass => "pass",
        RuleResult.Fail => "fail",
        RuleResult.Skip => "skip",
        RuleResult.Error => "error",
        _ => throw new ArgumentException($"Unknown result: {result}")
    };

    private static RuleResult ParseResult(string result) => result switch
    {
        "pass" => RuleResult.Pass,
        "fail" => RuleResult.Fail,
        "skip" => RuleResult.Skip,
        "error" => RuleResult.Error,
        _ => throw new ArgumentException($"Unknown result: {result}")
    };
}
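CreateBatchAsync issues one parameterized INSERT per entity over a single open connection and lets the repository assign ids for entities created with Guid.Empty. A usage sketch (findings, runId, and the field names on the left of each assignment from the caller's side are hypothetical):

// Sketch: persisting per-rule explanations for one evaluation run.
var explanations = findings.Select(f => new ExplanationEntity
{
    EvaluationRunId = runId,          // Id omitted => repository generates one
    RuleName = f.RuleName,
    Result = f.Passed ? RuleResult.Pass : RuleResult.Fail,
    Severity = f.Severity,
    Message = f.Message,
    Details = f.DetailsJson           // raw JSON; cast to jsonb by the INSERT
});
int inserted = await explanationRepository.CreateBatchAsync(explanations, cancellationToken);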
@@ -0,0 +1,16 @@
using StellaOps.Policy.Storage.Postgres.Models;

namespace StellaOps.Policy.Storage.Postgres.Repositories;

/// <summary>
/// Repository interface for explanation operations.
/// </summary>
public interface IExplanationRepository
{
    Task<ExplanationEntity?> GetByIdAsync(Guid id, CancellationToken cancellationToken = default);
    Task<IReadOnlyList<ExplanationEntity>> GetByEvaluationRunIdAsync(Guid evaluationRunId, CancellationToken cancellationToken = default);
    Task<IReadOnlyList<ExplanationEntity>> GetByEvaluationRunIdAndResultAsync(Guid evaluationRunId, RuleResult result, CancellationToken cancellationToken = default);
    Task<ExplanationEntity> CreateAsync(ExplanationEntity explanation, CancellationToken cancellationToken = default);
    Task<int> CreateBatchAsync(IEnumerable<ExplanationEntity> explanations, CancellationToken cancellationToken = default);
    Task<bool> DeleteByEvaluationRunIdAsync(Guid evaluationRunId, CancellationToken cancellationToken = default);
}
@@ -0,0 +1,15 @@
using StellaOps.Policy.Storage.Postgres.Models;

namespace StellaOps.Policy.Storage.Postgres.Repositories;

/// <summary>
/// Repository interface for policy audit operations.
/// </summary>
public interface IPolicyAuditRepository
{
    Task<long> CreateAsync(PolicyAuditEntity audit, CancellationToken cancellationToken = default);
    Task<IReadOnlyList<PolicyAuditEntity>> ListAsync(string tenantId, int limit = 100, int offset = 0, CancellationToken cancellationToken = default);
    Task<IReadOnlyList<PolicyAuditEntity>> GetByResourceAsync(string tenantId, string resourceType, string? resourceId = null, int limit = 100, CancellationToken cancellationToken = default);
    Task<IReadOnlyList<PolicyAuditEntity>> GetByCorrelationIdAsync(string tenantId, string correlationId, CancellationToken cancellationToken = default);
    Task<int> DeleteOldAsync(DateTimeOffset cutoff, CancellationToken cancellationToken = default);
}
@@ -0,0 +1,105 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Policy.Storage.Postgres.Models;

namespace StellaOps.Policy.Storage.Postgres.Repositories;

/// <summary>
/// PostgreSQL repository for policy audit operations.
/// </summary>
public sealed class PolicyAuditRepository : RepositoryBase<PolicyDataSource>, IPolicyAuditRepository
{
    public PolicyAuditRepository(PolicyDataSource dataSource, ILogger<PolicyAuditRepository> logger)
        : base(dataSource, logger) { }

    public async Task<long> CreateAsync(PolicyAuditEntity audit, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO policy.audit (tenant_id, user_id, action, resource_type, resource_id, old_value, new_value, correlation_id)
            VALUES (@tenant_id, @user_id, @action, @resource_type, @resource_id, @old_value::jsonb, @new_value::jsonb, @correlation_id)
            RETURNING id
            """;
        await using var connection = await DataSource.OpenConnectionAsync(audit.TenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "tenant_id", audit.TenantId);
        AddParameter(command, "user_id", audit.UserId);
        AddParameter(command, "action", audit.Action);
        AddParameter(command, "resource_type", audit.ResourceType);
        AddParameter(command, "resource_id", audit.ResourceId);
        AddJsonbParameter(command, "old_value", audit.OldValue);
        AddJsonbParameter(command, "new_value", audit.NewValue);
        AddParameter(command, "correlation_id", audit.CorrelationId);

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return (long)result!;
    }

    public async Task<IReadOnlyList<PolicyAuditEntity>> ListAsync(string tenantId, int limit = 100, int offset = 0, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, tenant_id, user_id, action, resource_type, resource_id, old_value, new_value, correlation_id, created_at
            FROM policy.audit WHERE tenant_id = @tenant_id
            ORDER BY created_at DESC LIMIT @limit OFFSET @offset
            """;
        return await QueryAsync(tenantId, sql, cmd =>
        {
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "limit", limit);
            AddParameter(cmd, "offset", offset);
        }, MapAudit, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<PolicyAuditEntity>> GetByResourceAsync(string tenantId, string resourceType, string? resourceId = null, int limit = 100, CancellationToken cancellationToken = default)
    {
        var sql = """
            SELECT id, tenant_id, user_id, action, resource_type, resource_id, old_value, new_value, correlation_id, created_at
            FROM policy.audit WHERE tenant_id = @tenant_id AND resource_type = @resource_type
            """;
        if (resourceId != null) sql += " AND resource_id = @resource_id";
        sql += " ORDER BY created_at DESC LIMIT @limit";

        return await QueryAsync(tenantId, sql, cmd =>
        {
            AddParameter(cmd, "tenant_id", tenantId);
            AddParameter(cmd, "resource_type", resourceType);
            if (resourceId != null) AddParameter(cmd, "resource_id", resourceId);
            AddParameter(cmd, "limit", limit);
        }, MapAudit, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<PolicyAuditEntity>> GetByCorrelationIdAsync(string tenantId, string correlationId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, tenant_id, user_id, action, resource_type, resource_id, old_value, new_value, correlation_id, created_at
            FROM policy.audit WHERE tenant_id = @tenant_id AND correlation_id = @correlation_id
            ORDER BY created_at
            """;
        return await QueryAsync(tenantId, sql,
            cmd => { AddParameter(cmd, "tenant_id", tenantId); AddParameter(cmd, "correlation_id", correlationId); },
            MapAudit, cancellationToken).ConfigureAwait(false);
    }

    public async Task<int> DeleteOldAsync(DateTimeOffset cutoff, CancellationToken cancellationToken = default)
    {
        const string sql = "DELETE FROM policy.audit WHERE created_at < @cutoff";
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "cutoff", cutoff);
        return await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    private static PolicyAuditEntity MapAudit(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetInt64(0),
        TenantId = reader.GetString(1),
        UserId = GetNullableGuid(reader, 2),
        Action = reader.GetString(3),
        ResourceType = reader.GetString(4),
        ResourceId = GetNullableString(reader, 5),
        OldValue = GetNullableString(reader, 6),
        NewValue = GetNullableString(reader, 7),
        CorrelationId = GetNullableString(reader, 8),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(9)
    };
}
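Note that DeleteOldAsync opens a system connection and filters only on created_at, so it sweeps all tenants at once; that shape fits a scheduled retention job. A sketch, with retentionDays as an assumed setting and the surrounding host elided:

// Sketch: nightly retention sweep (retentionDays is hypothetical configuration).
var cutoff = timeProvider.GetUtcNow().AddDays(-retentionDays);
int purged = await auditRepository.DeleteOldAsync(cutoff, cancellationToken);
logger.LogInformation("Purged {Count} policy audit rows older than {Cutoff:o}", purged, cutoff);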
@@ -35,6 +35,8 @@ public static class ServiceCollectionExtensions
        services.AddScoped<IEvaluationRunRepository, EvaluationRunRepository>();
        services.AddScoped<IExceptionRepository, ExceptionRepository>();
        services.AddScoped<IReceiptRepository, PostgresReceiptRepository>();
        services.AddScoped<IExplanationRepository, ExplanationRepository>();
        services.AddScoped<IPolicyAuditRepository, PolicyAuditRepository>();

        return services;
    }
@@ -60,6 +62,8 @@ public static class ServiceCollectionExtensions
        services.AddScoped<IEvaluationRunRepository, EvaluationRunRepository>();
        services.AddScoped<IExceptionRepository, ExceptionRepository>();
        services.AddScoped<IReceiptRepository, PostgresReceiptRepository>();
        services.AddScoped<IExplanationRepository, ExplanationRepository>();
        services.AddScoped<IPolicyAuditRepository, PolicyAuditRepository>();

        return services;
    }
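Both registration paths add the two new repositories as scoped services, so consumers resolve them per request or per explicit scope. A resolution sketch, assuming Microsoft.Extensions.DependencyInjection and an already-built host (app):

// Sketch: resolving the new repositories outside a request pipeline.
using var scope = app.Services.CreateScope();
var audits = scope.ServiceProvider.GetRequiredService<IPolicyAuditRepository>();
var explanations = scope.ServiceProvider.GetRequiredService<IExplanationRepository>();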
@@ -0,0 +1,250 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Storage.Postgres.Models;
using StellaOps.Policy.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Policy.Storage.Postgres.Tests;

[Collection(PolicyPostgresCollection.Name)]
public sealed class EvaluationRunRepositoryTests : IAsyncLifetime
{
    private readonly PolicyPostgresFixture _fixture;
    private readonly EvaluationRunRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public EvaluationRunRepositoryTests(PolicyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new PolicyDataSource(Options.Create(options), NullLogger<PolicyDataSource>.Instance);
        _repository = new EvaluationRunRepository(dataSource, NullLogger<EvaluationRunRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsEvaluationRun()
    {
        // Arrange
        var run = new EvaluationRunEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            ProjectId = "project-123",
            ArtifactId = "registry.example.com/app:v1.0",
            PackId = Guid.NewGuid(),
            PackVersion = 1,
            Status = EvaluationStatus.Pending
        };

        // Act
        await _repository.CreateAsync(run);
        var fetched = await _repository.GetByIdAsync(_tenantId, run.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(run.Id);
        fetched.ProjectId.Should().Be("project-123");
        fetched.Status.Should().Be(EvaluationStatus.Pending);
    }

    [Fact]
    public async Task GetByProjectId_ReturnsProjectEvaluations()
    {
        // Arrange
        var run = CreateRun("project-abc");
        await _repository.CreateAsync(run);

        // Act
        var runs = await _repository.GetByProjectIdAsync(_tenantId, "project-abc");

        // Assert
        runs.Should().HaveCount(1);
        runs[0].ProjectId.Should().Be("project-abc");
    }

    [Fact]
    public async Task GetByArtifactId_ReturnsArtifactEvaluations()
    {
        // Arrange
        var artifactId = "registry.example.com/app:v2.0";
        var run = new EvaluationRunEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            ArtifactId = artifactId,
            Status = EvaluationStatus.Pending
        };
        await _repository.CreateAsync(run);

        // Act
        var runs = await _repository.GetByArtifactIdAsync(_tenantId, artifactId);

        // Assert
        runs.Should().HaveCount(1);
        runs[0].ArtifactId.Should().Be(artifactId);
    }

    [Fact]
    public async Task GetByStatus_ReturnsRunsWithStatus()
    {
        // Arrange
        var pendingRun = CreateRun("project-1");
        var completedRun = new EvaluationRunEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            ProjectId = "project-2",
            Status = EvaluationStatus.Completed,
            Result = EvaluationResult.Pass
        };
        await _repository.CreateAsync(pendingRun);
        await _repository.CreateAsync(completedRun);

        // Act
        var pendingRuns = await _repository.GetByStatusAsync(_tenantId, EvaluationStatus.Pending);

        // Assert
        pendingRuns.Should().HaveCount(1);
        pendingRuns[0].ProjectId.Should().Be("project-1");
    }

    [Fact]
    public async Task GetRecent_ReturnsRecentEvaluations()
    {
        // Arrange
        await _repository.CreateAsync(CreateRun("project-1"));
        await _repository.CreateAsync(CreateRun("project-2"));

        // Act
        var recentRuns = await _repository.GetRecentAsync(_tenantId, limit: 10);

        // Assert
        recentRuns.Should().HaveCount(2);
    }

    [Fact]
    public async Task MarkStarted_UpdatesStatusAndStartedAt()
    {
        // Arrange
        var run = CreateRun("project-start");
        await _repository.CreateAsync(run);

        // Act
        var result = await _repository.MarkStartedAsync(_tenantId, run.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, run.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(EvaluationStatus.Running);
        fetched.StartedAt.Should().NotBeNull();
    }

    [Fact]
    public async Task MarkCompleted_UpdatesAllCompletionFields()
    {
        // Arrange
        var run = CreateRun("project-complete");
        await _repository.CreateAsync(run);
        await _repository.MarkStartedAsync(_tenantId, run.Id);

        // Act
        var result = await _repository.MarkCompletedAsync(
            _tenantId,
            run.Id,
            EvaluationResult.Fail,
            score: 65.5m,
            findingsCount: 10,
            criticalCount: 2,
            highCount: 3,
            mediumCount: 4,
            lowCount: 1,
            durationMs: 1500);
        var fetched = await _repository.GetByIdAsync(_tenantId, run.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(EvaluationStatus.Completed);
        fetched.Result.Should().Be(EvaluationResult.Fail);
        fetched.Score.Should().Be(65.5m);
        fetched.FindingsCount.Should().Be(10);
        fetched.CriticalCount.Should().Be(2);
        fetched.DurationMs.Should().Be(1500);
        fetched.CompletedAt.Should().NotBeNull();
    }

    [Fact]
    public async Task MarkFailed_SetsErrorMessage()
    {
        // Arrange
        var run = CreateRun("project-fail");
        await _repository.CreateAsync(run);

        // Act
        var result = await _repository.MarkFailedAsync(_tenantId, run.Id, "Policy engine timeout");
        var fetched = await _repository.GetByIdAsync(_tenantId, run.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(EvaluationStatus.Failed);
        fetched.Result.Should().Be(EvaluationResult.Error);
        fetched.ErrorMessage.Should().Be("Policy engine timeout");
    }

    [Fact]
    public async Task GetStats_ReturnsCorrectStatistics()
    {
        // Arrange
        var passedRun = new EvaluationRunEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Status = EvaluationStatus.Completed,
            Result = EvaluationResult.Pass,
            Score = 100,
            FindingsCount = 0,
            CriticalCount = 0,
            HighCount = 0
        };
        var failedRun = new EvaluationRunEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Status = EvaluationStatus.Completed,
            Result = EvaluationResult.Fail,
            Score = 50,
            FindingsCount = 5,
            CriticalCount = 1,
            HighCount = 2
        };
        await _repository.CreateAsync(passedRun);
        await _repository.CreateAsync(failedRun);

        var from = DateTimeOffset.UtcNow.AddHours(-1);
        var to = DateTimeOffset.UtcNow.AddHours(1);

        // Act
        var stats = await _repository.GetStatsAsync(_tenantId, from, to);

        // Assert
        stats.Total.Should().Be(2);
        stats.Passed.Should().Be(1);
        stats.Failed.Should().Be(1);
        stats.TotalFindings.Should().Be(5);
        stats.CriticalFindings.Should().Be(1);
        stats.HighFindings.Should().Be(2);
    }

    private EvaluationRunEntity CreateRun(string projectId) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        ProjectId = projectId,
        Status = EvaluationStatus.Pending
    };
}
@@ -0,0 +1,278 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Storage.Postgres.Models;
using StellaOps.Policy.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Policy.Storage.Postgres.Tests;

[Collection(PolicyPostgresCollection.Name)]
public sealed class ExceptionRepositoryTests : IAsyncLifetime
{
    private readonly PolicyPostgresFixture _fixture;
    private readonly ExceptionRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public ExceptionRepositoryTests(PolicyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new PolicyDataSource(Options.Create(options), NullLogger<PolicyDataSource>.Instance);
        _repository = new ExceptionRepository(dataSource, NullLogger<ExceptionRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsException()
    {
        // Arrange
        var exception = new ExceptionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "legacy-root-container",
            Description = "Allow root containers for legacy app",
            RulePattern = "no-root-containers",
            ProjectId = "project-legacy",
            Reason = "Legacy application requires root access",
            Status = ExceptionStatus.Active,
            ExpiresAt = DateTimeOffset.UtcNow.AddDays(30)
        };

        // Act
        await _repository.CreateAsync(exception);
        var fetched = await _repository.GetByIdAsync(_tenantId, exception.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(exception.Id);
        fetched.Name.Should().Be("legacy-root-container");
        fetched.Status.Should().Be(ExceptionStatus.Active);
    }

    [Fact]
    public async Task GetByName_ReturnsCorrectException()
    {
        // Arrange
        var exception = CreateException("temp-waiver");
        await _repository.CreateAsync(exception);

        // Act
        var fetched = await _repository.GetByNameAsync(_tenantId, "temp-waiver");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(exception.Id);
    }

    [Fact]
    public async Task GetAll_ReturnsAllExceptionsForTenant()
    {
        // Arrange
        var exception1 = CreateException("exception1");
        var exception2 = CreateException("exception2");
        await _repository.CreateAsync(exception1);
        await _repository.CreateAsync(exception2);

        // Act
        var exceptions = await _repository.GetAllAsync(_tenantId);

        // Assert
        exceptions.Should().HaveCount(2);
        exceptions.Select(e => e.Name).Should().Contain(["exception1", "exception2"]);
    }

    [Fact]
    public async Task GetAll_FiltersByStatus()
    {
        // Arrange
        var activeException = CreateException("active");
        var revokedException = new ExceptionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "revoked",
            Reason = "Test",
            Status = ExceptionStatus.Revoked
        };
        await _repository.CreateAsync(activeException);
        await _repository.CreateAsync(revokedException);

        // Act
        var activeExceptions = await _repository.GetAllAsync(_tenantId, status: ExceptionStatus.Active);

        // Assert
        activeExceptions.Should().HaveCount(1);
        activeExceptions[0].Name.Should().Be("active");
    }

    [Fact]
    public async Task GetActiveForProject_ReturnsProjectExceptions()
    {
        // Arrange
        var projectException = new ExceptionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "project-exception",
            ProjectId = "project-123",
            Reason = "Project-specific waiver",
            Status = ExceptionStatus.Active
        };
        var otherProjectException = new ExceptionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "other-exception",
            ProjectId = "project-456",
            Reason = "Other project waiver",
            Status = ExceptionStatus.Active
        };
        await _repository.CreateAsync(projectException);
        await _repository.CreateAsync(otherProjectException);

        // Act
        var exceptions = await _repository.GetActiveForProjectAsync(_tenantId, "project-123");

        // Assert
        exceptions.Should().HaveCount(1);
        exceptions[0].Name.Should().Be("project-exception");
    }

    [Fact]
    public async Task GetActiveForRule_ReturnsRuleExceptions()
    {
        // Arrange
        var ruleException = new ExceptionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "rule-exception",
            RulePattern = "no-root-containers",
            Reason = "Rule-specific waiver",
            Status = ExceptionStatus.Active
        };
        await _repository.CreateAsync(ruleException);

        // Act
        var exceptions = await _repository.GetActiveForRuleAsync(_tenantId, "no-root-containers");

        // Assert
        exceptions.Should().HaveCount(1);
        exceptions[0].Name.Should().Be("rule-exception");
    }

    [Fact]
    public async Task Update_ModifiesException()
    {
        // Arrange
        var exception = CreateException("update-test");
        await _repository.CreateAsync(exception);

        // Act
        var updated = new ExceptionEntity
        {
            Id = exception.Id,
            TenantId = _tenantId,
            Name = "update-test",
            Reason = "Updated reason",
            Description = "Updated description"
        };
        var result = await _repository.UpdateAsync(updated);
        var fetched = await _repository.GetByIdAsync(_tenantId, exception.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Reason.Should().Be("Updated reason");
        fetched.Description.Should().Be("Updated description");
    }

    [Fact]
    public async Task Approve_SetsApprovalDetails()
    {
        // Arrange
        var exception = CreateException("approve-test");
        await _repository.CreateAsync(exception);

        // Act
        var result = await _repository.ApproveAsync(_tenantId, exception.Id, "admin@example.com");
        var fetched = await _repository.GetByIdAsync(_tenantId, exception.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.ApprovedBy.Should().Be("admin@example.com");
        fetched.ApprovedAt.Should().NotBeNull();
    }

    [Fact]
    public async Task Revoke_SetsRevokedStatusAndDetails()
    {
        // Arrange
        var exception = CreateException("revoke-test");
        await _repository.CreateAsync(exception);

        // Act
        var result = await _repository.RevokeAsync(_tenantId, exception.Id, "admin@example.com");
        var fetched = await _repository.GetByIdAsync(_tenantId, exception.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.Status.Should().Be(ExceptionStatus.Revoked);
        fetched.RevokedBy.Should().Be("admin@example.com");
        fetched.RevokedAt.Should().NotBeNull();
    }

    [Fact]
    public async Task Expire_ExpiresOldExceptions()
    {
        // Arrange - Create an exception that expires in the past
        var expiredException = new ExceptionEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "expired",
            Reason = "Test",
            Status = ExceptionStatus.Active,
            ExpiresAt = DateTimeOffset.UtcNow.AddDays(-1)
        };
        await _repository.CreateAsync(expiredException);

        // Act
        var count = await _repository.ExpireAsync(_tenantId);
        var fetched = await _repository.GetByIdAsync(_tenantId, expiredException.Id);

        // Assert
        count.Should().Be(1);
        fetched!.Status.Should().Be(ExceptionStatus.Expired);
    }

    [Fact]
    public async Task Delete_RemovesException()
    {
        // Arrange
        var exception = CreateException("delete-test");
        await _repository.CreateAsync(exception);

        // Act
        var result = await _repository.DeleteAsync(_tenantId, exception.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, exception.Id);

        // Assert
        result.Should().BeTrue();
        fetched.Should().BeNull();
    }

    private ExceptionEntity CreateException(string name) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        Name = name,
        Reason = "Test exception",
        Status = ExceptionStatus.Active
    };
}
@@ -0,0 +1,213 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Storage.Postgres.Models;
using StellaOps.Policy.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Policy.Storage.Postgres.Tests;

[Collection(PolicyPostgresCollection.Name)]
public sealed class PackRepositoryTests : IAsyncLifetime
{
    private readonly PolicyPostgresFixture _fixture;
    private readonly PackRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public PackRepositoryTests(PolicyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new PolicyDataSource(Options.Create(options), NullLogger<PolicyDataSource>.Instance);
        _repository = new PackRepository(dataSource, NullLogger<PackRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsPack()
    {
        // Arrange
        var pack = new PackEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "security-baseline",
            DisplayName = "Security Baseline Pack",
            Description = "Core security policy rules",
            IsBuiltin = false
        };

        // Act
        await _repository.CreateAsync(pack);
        var fetched = await _repository.GetByIdAsync(_tenantId, pack.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(pack.Id);
        fetched.Name.Should().Be("security-baseline");
        fetched.DisplayName.Should().Be("Security Baseline Pack");
    }

    [Fact]
    public async Task GetByName_ReturnsCorrectPack()
    {
        // Arrange
        var pack = CreatePack("compliance-pack");
        await _repository.CreateAsync(pack);

        // Act
        var fetched = await _repository.GetByNameAsync(_tenantId, "compliance-pack");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(pack.Id);
    }

    [Fact]
    public async Task GetAll_ReturnsAllPacksForTenant()
    {
        // Arrange
        var pack1 = CreatePack("pack1");
        var pack2 = CreatePack("pack2");
        await _repository.CreateAsync(pack1);
        await _repository.CreateAsync(pack2);

        // Act
        var packs = await _repository.GetAllAsync(_tenantId);

        // Assert
        packs.Should().HaveCount(2);
        packs.Select(p => p.Name).Should().Contain(["pack1", "pack2"]);
    }

    [Fact]
    public async Task GetAll_ExcludesDeprecated()
    {
        // Arrange
        var activePack = CreatePack("active");
        var deprecatedPack = new PackEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "deprecated",
            IsDeprecated = true
        };
        await _repository.CreateAsync(activePack);
        await _repository.CreateAsync(deprecatedPack);

        // Act
        var packs = await _repository.GetAllAsync(_tenantId, includeDeprecated: false);

        // Assert
        packs.Should().HaveCount(1);
        packs[0].Name.Should().Be("active");
    }

    [Fact]
    public async Task GetBuiltin_ReturnsOnlyBuiltinPacks()
    {
        // Arrange
        var builtinPack = new PackEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "builtin",
            IsBuiltin = true
        };
        var customPack = CreatePack("custom");
        await _repository.CreateAsync(builtinPack);
        await _repository.CreateAsync(customPack);

        // Act
        var builtinPacks = await _repository.GetBuiltinAsync(_tenantId);

        // Assert
        builtinPacks.Should().HaveCount(1);
        builtinPacks[0].Name.Should().Be("builtin");
    }

    [Fact]
    public async Task Update_ModifiesPack()
    {
        // Arrange
        var pack = CreatePack("update-test");
        await _repository.CreateAsync(pack);

        // Act
        var updated = new PackEntity
        {
            Id = pack.Id,
            TenantId = _tenantId,
            Name = "update-test",
            DisplayName = "Updated Display Name",
            Description = "Updated description"
        };
        var result = await _repository.UpdateAsync(updated);
        var fetched = await _repository.GetByIdAsync(_tenantId, pack.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.DisplayName.Should().Be("Updated Display Name");
        fetched.Description.Should().Be("Updated description");
    }

    [Fact]
    public async Task SetActiveVersion_UpdatesActiveVersion()
    {
        // Arrange
        var pack = CreatePack("version-test");
        await _repository.CreateAsync(pack);

        // Act
        var result = await _repository.SetActiveVersionAsync(_tenantId, pack.Id, 2);
        var fetched = await _repository.GetByIdAsync(_tenantId, pack.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.ActiveVersion.Should().Be(2);
    }

    [Fact]
    public async Task Deprecate_MarksPackAsDeprecated()
    {
        // Arrange
        var pack = CreatePack("deprecate-test");
        await _repository.CreateAsync(pack);

        // Act
        var result = await _repository.DeprecateAsync(_tenantId, pack.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, pack.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.IsDeprecated.Should().BeTrue();
    }

    [Fact]
    public async Task Delete_RemovesPack()
    {
        // Arrange
        var pack = CreatePack("delete-test");
        await _repository.CreateAsync(pack);

        // Act
        var result = await _repository.DeleteAsync(_tenantId, pack.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, pack.Id);

        // Assert
        result.Should().BeTrue();
        fetched.Should().BeNull();
    }

    private PackEntity CreatePack(string name) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        Name = name,
        IsBuiltin = false
    };
}
@@ -0,0 +1,191 @@
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Policy.Storage.Postgres.Models;
|
||||
using StellaOps.Policy.Storage.Postgres.Repositories;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Policy.Storage.Postgres.Tests;

[Collection(PolicyPostgresCollection.Name)]
public sealed class PolicyAuditRepositoryTests : IAsyncLifetime
{
    private readonly PolicyPostgresFixture _fixture;
    private readonly PolicyAuditRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public PolicyAuditRepositoryTests(PolicyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new PolicyDataSource(Options.Create(options), NullLogger<PolicyDataSource>.Instance);
        _repository = new PolicyAuditRepository(dataSource, NullLogger<PolicyAuditRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task Create_ReturnsGeneratedId()
    {
        // Arrange
        var audit = new PolicyAuditEntity
        {
            TenantId = _tenantId,
            UserId = Guid.NewGuid(),
            Action = "pack.created",
            ResourceType = "pack",
            ResourceId = Guid.NewGuid().ToString()
        };

        // Act
        var id = await _repository.CreateAsync(audit);

        // Assert
        id.Should().BeGreaterThan(0);
    }

    [Fact]
    public async Task List_ReturnsAuditEntriesOrderedByCreatedAtDesc()
    {
        // Arrange
        var audit1 = CreateAudit("action1");
        var audit2 = CreateAudit("action2");
        await _repository.CreateAsync(audit1);
        await Task.Delay(10);
        await _repository.CreateAsync(audit2);

        // Act
        var audits = await _repository.ListAsync(_tenantId, limit: 10);

        // Assert
        audits.Should().HaveCount(2);
        audits[0].Action.Should().Be("action2"); // Most recent first
    }

    [Fact]
    public async Task GetByResource_ReturnsResourceAudits()
    {
        // Arrange
        var resourceId = Guid.NewGuid().ToString();
        var audit = new PolicyAuditEntity
        {
            TenantId = _tenantId,
            Action = "exception.updated",
            ResourceType = "exception",
            ResourceId = resourceId
        };
        await _repository.CreateAsync(audit);

        // Act
        var audits = await _repository.GetByResourceAsync(_tenantId, "exception", resourceId);

        // Assert
        audits.Should().HaveCount(1);
        audits[0].ResourceId.Should().Be(resourceId);
    }

    [Fact]
    public async Task GetByResource_WithoutResourceId_ReturnsAllOfType()
    {
        // Arrange
        await _repository.CreateAsync(new PolicyAuditEntity
        {
            TenantId = _tenantId,
            Action = "pack.created",
            ResourceType = "pack",
            ResourceId = Guid.NewGuid().ToString()
        });
        await _repository.CreateAsync(new PolicyAuditEntity
        {
            TenantId = _tenantId,
            Action = "pack.updated",
            ResourceType = "pack",
            ResourceId = Guid.NewGuid().ToString()
        });

        // Act
        var audits = await _repository.GetByResourceAsync(_tenantId, "pack");

        // Assert
        audits.Should().HaveCount(2);
    }

    [Fact]
    public async Task GetByCorrelationId_ReturnsCorrelatedAudits()
    {
        // Arrange
        var correlationId = Guid.NewGuid().ToString();
        var audit1 = new PolicyAuditEntity
        {
            TenantId = _tenantId,
            Action = "evaluation.started",
            ResourceType = "evaluation",
            CorrelationId = correlationId
        };
        var audit2 = new PolicyAuditEntity
        {
            TenantId = _tenantId,
            Action = "evaluation.completed",
            ResourceType = "evaluation",
            CorrelationId = correlationId
        };
        await _repository.CreateAsync(audit1);
        await _repository.CreateAsync(audit2);

        // Act
        var audits = await _repository.GetByCorrelationIdAsync(_tenantId, correlationId);

        // Assert
        audits.Should().HaveCount(2);
        audits.Should().AllSatisfy(a => a.CorrelationId.Should().Be(correlationId));
    }

    [Fact]
    public async Task Create_StoresJsonbValues()
    {
        // Arrange
        var audit = new PolicyAuditEntity
        {
            TenantId = _tenantId,
            Action = "profile.updated",
            ResourceType = "risk_profile",
            OldValue = "{\"threshold\": 7.0}",
            NewValue = "{\"threshold\": 8.0}"
        };

        // Act
        await _repository.CreateAsync(audit);
        var audits = await _repository.GetByResourceAsync(_tenantId, "risk_profile");

        // Assert
        audits.Should().HaveCount(1);
        audits[0].OldValue.Should().Contain("7.0");
        audits[0].NewValue.Should().Contain("8.0");
    }

    [Fact]
    public async Task DeleteOld_RemovesOldAudits()
    {
        // Arrange
        await _repository.CreateAsync(CreateAudit("old-action"));

        // Act - delete audits older than a cutoff one minute in the future,
        // so the row just created is guaranteed to match
        var cutoff = DateTimeOffset.UtcNow.AddMinutes(1);
        var count = await _repository.DeleteOldAsync(cutoff);

        // Assert
        count.Should().Be(1);
    }

    private PolicyAuditEntity CreateAudit(string action) => new()
    {
        TenantId = _tenantId,
        UserId = Guid.NewGuid(),
        Action = action,
        ResourceType = "test",
        ResourceId = Guid.NewGuid().ToString()
    };
}
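For orientation, here is the audit repository surface these tests exercise, inferred purely from the calls above. The interface name and parameter shapes are assumptions, not the committed contract.

// Hypothetical sketch inferred from PolicyAuditRepositoryTests; the real
// interface lives in StellaOps.Policy.Storage.Postgres.Repositories and may differ.
public interface IPolicyAuditRepositorySketch
{
    // Returns the generated bigint id (the tests assert id > 0).
    Task<long> CreateAsync(PolicyAuditEntity audit);

    // Newest-first listing, capped by limit.
    Task<IReadOnlyList<PolicyAuditEntity>> ListAsync(string tenantId, int limit);

    // resourceId is optional; omitting it returns all audits of the type.
    Task<IReadOnlyList<PolicyAuditEntity>> GetByResourceAsync(string tenantId, string resourceType, string? resourceId = null);

    Task<IReadOnlyList<PolicyAuditEntity>> GetByCorrelationIdAsync(string tenantId, string correlationId);

    // Retention sweep: deletes rows created before the cutoff, returns the count removed.
    Task<int> DeleteOldAsync(DateTimeOffset cutoff);
}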
@@ -0,0 +1,274 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Storage.Postgres.Models;
using StellaOps.Policy.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Policy.Storage.Postgres.Tests;

[Collection(PolicyPostgresCollection.Name)]
public sealed class RiskProfileRepositoryTests : IAsyncLifetime
{
    private readonly PolicyPostgresFixture _fixture;
    private readonly RiskProfileRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public RiskProfileRepositoryTests(PolicyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new PolicyDataSource(Options.Create(options), NullLogger<PolicyDataSource>.Instance);
        _repository = new RiskProfileRepository(dataSource, NullLogger<RiskProfileRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsRiskProfile()
    {
        // Arrange
        var profile = new RiskProfileEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "default",
            DisplayName = "Default Risk Profile",
            Description = "Standard risk scoring profile",
            Version = 1,
            IsActive = true,
            Thresholds = "{\"critical\": 9.0, \"high\": 7.0}",
            ScoringWeights = "{\"vulnerability\": 1.0, \"configuration\": 0.5}"
        };

        // Act
        await _repository.CreateAsync(profile);
        var fetched = await _repository.GetByIdAsync(_tenantId, profile.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(profile.Id);
        fetched.Name.Should().Be("default");
        fetched.Version.Should().Be(1);
        fetched.IsActive.Should().BeTrue();
    }

    [Fact]
    public async Task GetActiveByName_ReturnsActiveVersion()
    {
        // Arrange
        var inactiveProfile = new RiskProfileEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "versioned-profile",
            Version = 1,
            IsActive = false
        };
        var activeProfile = new RiskProfileEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "versioned-profile",
            Version = 2,
            IsActive = true
        };
        await _repository.CreateAsync(inactiveProfile);
        await _repository.CreateAsync(activeProfile);

        // Act
        var fetched = await _repository.GetActiveByNameAsync(_tenantId, "versioned-profile");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Version.Should().Be(2);
        fetched.IsActive.Should().BeTrue();
    }

    [Fact]
    public async Task GetAll_ReturnsProfilesForTenant()
    {
        // Arrange
        var profile1 = CreateProfile("profile1");
        var profile2 = CreateProfile("profile2");
        await _repository.CreateAsync(profile1);
        await _repository.CreateAsync(profile2);

        // Act
        var profiles = await _repository.GetAllAsync(_tenantId);

        // Assert
        profiles.Should().HaveCount(2);
        profiles.Select(p => p.Name).Should().Contain(["profile1", "profile2"]);
    }

    [Fact]
    public async Task GetAll_FiltersActiveOnly()
    {
        // Arrange
        var activeProfile = CreateProfile("active");
        var inactiveProfile = new RiskProfileEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "inactive",
            IsActive = false
        };
        await _repository.CreateAsync(activeProfile);
        await _repository.CreateAsync(inactiveProfile);

        // Act
        var activeProfiles = await _repository.GetAllAsync(_tenantId, activeOnly: true);

        // Assert
        activeProfiles.Should().HaveCount(1);
        activeProfiles[0].Name.Should().Be("active");
    }

    [Fact]
    public async Task GetVersionsByName_ReturnsAllVersions()
    {
        // Arrange
        var v1 = new RiskProfileEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "multi-version",
            Version = 1,
            IsActive = false
        };
        var v2 = new RiskProfileEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "multi-version",
            Version = 2,
            IsActive = true
        };
        await _repository.CreateAsync(v1);
        await _repository.CreateAsync(v2);

        // Act
        var versions = await _repository.GetVersionsByNameAsync(_tenantId, "multi-version");

        // Assert
        versions.Should().HaveCount(2);
        versions.Select(v => v.Version).Should().Contain([1, 2]);
    }

    [Fact]
    public async Task Update_ModifiesProfile()
    {
        // Arrange
        var profile = CreateProfile("update-test");
        await _repository.CreateAsync(profile);

        // Act
        var updated = new RiskProfileEntity
        {
            Id = profile.Id,
            TenantId = _tenantId,
            Name = "update-test",
            DisplayName = "Updated Display Name",
            Description = "Updated description",
            Thresholds = "{\"critical\": 8.0}"
        };
        var result = await _repository.UpdateAsync(updated);
        var fetched = await _repository.GetByIdAsync(_tenantId, profile.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.DisplayName.Should().Be("Updated Display Name");
        fetched.Thresholds.Should().Contain("8.0");
    }

    [Fact]
    public async Task CreateVersion_CreatesNewVersion()
    {
        // Arrange
        var original = CreateProfile("version-create");
        await _repository.CreateAsync(original);

        // Act
        var newVersion = new RiskProfileEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "version-create",
            DisplayName = "New Version",
            Version = 2,
            IsActive = true
        };
        var created = await _repository.CreateVersionAsync(_tenantId, "version-create", newVersion);

        // Assert
        created.Should().NotBeNull();
        created.Version.Should().Be(2);
    }

    [Fact]
    public async Task Activate_SetsProfileAsActive()
    {
        // Arrange
        var profile = new RiskProfileEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "activate-test",
            IsActive = false
        };
        await _repository.CreateAsync(profile);

        // Act
        var result = await _repository.ActivateAsync(_tenantId, profile.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, profile.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.IsActive.Should().BeTrue();
    }

    [Fact]
    public async Task Deactivate_SetsProfileAsInactive()
    {
        // Arrange
        var profile = CreateProfile("deactivate-test");
        await _repository.CreateAsync(profile);

        // Act
        var result = await _repository.DeactivateAsync(_tenantId, profile.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, profile.Id);

        // Assert
        result.Should().BeTrue();
        fetched!.IsActive.Should().BeFalse();
    }

    [Fact]
    public async Task Delete_RemovesProfile()
    {
        // Arrange
        var profile = CreateProfile("delete-test");
        await _repository.CreateAsync(profile);

        // Act
        var result = await _repository.DeleteAsync(_tenantId, profile.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, profile.Id);

        // Assert
        result.Should().BeTrue();
        fetched.Should().BeNull();
    }

    private RiskProfileEntity CreateProfile(string name) => new()
    {
        Id = Guid.NewGuid(),
        TenantId = _tenantId,
        Name = name,
        Version = 1,
        IsActive = true
    };
}
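GetActiveByName together with the Activate/Deactivate tests implies a single-active-version invariant per (tenant, name). A minimal sketch of how activation could enforce that transactionally, assuming a risk_profiles table with name and is_active columns (requires the Npgsql package); this is illustrative, not the code under test.

// Hypothetical activation logic; table and column names are assumptions.
public static async Task<bool> ActivateSketchAsync(NpgsqlConnection connection, string tenantId, Guid profileId)
{
    await using var transaction = await connection.BeginTransactionAsync();

    // Deactivate sibling versions that share the target profile's name...
    await using (var clear = new NpgsqlCommand(
        "UPDATE risk_profiles SET is_active = FALSE " +
        "WHERE tenant_id = @tenant AND name = (SELECT name FROM risk_profiles WHERE id = @id)",
        connection, transaction))
    {
        clear.Parameters.AddWithValue("tenant", tenantId);
        clear.Parameters.AddWithValue("id", profileId);
        await clear.ExecuteNonQueryAsync();
    }

    // ...then mark the requested version active.
    await using var activate = new NpgsqlCommand(
        "UPDATE risk_profiles SET is_active = TRUE WHERE tenant_id = @tenant AND id = @id",
        connection, transaction);
    activate.Parameters.AddWithValue("tenant", tenantId);
    activate.Parameters.AddWithValue("id", profileId);
    var rows = await activate.ExecuteNonQueryAsync();

    await transaction.CommitAsync();
    return rows == 1;
}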
@@ -0,0 +1,231 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Storage.Postgres.Models;
using StellaOps.Policy.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Policy.Storage.Postgres.Tests;

[Collection(PolicyPostgresCollection.Name)]
public sealed class RuleRepositoryTests : IAsyncLifetime
{
    private readonly PolicyPostgresFixture _fixture;
    private readonly RuleRepository _repository;
    private readonly Guid _packVersionId = Guid.NewGuid();

    public RuleRepositoryTests(PolicyPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new PolicyDataSource(Options.Create(options), NullLogger<PolicyDataSource>.Instance);
        _repository = new RuleRepository(dataSource, NullLogger<RuleRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task CreateAndGetById_RoundTripsRule()
    {
        // Arrange
        var rule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            PackVersionId = _packVersionId,
            Name = "no-root-containers",
            Description = "Containers should not run as root",
            RuleType = RuleType.Rego,
            Content = "package container\ndefault allow = false",
            ContentHash = "abc123",
            Severity = RuleSeverity.High,
            Category = "security",
            Tags = ["container", "security"]
        };

        // Act
        await _repository.CreateAsync(rule);
        var fetched = await _repository.GetByIdAsync(rule.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(rule.Id);
        fetched.Name.Should().Be("no-root-containers");
        fetched.Severity.Should().Be(RuleSeverity.High);
    }

    [Fact]
    public async Task GetByName_ReturnsCorrectRule()
    {
        // Arrange
        var rule = CreateRule("required-labels");
        await _repository.CreateAsync(rule);

        // Act
        var fetched = await _repository.GetByNameAsync(_packVersionId, "required-labels");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(rule.Id);
    }

    [Fact]
    public async Task CreateBatch_CreatesMultipleRules()
    {
        // Arrange
        var rules = new[]
        {
            CreateRule("rule1"),
            CreateRule("rule2"),
            CreateRule("rule3")
        };

        // Act
        var count = await _repository.CreateBatchAsync(rules);

        // Assert
        count.Should().Be(3);
    }

    [Fact]
    public async Task GetByPackVersionId_ReturnsAllRulesForVersion()
    {
        // Arrange
        var rule1 = CreateRule("rule1");
        var rule2 = CreateRule("rule2");
        await _repository.CreateAsync(rule1);
        await _repository.CreateAsync(rule2);

        // Act
        var rules = await _repository.GetByPackVersionIdAsync(_packVersionId);

        // Assert
        rules.Should().HaveCount(2);
        rules.Select(r => r.Name).Should().Contain(["rule1", "rule2"]);
    }

    [Fact]
    public async Task GetBySeverity_ReturnsRulesWithSeverity()
    {
        // Arrange
        var criticalRule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            PackVersionId = _packVersionId,
            Name = "critical-rule",
            Content = "content",
            ContentHash = "hash",
            Severity = RuleSeverity.Critical
        };
        var lowRule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            PackVersionId = _packVersionId,
            Name = "low-rule",
            Content = "content",
            ContentHash = "hash2",
            Severity = RuleSeverity.Low
        };
        await _repository.CreateAsync(criticalRule);
        await _repository.CreateAsync(lowRule);

        // Act
        var criticalRules = await _repository.GetBySeverityAsync(_packVersionId, RuleSeverity.Critical);

        // Assert
        criticalRules.Should().HaveCount(1);
        criticalRules[0].Name.Should().Be("critical-rule");
    }

    [Fact]
    public async Task GetByCategory_ReturnsRulesInCategory()
    {
        // Arrange
        var securityRule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            PackVersionId = _packVersionId,
            Name = "security-rule",
            Content = "content",
            ContentHash = "hash",
            Category = "security"
        };
        var complianceRule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            PackVersionId = _packVersionId,
            Name = "compliance-rule",
            Content = "content",
            ContentHash = "hash2",
            Category = "compliance"
        };
        await _repository.CreateAsync(securityRule);
        await _repository.CreateAsync(complianceRule);

        // Act
        var securityRules = await _repository.GetByCategoryAsync(_packVersionId, "security");

        // Assert
        securityRules.Should().HaveCount(1);
        securityRules[0].Name.Should().Be("security-rule");
    }

    [Fact]
    public async Task GetByTag_ReturnsRulesWithTag()
    {
        // Arrange
        var containerRule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            PackVersionId = _packVersionId,
            Name = "container-rule",
            Content = "content",
            ContentHash = "hash",
            Tags = ["container", "docker"]
        };
        var networkRule = new RuleEntity
        {
            Id = Guid.NewGuid(),
            PackVersionId = _packVersionId,
            Name = "network-rule",
            Content = "content",
            ContentHash = "hash2",
            Tags = ["network"]
        };
        await _repository.CreateAsync(containerRule);
        await _repository.CreateAsync(networkRule);

        // Act
        var containerRules = await _repository.GetByTagAsync(_packVersionId, "container");

        // Assert
        containerRules.Should().HaveCount(1);
        containerRules[0].Name.Should().Be("container-rule");
    }

    [Fact]
    public async Task CountByPackVersionId_ReturnsCorrectCount()
    {
        // Arrange
        await _repository.CreateAsync(CreateRule("rule1"));
        await _repository.CreateAsync(CreateRule("rule2"));
        await _repository.CreateAsync(CreateRule("rule3"));

        // Act
        var count = await _repository.CountByPackVersionIdAsync(_packVersionId);

        // Assert
        count.Should().Be(3);
    }

    private RuleEntity CreateRule(string name) => new()
    {
        Id = Guid.NewGuid(),
        PackVersionId = _packVersionId,
        Name = name,
        Content = "package test",
        ContentHash = Guid.NewGuid().ToString()
    };
}
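GetByTagAsync implies that tags are stored in a queryable column. A sketch assuming a Postgres text[] column and the @> containment operator (requires the Npgsql package); the committed schema may use jsonb or a join table instead.

// Hypothetical tag lookup; the "rules" table shape is an assumption.
public static async Task<List<string>> GetRuleNamesByTagSketchAsync(
    NpgsqlDataSource dataSource, Guid packVersionId, string tag)
{
    const string sql =
        "SELECT name FROM rules " +
        "WHERE pack_version_id = @packVersionId AND tags @> ARRAY[@tag]";

    await using var command = dataSource.CreateCommand(sql);
    command.Parameters.AddWithValue("packVersionId", packVersionId);
    command.Parameters.AddWithValue("tag", tag);

    var names = new List<string>();
    await using var reader = await command.ExecuteReaderAsync();
    while (await reader.ReadAsync())
    {
        names.Add(reader.GetString(0)); // one row per matching rule
    }
    return names;
}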
@@ -28,6 +28,11 @@ public static class ServiceCollectionExtensions

        // Register repositories
        services.AddScoped<IJobRepository, JobRepository>();
        services.AddScoped<ITriggerRepository, TriggerRepository>();
        services.AddScoped<IWorkerRepository, WorkerRepository>();
        services.AddScoped<IDistributedLockRepository, DistributedLockRepository>();
        services.AddScoped<IJobHistoryRepository, JobHistoryRepository>();
        services.AddScoped<IMetricsRepository, MetricsRepository>();

        return services;
    }
@@ -47,6 +52,11 @@ public static class ServiceCollectionExtensions

        // Register repositories
        services.AddScoped<IJobRepository, JobRepository>();
        services.AddScoped<ITriggerRepository, TriggerRepository>();
        services.AddScoped<IWorkerRepository, WorkerRepository>();
        services.AddScoped<IDistributedLockRepository, DistributedLockRepository>();
        services.AddScoped<IJobHistoryRepository, JobHistoryRepository>();
        services.AddScoped<IMetricsRepository, MetricsRepository>();

        return services;
    }
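Both hunks register the same repository set, so consumers resolve them uniformly regardless of which registration overload wires the data source. A hedged usage sketch follows; the AddSchedulerPostgresStorage name and options shape are assumptions taken from context, not the actual extension signature.

// Hypothetical consumer wiring; the extension method and option names are assumptions.
var services = new ServiceCollection();
services.AddSchedulerPostgresStorage(options =>
{
    options.ConnectionString = "Host=localhost;Database=scheduler"; // illustrative
});

using var provider = services.BuildServiceProvider();
using var scope = provider.CreateScope();

// The AddScoped registrations above mean one instance per scope.
var locks = scope.ServiceProvider.GetRequiredService<IDistributedLockRepository>();
var history = scope.ServiceProvider.GetRequiredService<IJobHistoryRepository>();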
@@ -0,0 +1,129 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Scheduler.Storage.Postgres.Repositories;
using Xunit;

namespace StellaOps.Scheduler.Storage.Postgres.Tests;

[Collection(SchedulerPostgresCollection.Name)]
public sealed class DistributedLockRepositoryTests : IAsyncLifetime
{
    private readonly SchedulerPostgresFixture _fixture;
    private readonly DistributedLockRepository _repository;
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public DistributedLockRepositoryTests(SchedulerPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new SchedulerDataSource(Options.Create(options), NullLogger<SchedulerDataSource>.Instance);
        _repository = new DistributedLockRepository(dataSource, NullLogger<DistributedLockRepository>.Instance);
    }

    public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task TryAcquire_SucceedsOnFirstAttempt()
    {
        // Arrange
        var lockKey = $"test-lock-{Guid.NewGuid()}";

        // Act
        var acquired = await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Assert
        acquired.Should().BeTrue();
    }

    [Fact]
    public async Task TryAcquire_FailsWhenAlreadyHeld()
    {
        // Arrange
        var lockKey = $"contended-lock-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Act
        var secondAcquire = await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-2", TimeSpan.FromMinutes(5));

        // Assert
        secondAcquire.Should().BeFalse();
    }

    [Fact]
    public async Task Release_AllowsReacquisition()
    {
        // Arrange
        var lockKey = $"release-test-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Act
        await _repository.ReleaseAsync(lockKey, "worker-1");
        var reacquired = await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-2", TimeSpan.FromMinutes(5));

        // Assert
        reacquired.Should().BeTrue();
    }

    [Fact]
    public async Task Extend_ExtendsLockDuration()
    {
        // Arrange
        var lockKey = $"extend-test-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(1));

        // Act
        var extended = await _repository.ExtendAsync(lockKey, "worker-1", TimeSpan.FromMinutes(10));

        // Assert
        extended.Should().BeTrue();
    }

    [Fact]
    public async Task Extend_FailsForDifferentHolder()
    {
        // Arrange
        var lockKey = $"extend-fail-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Act
        var extended = await _repository.ExtendAsync(lockKey, "worker-2", TimeSpan.FromMinutes(10));

        // Assert
        extended.Should().BeFalse();
    }

    [Fact]
    public async Task Get_ReturnsLockInfo()
    {
        // Arrange
        var lockKey = $"get-test-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Act
        var lockInfo = await _repository.GetAsync(lockKey);

        // Assert
        lockInfo.Should().NotBeNull();
        lockInfo!.HolderId.Should().Be("worker-1");
    }

    [Fact]
    public async Task ListByTenant_ReturnsTenantsLocks()
    {
        // Arrange
        var lockKey1 = $"tenant-lock-1-{Guid.NewGuid()}";
        var lockKey2 = $"tenant-lock-2-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey1, "worker-1", TimeSpan.FromMinutes(5));
        await _repository.TryAcquireAsync(_tenantId, lockKey2, "worker-1", TimeSpan.FromMinutes(5));

        // Act
        var locks = await _repository.ListByTenantAsync(_tenantId);

        // Assert
        locks.Should().HaveCount(2);
    }
}
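Taken together, the lock tests describe a lease-style protocol: acquire with a TTL, extend while working, release when done. A minimal consumer sketch under those assumptions, using only the signatures the tests above exercise; the key and holder-id values are illustrative.

// Hypothetical consumer following the acquire/extend/release semantics asserted above.
public static async Task RunExclusiveAsync(IDistributedLockRepository locks, string tenantId, Func<Task> work)
{
    const string lockKey = "nightly-rescan";            // illustrative key
    var holderId = $"worker-{Environment.MachineName}"; // illustrative holder identity
    var lease = TimeSpan.FromMinutes(5);

    if (!await locks.TryAcquireAsync(tenantId, lockKey, holderId, lease))
    {
        return; // another holder owns the lease; skip this run
    }

    try
    {
        // For long-running work, renew the lease before it expires.
        await locks.ExtendAsync(lockKey, holderId, lease);
        await work();
    }
    finally
    {
        await locks.ReleaseAsync(lockKey, holderId);
    }
}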
Some files were not shown because too many files have changed in this diff.