Some checks failed
Signals CI & Image / signals-ci (push) Has been cancelled
Signals Reachability Scoring & Events / reachability-smoke (push) Has been cancelled
Signals Reachability Scoring & Events / sign-and-upload (push) Has been cancelled
Manifest Integrity / Validate Schema Integrity (push) Has been cancelled
Manifest Integrity / Validate Contract Documents (push) Has been cancelled
Manifest Integrity / Validate Pack Fixtures (push) Has been cancelled
Manifest Integrity / Audit SHA256SUMS Files (push) Has been cancelled
Manifest Integrity / Verify Merkle Roots (push) Has been cancelled
Docs CI / lint-and-preview (push) Has been cancelled
src/Bench/StellaOps.Bench/ImpactIndex/README.md (new file, 22 lines)
@@ -0,0 +1,22 @@
# ImpactIndex Throughput Benchmark

This harness replays a deterministic set of productKeys to measure cold vs warm lookup performance for the ImpactIndex planner. It is offline-only and relies on the bundled NDJSON dataset.

## Inputs
- `docs/samples/impactindex/products-10k.ndjson` (+ `.sha256`), generated with seed `2025-01-01T00:00:00Z`.
- No network calls are performed; all data is local.

## Running
```bash
python impact_index_bench.py --input ../../../../docs/samples/impactindex/products-10k.ndjson --output results/impactindex.ndjson --threads 1 --seed 20250101
```

## Output
- NDJSON with one record per pass (`cold`, `warm`), fields:
  `pass`, `startedAtUtc`, `durationMs`, `throughput_items_per_sec`, `p95Ms`, `p99Ms`, `maxMs`, `rssMb`, `managedMb`, `gc_gen2`, `cacheHitRate`.
- Use `results/impactindex.ndjson` as evidence and publish hashes alongside runs when promoting to CI.

## Determinism Notes
- Fixed seed controls per-product work and cache access order.
- Single-threaded by default; use `--threads 1` for reproducible timing.
- Property order is sorted in output NDJSON for stable diffs.
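A minimal sketch of the hash check implied above, assuming the run wrote `results/impactindex.ndjson` with its `.sha256` next to it (the `<hex-digest> <filename>` layout is assumed):

```python
# Sketch: verify the published digest for a benchmark run.
# Paths are assumptions based on this README, not part of the harness itself.
import hashlib
from pathlib import Path

result = Path("results/impactindex.ndjson")
expected = Path("results/impactindex.ndjson.sha256").read_text(encoding="utf-8").split()[0]
actual = hashlib.sha256(result.read_bytes()).hexdigest()
assert actual == expected, f"digest mismatch: {actual}"
```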
src/Bench/StellaOps.Bench/ImpactIndex/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Package marker for ImpactIndex bench harness.
Binary file not shown.
src/Bench/StellaOps.Bench/ImpactIndex/impact_index_bench.py (new file, 146 lines)
@@ -0,0 +1,146 @@
"""ImpactIndex throughput benchmark harness.

This harness replays a deterministic productKey dataset and records cold vs warm
lookup performance. It is intentionally offline-friendly and relies only on the
provided NDJSON inputs.
"""

import argparse
import gc
import hashlib
import json
import random
import statistics
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Iterable, List, Tuple


def percentile(values: List[float], pct: float) -> float:
    """Return an interpolated percentile to keep outputs deterministic."""
    if not values:
        return 0.0
    ordered = sorted(values)
    k = (len(ordered) - 1) * (pct / 100.0)
    lower = int(k)
    upper = min(lower + 1, len(ordered) - 1)
    if lower == upper:
        return float(ordered[lower])
    fraction = k - lower
    return float(ordered[lower] + (ordered[upper] - ordered[lower]) * fraction)


def load_product_keys(path: Path) -> List[str]:
    with path.open(encoding="utf-8") as handle:
        return [json.loads(line)["productKey"] for line in handle if line.strip()]


class ImpactIndexBench:
    def __init__(self, seed: int, threads: int):
        self.rng = random.Random(seed)
        self.threads = threads
        self.cache = {}
        self.cache_hits = 0
        self.cache_misses = 0

    def _compute_cost(self, product_key: str) -> int:
        digest = hashlib.blake2b(product_key.encode("utf-8"), digest_size=16).digest()
        local_rng = random.Random(hashlib.sha1(product_key.encode("utf-8")).hexdigest())
        iterations = 40 + (digest[0] % 30)
        value = 0
        for i in range(iterations):
            value ^= (digest[i % len(digest)] + i * 31) & 0xFFFFFFFF
            value ^= local_rng.randint(0, 1024)
        # Simple deterministic cost proxy
        return value

    def resolve(self, product_key: str) -> int:
        if product_key in self.cache:
            self.cache_hits += 1
            return self.cache[product_key]

        cost = self._compute_cost(product_key)
        enriched = (cost % 1000) + 1
        self.cache[product_key] = enriched
        self.cache_misses += 1
        return enriched


def run_pass(pass_name: str, bench: ImpactIndexBench, product_keys: Iterable[str]) -> Tuple[dict, List[float]]:
    started_at = datetime.now(timezone.utc).isoformat()
    timings_ms: List[float] = []

    gc.collect()
    import tracemalloc

    tracemalloc.start()
    start = time.perf_counter()
    for key in product_keys:
        t0 = time.perf_counter()
        bench.resolve(key)
        timings_ms.append((time.perf_counter() - t0) * 1000.0)
    duration_ms = (time.perf_counter() - start) * 1000.0
    current_bytes, peak_bytes = tracemalloc.get_traced_memory()
    tracemalloc.stop()

    # GC stats are coarse; we surface gen2 collections as a proxy for managed pressure.
    if hasattr(gc, "get_stats"):
        gc_stats = gc.get_stats()
        gc_gen2 = gc_stats[2]["collections"] if len(gc_stats) > 2 else 0
    else:
        counts = gc.get_count()
        gc_gen2 = counts[2] if len(counts) > 2 else 0

    throughput = (len(timings_ms) / (duration_ms / 1000.0)) if duration_ms else 0.0
    record = {
        "pass": pass_name,
        "startedAtUtc": started_at,
        "durationMs": round(duration_ms, 3),
        "throughput_items_per_sec": round(throughput, 3),
        "p95Ms": round(percentile(timings_ms, 95), 3),
        "p99Ms": round(percentile(timings_ms, 99), 3),
        "maxMs": round(max(timings_ms) if timings_ms else 0.0, 3),
        "rssMb": round(peak_bytes / (1024 * 1024), 3),
        "managedMb": round(peak_bytes / (1024 * 1024), 3),
        "gc_gen2": gc_gen2,
        "cacheHitRate": round(
            bench.cache_hits / max(1, (bench.cache_hits + bench.cache_misses)), 4
        ),
    }
    return record, timings_ms


def write_ndjson(records: List[dict], output: Path):
    output.parent.mkdir(parents=True, exist_ok=True)
    with output.open("w", encoding="utf-8") as handle:
        for record in records:
            handle.write(json.dumps(record, separators=(",", ":"), sort_keys=True) + "\n")


def parse_args(argv: List[str] | None = None) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="ImpactIndex throughput benchmark")
    parser.add_argument("--input", required=True, help="Path to products-10k.ndjson dataset")
    parser.add_argument("--output", default="results/impactindex.ndjson", help="Output NDJSON path")
    parser.add_argument("--threads", type=int, default=1, help="Thread count (deterministic when 1)")
    parser.add_argument("--seed", type=int, default=20250101, help="Seed for deterministic runs")
    return parser.parse_args(argv)


def main(argv: List[str] | None = None):
    args = parse_args(argv)
    dataset_path = Path(args.input)
    product_keys = load_product_keys(dataset_path)

    bench = ImpactIndexBench(seed=args.seed, threads=args.threads)
    cold_record, cold_timings = run_pass("cold", bench, product_keys)
    warm_record, warm_timings = run_pass("warm", bench, product_keys)

    output_path = Path(args.output)
    write_ndjson([cold_record, warm_record], output_path)
    print(f"Wrote {output_path} with {len(product_keys)} productKeys")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
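For a quick in-process smoke check of the harness above, it can be driven without the CLI (a sketch; the three-key dataset here is hypothetical, not one of the bundled fixtures):

```python
# Sketch: exercise cold vs warm passes directly against the module above.
import impact_index_bench as bench

harness = bench.ImpactIndexBench(seed=20250101, threads=1)
keys = ["pkg:npm/alpha@1.0.0", "pkg:npm/bravo@1.0.1", "pkg:pypi/charlie@2.0.0"]
cold, _ = bench.run_pass("cold", harness, keys)
warm, _ = bench.run_pass("warm", harness, keys)
assert warm["cacheHitRate"] > cold["cacheHitRate"]  # second pass is served from the cache
```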
src/Bench/StellaOps.Bench/ImpactIndex/results/impactindex.ndjson (new file, 2 lines)
@@ -0,0 +1,2 @@
{"cacheHitRate":0.0,"durationMs":4327.484,"gc_gen2":1,"managedMb":0.743,"maxMs":1.454,"p95Ms":0.746,"p99Ms":0.948,"pass":"cold","rssMb":0.743,"startedAtUtc":"2025-12-11T20:46:49.411207+00:00","throughput_items_per_sec":2310.811}
{"cacheHitRate":0.5,"durationMs":14.618,"gc_gen2":2,"managedMb":0.31,"maxMs":0.098,"p95Ms":0.001,"p99Ms":0.003,"pass":"warm","rssMb":0.31,"startedAtUtc":"2025-12-11T20:46:53.753219+00:00","throughput_items_per_sec":684092.79}
src/Bench/StellaOps.Bench/ImpactIndex/results/impactindex.ndjson.sha256 (new file, 1 line)
@@ -0,0 +1 @@
7e9f1041a4be6f1b0eeed26f1b4e730ae918876dc2846e36dab4403f9164485e impactindex.ndjson
src/Bench/StellaOps.Bench/ImpactIndex/tests/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Package marker for unit test discovery.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,61 @@
import json
import sys
import unittest
from pathlib import Path

ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

import impact_index_bench as bench


def build_dataset(tmp_path: Path) -> Path:
    path = tmp_path / "products.ndjson"
    samples = [
        {"productKey": "pkg:npm/alpha@1.0.0", "tenant": "bench"},
        {"productKey": "pkg:npm/bravo@1.0.1", "tenant": "bench"},
        {"productKey": "pkg:pypi/charlie@2.0.0", "tenant": "bench"},
    ]
    with path.open("w", encoding="utf-8") as handle:
        for item in samples:
            handle.write(json.dumps(item, separators=(",", ":")) + "\n")
    return path


class ImpactIndexBenchTests(unittest.TestCase):
    def test_percentile_interpolation(self):
        values = [1, 2, 3, 4, 5]
        self.assertEqual(bench.percentile(values, 50), 3)
        self.assertAlmostEqual(bench.percentile(values, 95), 4.8, places=3)

    def test_bench_runs_cold_and_warm(self):
        tmp_path = Path(self._get_tempdir())
        dataset = build_dataset(tmp_path)
        keys = bench.load_product_keys(dataset)
        harness = bench.ImpactIndexBench(seed=20250101, threads=1)

        cold_record, cold_timings = bench.run_pass("cold", harness, keys)
        warm_record, warm_timings = bench.run_pass("warm", harness, keys)

        self.assertEqual(cold_record["pass"], "cold")
        self.assertEqual(warm_record["pass"], "warm")
        self.assertEqual(len(cold_timings), len(keys))
        self.assertEqual(len(warm_timings), len(keys))
        self.assertGreater(warm_record["cacheHitRate"], cold_record["cacheHitRate"])

    def test_write_ndjson_orders_properties(self):
        tmp_path = Path(self._get_tempdir())
        output = tmp_path / "out.ndjson"
        bench.write_ndjson([{"b": 2, "a": 1}], output)
        content = output.read_text(encoding="utf-8").strip()
        self.assertEqual(content, '{"a":1,"b":2}')

    def _get_tempdir(self) -> Path:
        import tempfile

        return Path(tempfile.mkdtemp(prefix="impact-bench-test-"))


if __name__ == "__main__":
    unittest.main()
src/Bench/StellaOps.Bench/PolicyCache/README.md (new file, 21 lines)
@@ -0,0 +1,21 @@
# Policy Evaluation with Reachability Cache

Benchmarks policy evaluation overhead with cold, warm, and mixed reachability cache scenarios.

## Inputs
- Policies: `docs/samples/policy/policy-delta-baseline.ndjson` (or another baseline).
- Reachability cache: `src/Bench/StellaOps.Bench/Signals/results/reachability-cache-10k.ndjson` (or the 50k variant).

## Running
```bash
python policy_cache_bench.py --policies ../../../../docs/samples/policy/policy-delta-baseline.ndjson --reachability-cache ../Signals/results/reachability-cache-10k.ndjson --output results/policy-cache.ndjson --seed 20250101 --threads 1
```

## Output
- NDJSON with three records: `cold`, `warm`, `mixed` (70/30 warm/cold split).
- Fields: `run`, `evaluations`, `durationMs`, `throughputPerSec`, `addedLatencyP50Ms`, `addedLatencyP95Ms`, `addedLatencyP99Ms`, `rssMb`, `managedMb`, `gcGen2`, `cacheHitRate`.

## Determinism
- Policy-to-function mapping uses blake2b hashing with fixed seed input; ordering is stable.
- Single-threaded execution keeps timing deterministic relative to hardware.
- JSON output uses sorted keys and UTC timestamps.
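A quick sanity pass over the emitted records, as a sketch (the output path is assumed from the run above):

```python
# Sketch: load the three run records and check the expected cache ordering.
import json
from pathlib import Path

records = [json.loads(line) for line in Path("results/policy-cache.ndjson").read_text(encoding="utf-8").splitlines()]
by_run = {rec["run"]: rec for rec in records}
assert set(by_run) == {"cold", "warm", "mixed"}
assert by_run["warm"]["cacheHitRate"] >= by_run["cold"]["cacheHitRate"]
```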
src/Bench/StellaOps.Bench/PolicyCache/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Package marker for policy reachability cache bench.
Binary file not shown.
src/Bench/StellaOps.Bench/PolicyCache/policy_cache_bench.py (new file, 165 lines)
@@ -0,0 +1,165 @@
"""Policy evaluation with reachability cache benchmark.

Uses reachability cache outputs (from 26-001) to measure cold vs warm cache
latency impact on policy evaluation.
"""

import argparse
import gc
import hashlib
import json
import random
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Iterable, List, Tuple


def percentile(values: List[float], pct: float) -> float:
    if not values:
        return 0.0
    ordered = sorted(values)
    k = (len(ordered) - 1) * (pct / 100.0)
    lower = int(k)
    upper = min(lower + 1, len(ordered) - 1)
    if lower == upper:
        return float(ordered[lower])
    fraction = k - lower
    return float(ordered[lower] + (ordered[upper] - ordered[lower]) * fraction)


def load_ndjson(path: Path) -> List[dict]:
    with path.open(encoding="utf-8") as handle:
        return [json.loads(line) for line in handle if line.strip()]


class PolicyCacheBench:
    def __init__(self, policies: List[dict], reachability_cache: List[dict], seed: int):
        self.policies = policies
        self.reachability_cache = reachability_cache
        self.seed = seed
        self.rng = random.Random(seed)
        self.cache_lookup = {rec["function"]: rec for rec in reachability_cache}
        self.function_index = [rec["function"] for rec in reachability_cache]
        self.evaluation_cache: dict[str, float] = {}
        self.cache_hits = 0
        self.cache_misses = 0

    def map_policy_to_function(self, policy_id: str) -> str:
        if not self.function_index:
            return ""
        digest = hashlib.blake2b(policy_id.encode("utf-8"), digest_size=8).digest()
        idx = int.from_bytes(digest, "big") % len(self.function_index)
        return self.function_index[idx]

    def evaluate(self, policy: dict, mode: str) -> float:
        """Simulate evaluation cost and return added latency in ms."""
        function = self.map_policy_to_function(policy["policyId"])
        cached = self.evaluation_cache.get(function)
        if mode == "warm" and cached is not None:
            self.cache_hits += 1
            return cached

        self.cache_misses += 1
        rec = self.cache_lookup.get(function, {"fanout": 1, "runtimeCount": 0})
        fanout = rec.get("fanout", 1) or 1
        runtime_count = rec.get("runtimeCount", 0)
        work_units = (fanout * 2) + (runtime_count or 1)

        t0 = time.perf_counter()
        acc = 0
        for i in range(work_units):
            acc ^= (i * 31) ^ fanout
            acc ^= runtime_count
        latency_ms = (time.perf_counter() - t0) * 1000.0
        self.evaluation_cache[function] = latency_ms
        return latency_ms

    def run(self, mode: str) -> Tuple[dict, List[float]]:
        import tracemalloc

        started_at = datetime.now(timezone.utc).isoformat()
        timings: List[float] = []

        gc.collect()
        tracemalloc.start()
        start = time.perf_counter()

        if mode == "mixed":
            cutoff = int(len(self.policies) * 0.3)
            cold_policies = self.policies[:cutoff]
            warm_policies = self.policies[cutoff:]
            ordered = cold_policies + warm_policies
            warm_override = [False] * len(cold_policies) + [True] * len(warm_policies)
        else:
            ordered = self.policies
            warm_override = [mode == "warm"] * len(self.policies)

        for policy, use_warm in zip(ordered, warm_override):
            timings.append(self.evaluate(policy, "warm" if use_warm else "cold"))

        duration_ms = (time.perf_counter() - start) * 1000.0
        _, peak_bytes = tracemalloc.get_traced_memory()
        tracemalloc.stop()

        if hasattr(gc, "get_stats"):
            gc_stats = gc.get_stats()
            gc_gen2 = gc_stats[2]["collections"] if len(gc_stats) > 2 else 0
        else:
            counts = gc.get_count()
            gc_gen2 = counts[2] if len(counts) > 2 else 0

        throughput = (len(timings) / (duration_ms / 1000.0)) if duration_ms else 0.0
        record = {
            "run": mode,
            "startedAtUtc": started_at,
            "evaluations": len(timings),
            "durationMs": round(duration_ms, 3),
            "throughputPerSec": round(throughput, 3),
            "addedLatencyP50Ms": round(percentile(timings, 50), 4),
            "addedLatencyP95Ms": round(percentile(timings, 95), 4),
            "addedLatencyP99Ms": round(percentile(timings, 99), 4),
            "rssMb": round(peak_bytes / (1024 * 1024), 3),
            "managedMb": round(peak_bytes / (1024 * 1024), 3),
            "gcGen2": gc_gen2,
            "cacheHitRate": round(self.cache_hits / max(1, self.cache_hits + self.cache_misses), 4),
        }
        return record, timings


def write_ndjson(records: Iterable[dict], output: Path):
    output.parent.mkdir(parents=True, exist_ok=True)
    with output.open("w", encoding="utf-8") as handle:
        for record in records:
            handle.write(json.dumps(record, separators=(",", ":"), sort_keys=True) + "\n")


def parse_args(argv=None) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Policy evaluation reachability cache benchmark")
    parser.add_argument("--policies", required=True, help="Policy baseline NDJSON")
    parser.add_argument("--reachability-cache", required=True, help="Cache NDJSON from reachability bench")
    parser.add_argument("--output", default="results/policy-cache.ndjson", help="Output NDJSON path")
    parser.add_argument("--seed", type=int, default=20250101, help="Seed for deterministic ordering")
    parser.add_argument("--threads", type=int, default=1, help="Thread count (unused; deterministic single-thread)")
    return parser.parse_args(argv)


def main(argv=None):
    args = parse_args(argv)
    policies = load_ndjson(Path(args.policies))
    reachability_cache = load_ndjson(Path(args.reachability_cache))

    bench = PolicyCacheBench(policies=policies, reachability_cache=reachability_cache, seed=args.seed)

    cold_record, _ = bench.run("cold")
    warm_record, _ = bench.run("warm")
    mixed_record, _ = bench.run("mixed")

    output = Path(args.output)
    write_ndjson([cold_record, warm_record, mixed_record], output)
    print(f"Wrote {output} with {len(policies)} policy evaluations per run")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
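The blake2b bucketing above is a pure function of the policyId, so the policy-to-function mapping is reproducible across harness instances. A sketch with a hypothetical four-entry cache fixture:

```python
# Sketch: two independently constructed harnesses map the same policyId
# to the same function (fixture below is hypothetical).
import policy_cache_bench as bench

cache = [{"function": f"fn-{i}", "fanout": 1, "runtimeCount": 1} for i in range(4)]
a = bench.PolicyCacheBench(policies=[], reachability_cache=cache, seed=20250101)
b = bench.PolicyCacheBench(policies=[], reachability_cache=cache, seed=20250101)
assert a.map_policy_to_function("pol-0001") == b.map_policy_to_function("pol-0001")
```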
src/Bench/StellaOps.Bench/PolicyCache/results/policy-cache.ndjson (new file, 3 lines)
@@ -0,0 +1,3 @@
{"addedLatencyP50Ms":0.0025,"addedLatencyP95Ms":0.0062,"addedLatencyP99Ms":0.0131,"cacheHitRate":0.0,"durationMs":53.031,"evaluations":5000,"gcGen2":1,"managedMb":0.293,"rssMb":0.293,"run":"cold","startedAtUtc":"2025-12-11T20:54:07.407921+00:00","throughputPerSec":94284.475}
{"addedLatencyP50Ms":0.0025,"addedLatencyP95Ms":0.0062,"addedLatencyP99Ms":0.0131,"cacheHitRate":0.5,"durationMs":27.123,"evaluations":5000,"gcGen2":2,"managedMb":0.079,"rssMb":0.079,"run":"warm","startedAtUtc":"2025-12-11T20:54:07.465328+00:00","throughputPerSec":184346.749}
{"addedLatencyP50Ms":0.0026,"addedLatencyP95Ms":0.0062,"addedLatencyP99Ms":0.0131,"cacheHitRate":0.5667,"durationMs":34.751,"evaluations":5000,"gcGen2":3,"managedMb":0.19,"rssMb":0.19,"run":"mixed","startedAtUtc":"2025-12-11T20:54:07.496163+00:00","throughputPerSec":143878.682}
src/Bench/StellaOps.Bench/PolicyCache/results/policy-cache.ndjson.sha256 (new file, 1 line)
@@ -0,0 +1 @@
b25802b72d8e2d3767b0d608e80e899f15e897b175cc419cb28fd714e8c82a74 policy-cache.ndjson
src/Bench/StellaOps.Bench/PolicyCache/tests/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Package marker for policy cache bench tests.
Binary file not shown.
@@ -0,0 +1,43 @@
import sys
import unittest
from pathlib import Path

ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

import policy_cache_bench as bench


class PolicyCacheBenchTests(unittest.TestCase):
    def test_runs_cold_warm_mixed(self):
        policies = [
            {"policyId": "pol-0001", "package": "bench.pkg.0001"},
            {"policyId": "pol-0002", "package": "bench.pkg.0002"},
            {"policyId": "pol-0003", "package": "bench.pkg.0003"},
        ]
        reachability_cache = [
            {"function": "fn-A", "fanout": 2, "runtimeCount": 3},
            {"function": "fn-B", "fanout": 1, "runtimeCount": 1},
            {"function": "fn-C", "fanout": 0, "runtimeCount": 0},
        ]

        harness = bench.PolicyCacheBench(policies, reachability_cache, seed=20250101)
        cold_record, _ = harness.run("cold")
        warm_record, _ = harness.run("warm")
        mixed_record, _ = harness.run("mixed")

        self.assertEqual(cold_record["run"], "cold")
        self.assertEqual(warm_record["run"], "warm")
        self.assertEqual(mixed_record["run"], "mixed")
        self.assertGreaterEqual(warm_record["cacheHitRate"], cold_record["cacheHitRate"])
        self.assertGreater(mixed_record["evaluations"], 0)

    def test_percentile(self):
        values = [1, 2, 3, 4]
        self.assertAlmostEqual(bench.percentile(values, 50), 2.5)
        self.assertAlmostEqual(bench.percentile(values, 95), 3.85, places=2)


if __name__ == "__main__":
    unittest.main()
src/Bench/StellaOps.Bench/PolicyDelta/README.md (new file, 22 lines)
@@ -0,0 +1,22 @@
# Policy Delta Benchmark

Measures the performance difference between full policy evaluation and incremental (delta) updates.

## Inputs
- Baseline: `docs/samples/policy/policy-delta-baseline.ndjson` (+ `.sha256`).
- Delta patch: `docs/samples/policy/policy-delta-changes.ndjson` (+ `.sha256`).
- All inputs are deterministic and offline.

## Running
```bash
python policy_delta_bench.py --baseline ../../../../docs/samples/policy/policy-delta-baseline.ndjson --delta ../../../../docs/samples/policy/policy-delta-changes.ndjson --output results/policy-delta.ndjson --threads 1 --seed 20250101
```

## Output
- NDJSON with two records: `full` and `delta`.
- Fields: `run`, `startedAtUtc`, `durationMs`, `evaluationsPerSec`, `p50Ms`, `p95Ms`, `p99Ms`, `rssMb`, `managedMb`, `gcGen2`, `items`.

## Determinism
- Fixed seed controls any randomized selection; processing order follows dataset order.
- Single-threaded mode recommended (`--threads 1`) for reproducible timing.
- JSON output uses sorted keys for stable diffs.
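As a sketch, the full-vs-delta speedup can be derived from the two emitted records (output path assumed from the run above; `write_ndjson` writes the `full` record first):

```python
# Sketch: compute the incremental-update speedup from the results file.
import json
from pathlib import Path

full, delta = [json.loads(line) for line in Path("results/policy-delta.ndjson").read_text(encoding="utf-8").splitlines()]
speedup = full["durationMs"] / max(delta["durationMs"], 1e-9)
print(f"delta pass is {speedup:.1f}x faster over {delta['items']} touched policies")
```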
src/Bench/StellaOps.Bench/PolicyDelta/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Package marker for policy delta benchmark harness.
Binary file not shown.
src/Bench/StellaOps.Bench/PolicyDelta/policy_delta_bench.py (new file, 152 lines)
@@ -0,0 +1,152 @@
"""Policy delta benchmark harness.

Runs a full evaluation over a baseline snapshot, then replays delta operations
to measure incremental evaluation cost. Outputs deterministic NDJSON suitable
for offline/CI runs.
"""

import argparse
import gc
import hashlib
import json
import random
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Iterable, List, Tuple


def percentile(values: List[float], pct: float) -> float:
    if not values:
        return 0.0
    ordered = sorted(values)
    k = (len(ordered) - 1) * (pct / 100.0)
    lower = int(k)
    upper = min(lower + 1, len(ordered) - 1)
    if lower == upper:
        return float(ordered[lower])
    fraction = k - lower
    return float(ordered[lower] + (ordered[upper] - ordered[lower]) * fraction)


def load_ndjson(path: Path) -> List[dict]:
    with path.open(encoding="utf-8") as handle:
        return [json.loads(line) for line in handle if line.strip()]


class PolicyDeltaBench:
    def __init__(self, seed: int, threads: int):
        self.seed = seed
        self.threads = threads
        self.rng = random.Random(seed)
        self.store: dict[str, dict] = {}

    def load_baseline(self, records: Iterable[dict]):
        self.store = {record["policyId"]: record for record in records}

    def apply_delta(self, delta_records: Iterable[dict]) -> List[str]:
        touched: List[str] = []
        for record in delta_records:
            policy_id = record["policyId"]
            op = record.get("op", "upsert")
            if op == "delete":
                self.store.pop(policy_id, None)
            else:
                self.store[policy_id] = record
            touched.append(policy_id)
        return touched

    def evaluate_policy(self, record: dict) -> int:
        key = f"{record['policyId']}|{record.get('package','')}|{record.get('version','')}"
        digest = hashlib.sha256(key.encode("utf-8")).digest()
        iterations = 35 + digest[0] % 25
        score = 0
        for i in range(iterations):
            score ^= (digest[i % len(digest)] + (i * 17)) & 0xFFFFFFFF
            score = (score * 31 + digest[(i + 3) % len(digest)]) % 1_000_003
        return score


def run_pass(pass_name: str, bench: PolicyDeltaBench, records: Iterable[dict]) -> Tuple[dict, List[float]]:
    import tracemalloc

    started_at = datetime.now(timezone.utc).isoformat()
    timings_ms: List[float] = []

    gc.collect()
    tracemalloc.start()
    start = time.perf_counter()
    for record in records:
        t0 = time.perf_counter()
        bench.evaluate_policy(record)
        timings_ms.append((time.perf_counter() - t0) * 1000.0)
    duration_ms = (time.perf_counter() - start) * 1000.0
    _, peak_bytes = tracemalloc.get_traced_memory()
    tracemalloc.stop()

    if hasattr(gc, "get_stats"):
        gc_stats = gc.get_stats()
        gc_gen2 = gc_stats[2]["collections"] if len(gc_stats) > 2 else 0
    else:
        counts = gc.get_count()
        gc_gen2 = counts[2] if len(counts) > 2 else 0

    throughput = (len(timings_ms) / (duration_ms / 1000.0)) if duration_ms else 0.0
    record = {
        "run": pass_name,
        "startedAtUtc": started_at,
        "durationMs": round(duration_ms, 3),
        "evaluationsPerSec": round(throughput, 3),
        "p50Ms": round(percentile(timings_ms, 50), 3),
        "p95Ms": round(percentile(timings_ms, 95), 3),
        "p99Ms": round(percentile(timings_ms, 99), 3),
        "rssMb": round(peak_bytes / (1024 * 1024), 3),
        "managedMb": round(peak_bytes / (1024 * 1024), 3),
        "gcGen2": gc_gen2,
        "items": len(timings_ms),
    }
    return record, timings_ms


def write_ndjson(records: List[dict], output: Path):
    output.parent.mkdir(parents=True, exist_ok=True)
    with output.open("w", encoding="utf-8") as handle:
        for record in records:
            handle.write(json.dumps(record, separators=(",", ":"), sort_keys=True) + "\n")


def parse_args(argv: List[str] | None = None) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Policy delta benchmark harness")
    parser.add_argument("--baseline", required=True, help="Path to baseline NDJSON")
    parser.add_argument("--delta", required=True, help="Path to delta NDJSON")
    parser.add_argument("--output", default="results/policy-delta.ndjson", help="Output NDJSON path")
    parser.add_argument("--threads", type=int, default=1, help="Thread count (1 for deterministic mode)")
    parser.add_argument("--seed", type=int, default=20250101, help="Seed for deterministic replay")
    return parser.parse_args(argv)


def main(argv: List[str] | None = None):
    args = parse_args(argv)
    baseline_path = Path(args.baseline)
    delta_path = Path(args.delta)

    baseline_records = load_ndjson(baseline_path)
    delta_records = load_ndjson(delta_path)

    bench = PolicyDeltaBench(seed=args.seed, threads=args.threads)
    bench.load_baseline(baseline_records)

    full_record, _ = run_pass("full", bench, baseline_records)

    touched_policy_ids = bench.apply_delta(delta_records)
    to_evaluate = [bench.store[pid] for pid in touched_policy_ids if pid in bench.store]
    delta_record, _ = run_pass("delta", bench, to_evaluate)

    output_path = Path(args.output)
    write_ndjson([full_record, delta_record], output_path)
    print(f"Wrote {output_path} with {len(baseline_records)} baseline items and {len(to_evaluate)} delta items")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
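A small sketch of the `apply_delta` semantics above: upserts land in the store, deletes drop keys, and `main()` filters the touched ids against the store so deleted policies are not re-evaluated (the records below are hypothetical):

```python
# Sketch: delete removes the policy; only surviving touched ids get re-evaluated.
import policy_delta_bench as bench

harness = bench.PolicyDeltaBench(seed=20250101, threads=1)
harness.load_baseline([{"policyId": "pol-0001"}, {"policyId": "pol-0002"}])
touched = harness.apply_delta([
    {"op": "delete", "policyId": "pol-0001"},
    {"op": "upsert", "policyId": "pol-0003"},
])
to_eval = [harness.store[pid] for pid in touched if pid in harness.store]
assert "pol-0001" not in harness.store
assert [rec["policyId"] for rec in to_eval] == ["pol-0003"]
```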
src/Bench/StellaOps.Bench/PolicyDelta/results/policy-delta.ndjson (new file, 2 lines)
@@ -0,0 +1,2 @@
{"durationMs":1240.851,"evaluationsPerSec":4029.492,"gcGen2":1,"items":5000,"managedMb":0.155,"p50Ms":0.246,"p95Ms":0.31,"p99Ms":0.36,"rssMb":0.155,"run":"full","startedAtUtc":"2025-12-11T20:50:11.478768+00:00"}
{"durationMs":123.701,"evaluationsPerSec":4041.995,"gcGen2":2,"items":500,"managedMb":0.016,"p50Ms":0.246,"p95Ms":0.311,"p99Ms":0.345,"rssMb":0.016,"run":"delta","startedAtUtc":"2025-12-11T20:50:12.723765+00:00"}
src/Bench/StellaOps.Bench/PolicyDelta/results/policy-delta.ndjson.sha256 (new file, 1 line)
@@ -0,0 +1 @@
73ab974a9df5facc9c1f848bd2c953576bcca898e2af3269058bbc287c8f03dc policy-delta.ndjson
src/Bench/StellaOps.Bench/PolicyDelta/tests/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Package marker for policy delta bench tests.
Binary file not shown.
@@ -0,0 +1,68 @@
import json
import sys
import tempfile
import unittest
from pathlib import Path

ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

import policy_delta_bench as bench


def build_baseline(tmp_path: Path) -> Path:
    path = tmp_path / "baseline.ndjson"
    records = [
        {"policyId": "pol-0001", "package": "bench.pkg.0001", "version": "1.0.0", "decision": "allow"},
        {"policyId": "pol-0002", "package": "bench.pkg.0002", "version": "1.0.0", "decision": "deny"},
    ]
    with path.open("w", encoding="utf-8") as handle:
        for record in records:
            handle.write(json.dumps(record, separators=(",", ":")) + "\n")
    return path


def build_delta(tmp_path: Path) -> Path:
    path = tmp_path / "delta.ndjson"
    records = [
        {"op": "upsert", "policyId": "pol-0002", "package": "bench.pkg.0002", "version": "1.1.0", "decision": "allow"},
        {"op": "delete", "policyId": "pol-0001", "package": "bench.pkg.0001", "version": "1.0.0"},
        {"op": "upsert", "policyId": "pol-0003", "package": "bench.pkg.0003", "version": "1.0.0", "decision": "allow"},
    ]
    with path.open("w", encoding="utf-8") as handle:
        for record in records:
            handle.write(json.dumps(record, separators=(",", ":")) + "\n")
    return path


class PolicyDeltaBenchTests(unittest.TestCase):
    def test_runs_full_and_delta(self):
        tmp_dir = Path(tempfile.mkdtemp(prefix="policy-bench-test-"))
        baseline = build_baseline(tmp_dir)
        delta = build_delta(tmp_dir)

        baseline_records = bench.load_ndjson(baseline)
        delta_records = bench.load_ndjson(delta)

        harness = bench.PolicyDeltaBench(seed=20250101, threads=1)
        harness.load_baseline(baseline_records)

        full_record, _ = bench.run_pass("full", harness, baseline_records)
        touched = harness.apply_delta(delta_records)
        evaluate_set = [harness.store[pid] for pid in touched if pid in harness.store]
        delta_record, _ = bench.run_pass("delta", harness, evaluate_set)

        self.assertEqual(full_record["run"], "full")
        self.assertEqual(delta_record["run"], "delta")
        self.assertLessEqual(delta_record["items"], full_record["items"])
        self.assertGreaterEqual(delta_record["evaluationsPerSec"], 0)

    def test_percentile(self):
        values = [1, 2, 3, 4]
        self.assertAlmostEqual(bench.percentile(values, 50), 2.5)
        self.assertAlmostEqual(bench.percentile(values, 95), 3.85, places=2)


if __name__ == "__main__":
    unittest.main()
src/Bench/StellaOps.Bench/Signals/README.md (new file, 24 lines)
@@ -0,0 +1,24 @@
# Reachability Bench

Benchmarks the reachability scoring pipeline using offline synthetic fixtures.

## Inputs
- Callgraph fixtures: `docs/samples/signals/reachability/callgraph-10k.ndjson` and `callgraph-50k.ndjson`.
- Runtime traces: `docs/samples/signals/reachability/runtime-10k.ndjson` and `runtime-50k.ndjson`.
- Schema hash: `docs/benchmarks/signals/reachability-schema.json` (sha256 `aaa5c8ab5cc2fe91e50976fafd8c73597387ab9a881af6d5d9818d202beba24e`).

## Running
```bash
python reachability_bench.py --callgraph ../../../../docs/samples/signals/reachability/callgraph-10k.ndjson --runtime ../../../../docs/samples/signals/reachability/runtime-10k.ndjson --output results/reachability-metrics-10k.ndjson --cache-output results/reachability-cache-10k.ndjson --threads 1 --seed 20250101
```

Swap the input paths for the 50k fixtures to exercise the larger dataset.

## Output
- Metrics NDJSON with fields: `run`, `startedAtUtc`, `functions`, `runtimeEvents`, `facts`, `durationMs`, `factsPerSec`, `p50MsPerNode`, `p95MsPerNode`, `p99MsPerNode`, `rssMb`, `managedMb`, `gcGen2`.
- Cache NDJSON (`reachability-cache-*.ndjson`) with per-function reachability flags, fanout, and runtime counts for downstream policy benches.

## Determinism
- Processing order is sorted by runtime function id; graph traversal preserves deterministic queueing.
- Single-threaded execution avoids nondeterministic scheduling.
- Output JSON keys are sorted for stable diffs; timestamps use UTC ISO-8601.
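A minimal sketch of checking the pinned schema digest before a run, using the path and sha256 quoted above:

```python
# Sketch: fail fast if the reachability schema drifted from the pinned digest.
import hashlib
from pathlib import Path

schema = Path("docs/benchmarks/signals/reachability-schema.json")
digest = hashlib.sha256(schema.read_bytes()).hexdigest()
assert digest == "aaa5c8ab5cc2fe91e50976fafd8c73597387ab9a881af6d5d9818d202beba24e"
```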
src/Bench/StellaOps.Bench/Signals/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Package marker for reachability bench.
Binary file not shown.
src/Bench/StellaOps.Bench/Signals/reachability_bench.py (new file, 151 lines)
@@ -0,0 +1,151 @@
"""Reachability scoring benchmark.

Processes synthetic callgraph and runtime traces to measure facts/sec, latency,
and memory. Outputs both metrics and a cache file consumable by policy benches.
"""

import argparse
import gc
import json
import time
from collections import deque
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, Iterable, List, Tuple


def percentile(values: List[float], pct: float) -> float:
    if not values:
        return 0.0
    ordered = sorted(values)
    k = (len(ordered) - 1) * (pct / 100.0)
    lower = int(k)
    upper = min(lower + 1, len(ordered) - 1)
    if lower == upper:
        return float(ordered[lower])
    fraction = k - lower
    return float(ordered[lower] + (ordered[upper] - ordered[lower]) * fraction)


def load_ndjson(path: Path) -> List[dict]:
    with path.open(encoding="utf-8") as handle:
        return [json.loads(line) for line in handle if line.strip()]


def build_graph(callgraph_records: Iterable[dict]) -> Dict[str, List[str]]:
    graph: Dict[str, List[str]] = {}
    for record in callgraph_records:
        graph[record["function"]] = record.get("calls", [])
    return graph


def build_runtime(runtime_records: Iterable[dict]) -> Dict[str, int]:
    runtime: Dict[str, int] = {}
    for record in runtime_records:
        runtime[record["function"]] = runtime.get(record["function"], 0) + int(record.get("count", 0))
    return runtime


def run_reachability(graph: Dict[str, List[str]], runtime: Dict[str, int]) -> Tuple[dict, List[dict]]:
    import tracemalloc

    started_at = datetime.now(timezone.utc).isoformat()
    visited = set()
    cache_records: List[dict] = []
    timings_ms: List[float] = []
    facts = 0

    queue = deque(sorted(runtime.keys()))
    gc.collect()
    tracemalloc.start()
    start = time.perf_counter()

    while queue:
        fn = queue.popleft()
        if fn in visited:
            continue
        t0 = time.perf_counter()
        visited.add(fn)
        calls = graph.get(fn, [])
        facts += len(calls)
        for callee in calls:
            if callee not in visited:
                queue.append(callee)
        timings_ms.append((time.perf_counter() - t0) * 1000.0)
        cache_records.append(
            {
                "function": fn,
                "reachable": True,
                "runtimeCount": runtime.get(fn, 0),
                "fanout": len(calls),
            }
        )

    duration_ms = (time.perf_counter() - start) * 1000.0
    _, peak_bytes = tracemalloc.get_traced_memory()
    tracemalloc.stop()

    if hasattr(gc, "get_stats"):
        gc_stats = gc.get_stats()
        gc_gen2 = gc_stats[2]["collections"] if len(gc_stats) > 2 else 0
    else:
        counts = gc.get_count()
        gc_gen2 = counts[2] if len(counts) > 2 else 0

    metrics = {
        "run": "reachability",
        "startedAtUtc": started_at,
        "functions": len(graph),
        "runtimeEvents": len(runtime),
        "facts": facts,
        "durationMs": round(duration_ms, 3),
        "factsPerSec": round(facts / (duration_ms / 1000.0) if duration_ms else 0.0, 3),
        "p50MsPerNode": round(percentile(timings_ms, 50), 4),
        "p95MsPerNode": round(percentile(timings_ms, 95), 4),
        "p99MsPerNode": round(percentile(timings_ms, 99), 4),
        "rssMb": round(peak_bytes / (1024 * 1024), 3),
        "managedMb": round(peak_bytes / (1024 * 1024), 3),
        "gcGen2": gc_gen2,
    }

    return metrics, cache_records


def write_ndjson(records: Iterable[dict], output: Path):
    output.parent.mkdir(parents=True, exist_ok=True)
    with output.open("w", encoding="utf-8") as handle:
        for record in records:
            handle.write(json.dumps(record, separators=(",", ":"), sort_keys=True) + "\n")


def parse_args(argv=None) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Reachability scoring benchmark")
    parser.add_argument("--callgraph", required=True, help="Path to callgraph NDJSON")
    parser.add_argument("--runtime", required=True, help="Path to runtime NDJSON")
    parser.add_argument("--output", default="results/reachability-metrics.ndjson", help="Output metrics NDJSON")
    parser.add_argument("--cache-output", default="results/reachability-cache.ndjson", help="Cache output NDJSON")
    parser.add_argument("--threads", type=int, default=1, help="Thread count (unused; for compatibility)")
    parser.add_argument("--seed", type=int, default=20250101, help="Seed placeholder for deterministic behaviour")
    return parser.parse_args(argv)


def main(argv=None):
    args = parse_args(argv)
    callgraph_records = load_ndjson(Path(args.callgraph))
    runtime_records = load_ndjson(Path(args.runtime))

    graph = build_graph(callgraph_records)
    runtime = build_runtime(runtime_records)

    metrics, cache_records = run_reachability(graph, runtime)
    write_ndjson([metrics], Path(args.output))
    write_ndjson(cache_records, Path(args.cache_output))

    print(
        f"Wrote metrics to {args.output} and cache with {len(cache_records)} entries to {args.cache_output}"
    )
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
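A quick in-process sketch of the traversal above, on a hypothetical three-node chain: every function reachable from a runtime-observed entry point lands in the cache with its fanout, and each traversed edge counts as one fact:

```python
# Sketch: BFS from runtime-observed entry points; cache carries per-node fanout.
import reachability_bench as bench

graph = {"fn-A": ["fn-B"], "fn-B": ["fn-C"], "fn-C": []}
runtime = {"fn-A": 2}
metrics, cache = bench.run_reachability(graph, runtime)
assert metrics["facts"] == 2  # edges A->B and B->C
assert [rec["function"] for rec in cache] == ["fn-A", "fn-B", "fn-C"]
```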
src/Bench/StellaOps.Bench/Signals/results/reachability-cache-10k.ndjson (new file, 10000 lines)
File diff suppressed because it is too large.
src/Bench/StellaOps.Bench/Signals/results/reachability-cache-10k.ndjson.sha256 (new file, 1 line)
@@ -0,0 +1 @@
415490c6ea6185c918a2205ad3a5bca99420d58da322084c6f61cbc1242fde2b reachability-cache-10k.ndjson
src/Bench/StellaOps.Bench/Signals/results/reachability-cache-50k.ndjson (new file, 50000 lines)
File diff suppressed because it is too large.
src/Bench/StellaOps.Bench/Signals/results/reachability-cache-50k.ndjson.sha256 (new file, 1 line)
@@ -0,0 +1 @@
a8d80bf1914e3b0a339520a9a2ba7b60718434ef0a7d44687f0d09ebe1ac5830 reachability-cache-50k.ndjson
src/Bench/StellaOps.Bench/Signals/results/reachability-metrics-10k.ndjson (new file, 1 line)
@@ -0,0 +1 @@
{"durationMs":58.532,"facts":30000,"factsPerSec":512540.149,"functions":10000,"gcGen2":1,"managedMb":2.66,"p50MsPerNode":0.0031,"p95MsPerNode":0.0036,"p99MsPerNode":0.0062,"rssMb":2.66,"run":"reachability","runtimeEvents":1000,"startedAtUtc":"2025-12-11T20:52:24.336490+00:00"}
src/Bench/StellaOps.Bench/Signals/results/reachability-metrics-10k.ndjson.sha256 (new file, 1 line)
@@ -0,0 +1 @@
c6bf61890712d802b3ad288446b4754396774dfaa0d1e8502dc01ba6e8391cd0 reachability-metrics-10k.ndjson
src/Bench/StellaOps.Bench/Signals/results/reachability-metrics-50k.ndjson (new file, 1 line)
@@ -0,0 +1 @@
{"durationMs":304.323,"facts":150000,"factsPerSec":492897.349,"functions":50000,"gcGen2":1,"managedMb":12.811,"p50MsPerNode":0.0033,"p95MsPerNode":0.004,"p99MsPerNode":0.007,"rssMb":12.811,"run":"reachability","runtimeEvents":5000,"startedAtUtc":"2025-12-11T20:52:33.262306+00:00"}
src/Bench/StellaOps.Bench/Signals/results/reachability-metrics-50k.ndjson.sha256 (new file, 1 line)
@@ -0,0 +1 @@
7686b8ffef892a6fa6e207a8a051786facbfa085e713ea3c717a7c2ae8ade97c reachability-metrics-50k.ndjson
src/Bench/StellaOps.Bench/Signals/tests/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Package marker for reachability bench tests.
Binary file not shown.
@@ -0,0 +1,52 @@
import json
import sys
import tempfile
import unittest
from pathlib import Path

ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

import reachability_bench as bench


class ReachabilityBenchTests(unittest.TestCase):
    def test_reachability_metrics_and_cache(self):
        tmp_dir = Path(tempfile.mkdtemp(prefix="reachability-bench-test-"))
        callgraph_path = tmp_dir / "callgraph.ndjson"
        runtime_path = tmp_dir / "runtime.ndjson"

        callgraph_records = [
            {"function": "fn-A", "calls": ["fn-B"], "weight": 1},
            {"function": "fn-B", "calls": ["fn-C"], "weight": 1},
            {"function": "fn-C", "calls": [], "weight": 1},
        ]
        runtime_records = [
            {"function": "fn-A", "count": 2, "timestamp": "2025-01-01T00:00:00Z"},
        ]

        with callgraph_path.open("w", encoding="utf-8") as handle:
            for rec in callgraph_records:
                handle.write(json.dumps(rec) + "\n")
        with runtime_path.open("w", encoding="utf-8") as handle:
            for rec in runtime_records:
                handle.write(json.dumps(rec) + "\n")

        graph = bench.build_graph(bench.load_ndjson(callgraph_path))
        runtime = bench.build_runtime(bench.load_ndjson(runtime_path))

        metrics, cache = bench.run_reachability(graph, runtime)
        self.assertEqual(metrics["functions"], 3)
        self.assertEqual(metrics["runtimeEvents"], 1)
        self.assertEqual(metrics["facts"], 2)
        self.assertEqual(len(cache), 3)

    def test_percentile(self):
        values = [1, 2, 3]
        self.assertAlmostEqual(bench.percentile(values, 50), 2.0)
        self.assertAlmostEqual(bench.percentile(values, 99), 2.98, places=2)


if __name__ == "__main__":
    unittest.main()
@@ -3,5 +3,9 @@
 | ID | Status | Sprint | Notes | Evidence |
 | --- | --- | --- | --- | --- |
 | BENCH-DETERMINISM-401-057 | DONE (2025-11-26) | SPRINT_0512_0001_0001_bench | Determinism harness and mock scanner added under `src/Bench/StellaOps.Bench/Determinism`; manifests + sample inputs included. | `src/Bench/StellaOps.Bench/Determinism/results` (generated) |
-| BENCH-GRAPH-21-001 | DOING (2025-12-01) | SPRINT_0512_0001_0001_bench | Added interim graph bench harness (`Graph/graph_bench.py`) using synthetic 50k/100k fixtures; measures adjacency build + depth-3 reach; pending overlay schema for final fixture integration. | `src/Bench/StellaOps.Bench/Graph` |
-| BENCH-GRAPH-21-002 | DOING (2025-12-01) | SPRINT_0512_0001_0001_bench | Added Graph UI bench scaffold (scenarios JSON + driver + plan) using interim fixtures; awaits overlay schema/UI target for Playwright binding and timing collection. | `src/Bench/StellaOps.Bench/Graph` |
+| BENCH-GRAPH-21-001 | DONE (2025-12-02) | SPRINT_0512_0001_0001_bench | Graph viewport/path harness with overlay support using canonical `samples/graph/graph-40k` fixture; results captured under `Graph/results`. | `src/Bench/StellaOps.Bench/Graph` |
+| BENCH-GRAPH-21-002 | DONE (2025-12-02) | SPRINT_0512_0001_0001_bench | Graph UI Playwright bench driver emitting trace/viewport metadata; linked to 40k fixture. | `src/Bench/StellaOps.Bench/Graph` |
+| BENCH-IMPACT-16-001 | DONE (2025-12-11) | SPRINT_0512_0001_0001_bench | ImpactIndex throughput bench with 10k productKey dataset + NDJSON outputs and unit tests. | `src/Bench/StellaOps.Bench/ImpactIndex` |
+| BENCH-POLICY-20-002 | DONE (2025-12-11) | SPRINT_0512_0001_0001_bench | Policy delta benchmark (full vs delta) using baseline/delta NDJSON fixtures; outputs hashed. | `src/Bench/StellaOps.Bench/PolicyDelta` |
+| BENCH-SIG-26-001 | DONE (2025-12-11) | SPRINT_0512_0001_0001_bench | Reachability scoring harness with schema hash, 10k/50k fixtures, cache outputs for downstream benches. | `src/Bench/StellaOps.Bench/Signals` |
+| BENCH-SIG-26-002 | DONE (2025-12-11) | SPRINT_0512_0001_0001_bench | Policy evaluation cache bench (cold/warm/mixed) consuming reachability caches; outputs hashed. | `src/Bench/StellaOps.Bench/PolicyCache` |
@@ -27,8 +27,8 @@
 - Offline posture: no external calls beyond allowlisted feeds; prefer cached schemas and local nugets in `local-nugets/`.

 ## Data & Environment
-- Canonical store: MongoDB (>=3.0 driver). Tests use `STELLAOPS_TEST_MONGO_URI`; fallback `mongodb://127.0.0.1:27017`, then Mongo2Go.
-- Collections: `graph_nodes`, `graph_edges`, `graph_overlays_cache`, `graph_snapshots`, `graph_saved_queries`.
+- Storage is currently in-memory (MongoDB dependency removed); persistent backing store to be added in a follow-up sprint.
+- Collections (historical naming) `graph_nodes`, `graph_edges`, `graph_overlays_cache`, `graph_snapshots`, `graph_saved_queries` map to in-memory structures for now.
 - Tenant isolation mandatory on every query and export.

 ## Testing Expectations
@@ -23,7 +23,7 @@ Provide tenant-scoped Graph Explorer APIs for search, query, paths, diffs, overl

 ## Tooling
 - .NET 10 preview Minimal API with async streaming; pipeline pattern for parsing/planning/fetching.
-- Mongo aggregation / adjacency store from Graph Indexer; optional caching layer.
+- Graph Indexer currently exposes in-memory adjacency storage (Mongo removed); optional caching layer.
 - SSE/WebSockets or chunked NDJSON responses for progressive loading.

 ## Definition of Done
@@ -5,7 +5,7 @@ Project SBOM, advisory, VEX, and policy overlay data into a tenant-scoped proper

 ## Scope
 - Service source under `src/Graph/StellaOps.Graph.Indexer` (workers, ingestion pipelines, schema builders).
-- Mongo collections/object storage for `graph_nodes`, `graph_edges`, `graph_snapshots`, clustering metadata.
+- In-memory graph storage for `graph_nodes`, `graph_edges`, `graph_snapshots`, clustering metadata (Mongo removed; durable store to follow).
 - Event consumers: SBOM ingest, Conseiller advisories, Excitor VEX, Policy overlay materials.
 - Incremental rebuild, diff, and cache warmers for graph overlays.
@@ -23,9 +23,9 @@ Project SBOM, advisory, VEX, and policy overlay data into a tenant-scoped proper

 ## Tooling
 - .NET 10 preview workers (HostedService + channel pipelines).
-- MongoDB for node/edge storage; S3-compatible buckets for layout tiles/snapshots if needed.
+- In-memory node/edge storage (Mongo removed); S3-compatible buckets for layout tiles/snapshots if needed.
 - Scheduler integration (jobs, change streams) to handle incremental updates.
-- Analytics: clustering/centrality pipelines with Mongo-backed snapshot provider and overlays; change-stream/backfill worker with idempotency store (Mongo or in-memory) and retry/backoff.
+- Analytics: clustering/centrality pipelines with in-memory snapshot provider and overlays; change-stream/backfill worker with in-memory idempotency store and retry/backoff.

 ## Definition of Done
 - Pipelines deterministic and tested; fixtures validated.
@@ -2,7 +2,6 @@ using System;
 using Microsoft.Extensions.DependencyInjection;
 using Microsoft.Extensions.DependencyInjection.Extensions;
 using Microsoft.Extensions.Options;
-using MongoDB.Driver;

 namespace StellaOps.Graph.Indexer.Analytics;

@@ -37,47 +36,4 @@ public static class GraphAnalyticsServiceCollectionExtensions

         return services;
     }

-    public static IServiceCollection AddGraphAnalyticsMongo(
-        this IServiceCollection services,
-        Action<GraphAnalyticsOptions>? configureOptions = null,
-        Action<MongoGraphSnapshotProviderOptions>? configureSnapshot = null,
-        Action<GraphAnalyticsWriterOptions>? configureWriter = null)
-    {
-        services.AddGraphAnalyticsPipeline(configureOptions);
-
-        if (configureSnapshot is not null)
-        {
-            services.Configure(configureSnapshot);
-        }
-        else
-        {
-            services.Configure<MongoGraphSnapshotProviderOptions>(_ => { });
-        }
-
-        if (configureWriter is not null)
-        {
-            services.Configure(configureWriter);
-        }
-        else
-        {
-            services.Configure<GraphAnalyticsWriterOptions>(_ => { });
-        }
-
-        services.Replace(ServiceDescriptor.Singleton<IGraphSnapshotProvider>(sp =>
-        {
-            var db = sp.GetRequiredService<IMongoDatabase>();
-            var options = sp.GetRequiredService<IOptions<MongoGraphSnapshotProviderOptions>>();
-            return new MongoGraphSnapshotProvider(db, options.Value);
-        }));
-
-        services.Replace(ServiceDescriptor.Singleton<IGraphAnalyticsWriter>(sp =>
-        {
-            var db = sp.GetRequiredService<IMongoDatabase>();
-            var options = sp.GetRequiredService<IOptions<GraphAnalyticsWriterOptions>>();
-            return new MongoGraphAnalyticsWriter(db, options.Value);
-        }));
-
-        return services;
-    }
 }
@@ -1,117 +0,0 @@
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Globalization;
using System.Linq;
using System.Text.Json.Nodes;
using MongoDB.Bson;
using MongoDB.Driver;

namespace StellaOps.Graph.Indexer.Analytics;

public sealed class MongoGraphAnalyticsWriter : IGraphAnalyticsWriter
{
    private readonly IMongoCollection<BsonDocument> _clusters;
    private readonly IMongoCollection<BsonDocument> _centrality;
    private readonly IMongoCollection<BsonDocument> _nodes;
    private readonly GraphAnalyticsWriterOptions _options;

    public MongoGraphAnalyticsWriter(IMongoDatabase database, GraphAnalyticsWriterOptions? options = null)
    {
        ArgumentNullException.ThrowIfNull(database);

        _options = options ?? new GraphAnalyticsWriterOptions();
        _clusters = database.GetCollection<BsonDocument>(_options.ClusterCollectionName);
        _centrality = database.GetCollection<BsonDocument>(_options.CentralityCollectionName);
        _nodes = database.GetCollection<BsonDocument>(_options.NodeCollectionName);
    }

    public async Task PersistClusterAssignmentsAsync(GraphAnalyticsSnapshot snapshot, ImmutableArray<ClusterAssignment> assignments, CancellationToken cancellationToken)
    {
        cancellationToken.ThrowIfCancellationRequested();

        if (assignments.Length == 0)
        {
            return;
        }

        var models = new List<WriteModel<BsonDocument>>(assignments.Length);
        foreach (var assignment in assignments)
        {
            var filter = Builders<BsonDocument>.Filter.And(
                Builders<BsonDocument>.Filter.Eq("tenant", snapshot.Tenant),
                Builders<BsonDocument>.Filter.Eq("snapshot_id", snapshot.SnapshotId),
                Builders<BsonDocument>.Filter.Eq("node_id", assignment.NodeId));

            var document = new BsonDocument
            {
                { "tenant", snapshot.Tenant },
                { "snapshot_id", snapshot.SnapshotId },
                { "node_id", assignment.NodeId },
                { "cluster_id", assignment.ClusterId },
                { "kind", assignment.Kind },
                { "generated_at", snapshot.GeneratedAt.UtcDateTime }
            };

            models.Add(new ReplaceOneModel<BsonDocument>(filter, document) { IsUpsert = true });
        }

        await _clusters.BulkWriteAsync(models, new BulkWriteOptions { IsOrdered = false }, cancellationToken).ConfigureAwait(false);

        if (_options.WriteClusterAssignmentsToNodes)
        {
            await WriteClustersToNodesAsync(assignments, cancellationToken).ConfigureAwait(false);
        }
    }

    public async Task PersistCentralityAsync(GraphAnalyticsSnapshot snapshot, ImmutableArray<CentralityScore> scores, CancellationToken cancellationToken)
    {
        cancellationToken.ThrowIfCancellationRequested();

        if (scores.Length == 0)
        {
            return;
        }

        var models = new List<WriteModel<BsonDocument>>(scores.Length);
        foreach (var score in scores)
        {
            var filter = Builders<BsonDocument>.Filter.And(
                Builders<BsonDocument>.Filter.Eq("tenant", snapshot.Tenant),
                Builders<BsonDocument>.Filter.Eq("snapshot_id", snapshot.SnapshotId),
                Builders<BsonDocument>.Filter.Eq("node_id", score.NodeId));

            var document = new BsonDocument
            {
                { "tenant", snapshot.Tenant },
                { "snapshot_id", snapshot.SnapshotId },
                { "node_id", score.NodeId },
                { "kind", score.Kind },
                { "degree", score.Degree },
                { "betweenness", score.Betweenness },
                { "generated_at", snapshot.GeneratedAt.UtcDateTime }
            };

            models.Add(new ReplaceOneModel<BsonDocument>(filter, document) { IsUpsert = true });
        }

        await _centrality.BulkWriteAsync(models, new BulkWriteOptions { IsOrdered = false }, cancellationToken).ConfigureAwait(false);
    }

    private async Task WriteClustersToNodesAsync(IEnumerable<ClusterAssignment> assignments, CancellationToken cancellationToken)
    {
        var models = new List<WriteModel<BsonDocument>>();
        foreach (var assignment in assignments)
        {
            var filter = Builders<BsonDocument>.Filter.Eq("id", assignment.NodeId);
            var update = Builders<BsonDocument>.Update.Set("attributes.cluster_id", assignment.ClusterId);
            models.Add(new UpdateOneModel<BsonDocument>(filter, update) { IsUpsert = false });
        }

        if (models.Count == 0)
        {
            return;
        }

        await _nodes.BulkWriteAsync(models, new BulkWriteOptions { IsOrdered = false }, cancellationToken).ConfigureAwait(false);
    }
}
@@ -1,79 +0,0 @@
|
||||
using System.Collections.Immutable;
|
||||
using System.Text.Json.Nodes;
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Graph.Indexer.Infrastructure;
|
||||
|
||||
namespace StellaOps.Graph.Indexer.Analytics;
|
||||
|
||||
public sealed class MongoGraphSnapshotProvider : IGraphSnapshotProvider
|
||||
{
|
||||
private readonly IMongoCollection<BsonDocument> _snapshots;
|
||||
private readonly IMongoCollection<BsonDocument> _progress;
|
||||
private readonly MongoGraphSnapshotProviderOptions _options;
|
||||
|
||||
public MongoGraphSnapshotProvider(IMongoDatabase database, MongoGraphSnapshotProviderOptions? options = null)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(database);
|
||||
_options = options ?? new MongoGraphSnapshotProviderOptions();
|
||||
_snapshots = database.GetCollection<BsonDocument>(_options.SnapshotCollectionName);
|
||||
_progress = database.GetCollection<BsonDocument>(_options.ProgressCollectionName);
|
||||
}
|
||||
|
||||
public async Task<IReadOnlyList<GraphAnalyticsSnapshot>> GetPendingSnapshotsAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
|
||||
var processedIds = await _progress
|
||||
.Find(FilterDefinition<BsonDocument>.Empty)
|
||||
.Project(doc => doc["snapshot_id"].AsString)
|
||||
.ToListAsync(cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
var filter = Builders<BsonDocument>.Filter.Nin("snapshot_id", processedIds);
|
||||
var snapshots = await _snapshots
|
||||
.Find(filter)
|
||||
.Limit(_options.MaxBatch)
|
||||
.Sort(Builders<BsonDocument>.Sort.Descending("generated_at"))
|
||||
.ToListAsync(cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
var result = new List<GraphAnalyticsSnapshot>(snapshots.Count);
|
||||
foreach (var snapshot in snapshots)
|
||||
{
|
||||
var tenant = snapshot.GetValue("tenant", string.Empty).AsString;
|
||||
var snapshotId = snapshot.GetValue("snapshot_id", string.Empty).AsString;
|
||||
var generatedAt = snapshot.TryGetValue("generated_at", out var generated)
|
||||
&& generated.BsonType == BsonType.DateTime
|
||||
? DateTime.SpecifyKind(generated.ToUniversalTime(), DateTimeKind.Utc)
|
||||
: DateTimeOffset.UtcNow;
|
||||
|
||||
var nodes = snapshot.TryGetValue("nodes", out var nodesValue) && nodesValue is BsonArray nodesArray
|
||||
? BsonJsonConverter.ToJsonArray(nodesArray).Select(n => (JsonObject)n!).ToImmutableArray()
|
||||
: ImmutableArray<JsonObject>.Empty;
|
||||
|
||||
var edges = snapshot.TryGetValue("edges", out var edgesValue) && edgesValue is BsonArray edgesArray
|
||||
? BsonJsonConverter.ToJsonArray(edgesArray).Select(n => (JsonObject)n!).ToImmutableArray()
|
||||
: ImmutableArray<JsonObject>.Empty;
|
||||
|
||||
result.Add(new GraphAnalyticsSnapshot(tenant, snapshotId, generatedAt, nodes, edges));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
public async Task MarkProcessedAsync(string tenant, string snapshotId, CancellationToken cancellationToken)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
|
||||
var filter = Builders<BsonDocument>.Filter.Eq("snapshot_id", snapshotId)
|
||||
& Builders<BsonDocument>.Filter.Eq("tenant", tenant);
|
||||
|
||||
var update = Builders<BsonDocument>.Update.Set("snapshot_id", snapshotId)
|
||||
.Set("tenant", tenant)
|
||||
.SetOnInsert("processed_at", DateTimeOffset.UtcNow.UtcDateTime);
|
||||
|
||||
await _progress.UpdateOneAsync(filter, update, new UpdateOptions { IsUpsert = true }, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
namespace StellaOps.Graph.Indexer.Analytics;
|
||||
|
||||
public sealed class MongoGraphSnapshotProviderOptions
|
||||
{
|
||||
public string SnapshotCollectionName { get; set; } = "graph_snapshots";
|
||||
public string ProgressCollectionName { get; set; } = "graph_analytics_progress";
|
||||
public int MaxBatch { get; set; } = 5;
|
||||
}
|
||||
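The provider and its options pair into a simple poll-and-acknowledge loop. A minimal sketch follows; `RunAnalyticsAsync` is a hypothetical stand-in for the cluster/centrality computation, and only the provider calls come from the code above:

```csharp
// Minimal consumption sketch. `database` is an IMongoDatabase resolved elsewhere;
// RunAnalyticsAsync is hypothetical and stands in for the analytics pipeline.
var provider = new MongoGraphSnapshotProvider(database);

var pending = await provider.GetPendingSnapshotsAsync(cancellationToken);
foreach (var snapshot in pending)
{
    await RunAnalyticsAsync(snapshot, cancellationToken);

    // Acknowledging removes the snapshot from future GetPendingSnapshotsAsync results.
    await provider.MarkProcessedAsync(snapshot.Tenant, snapshot.SnapshotId, cancellationToken);
}
```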
@@ -1,8 +1,7 @@
using System;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
using MongoDB.Driver;
using StellaOps.Graph.Indexer.Ingestion.Sbom;

namespace StellaOps.Graph.Indexer.Incremental;

@@ -23,58 +22,12 @@ public static class GraphChangeStreamServiceCollectionExtensions
            services.Configure<GraphChangeStreamOptions>(_ => { });
        }

        services.TryAddSingleton<IGraphChangeEventSource, NoOpGraphChangeEventSource>();
        services.TryAddSingleton<IGraphBackfillSource, NoOpGraphChangeEventSource>();
        services.TryAddSingleton<IIdempotencyStore, InMemoryIdempotencyStore>();
        services.TryAddSingleton<IGraphDocumentWriter, InMemoryGraphDocumentWriter>();
        services.AddSingleton<GraphBackfillMetrics>();
        services.AddHostedService<GraphChangeStreamProcessor>();
        return services;
    }

    public static IServiceCollection AddGraphChangeStreamProcessorWithMongo(
        this IServiceCollection services,
        Action<GraphChangeStreamOptions>? configureOptions = null,
        Action<MongoGraphChangeEventOptions>? configureChangeOptions = null,
        Action<MongoIdempotencyStoreOptions>? configureIdempotency = null)
    {
        services.AddGraphChangeStreamProcessor(configureOptions);

        if (configureChangeOptions is not null)
        {
            services.Configure(configureChangeOptions);
        }
        else
        {
            services.Configure<MongoGraphChangeEventOptions>(_ => { });
        }

        if (configureIdempotency is not null)
        {
            services.Configure(configureIdempotency);
        }
        else
        {
            services.Configure<MongoIdempotencyStoreOptions>(_ => { });
        }

        services.Replace(ServiceDescriptor.Singleton<IGraphChangeEventSource>(sp =>
        {
            var db = sp.GetRequiredService<IMongoDatabase>();
            var opts = sp.GetRequiredService<IOptions<MongoGraphChangeEventOptions>>();
            return new MongoGraphChangeEventSource(db, opts.Value);
        }));

        services.Replace(ServiceDescriptor.Singleton<IGraphBackfillSource>(sp =>
        {
            var db = sp.GetRequiredService<IMongoDatabase>();
            var opts = sp.GetRequiredService<IOptions<MongoGraphChangeEventOptions>>();
            return new MongoGraphChangeEventSource(db, opts.Value);
        }));

        services.Replace(ServiceDescriptor.Singleton<IIdempotencyStore>(sp =>
        {
            var db = sp.GetRequiredService<IMongoDatabase>();
            var opts = sp.GetRequiredService<IOptions<MongoIdempotencyStoreOptions>>();
            return new MongoIdempotencyStore(db, opts.Value);
        }));

        return services;
    }
}

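For consumers of these extensions, wiring the Mongo-backed pipeline might look like the sketch below. The connection string and option values are placeholders, and `AddGraphMongoDatabase` comes from the Infrastructure extensions later in this diff:

```csharp
// Hypothetical host wiring; values are illustrative, not prescribed defaults.
var services = new ServiceCollection();

services.AddGraphMongoDatabase(mongo =>
{
    mongo.ConnectionString = "mongodb://localhost:27017"; // placeholder dev instance
    mongo.DatabaseName = "stellaops-graph";
});

services.AddGraphChangeStreamProcessorWithMongo(
    configureChangeOptions: change => change.MaxBatchSize = 128,
    configureIdempotency: idem => idem.CollectionName = "graph_change_idempotency");
```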
@@ -1,10 +0,0 @@
namespace StellaOps.Graph.Indexer.Incremental;

public sealed class MongoGraphChangeEventOptions
{
    public string CollectionName { get; set; } = "graph_change_events";
    public string SequenceFieldName { get; set; } = "sequence_token";
    public string NodeArrayFieldName { get; set; } = "nodes";
    public string EdgeArrayFieldName { get; set; } = "edges";
    public int MaxBatchSize { get; set; } = 256;
}
@@ -1,72 +0,0 @@
using System.Collections.Immutable;
using System.Text.Json.Nodes;
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.Graph.Indexer.Infrastructure;

namespace StellaOps.Graph.Indexer.Incremental;

public sealed class MongoGraphChangeEventSource : IGraphChangeEventSource, IGraphBackfillSource
{
    private readonly IMongoCollection<BsonDocument> _collection;
    private readonly MongoGraphChangeEventOptions _options;

    public MongoGraphChangeEventSource(IMongoDatabase database, MongoGraphChangeEventOptions? options = null)
    {
        ArgumentNullException.ThrowIfNull(database);
        _options = options ?? new MongoGraphChangeEventOptions();
        _collection = database.GetCollection<BsonDocument>(_options.CollectionName);
    }

    public async IAsyncEnumerable<GraphChangeEvent> ReadAsync([System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken)
    {
        var filter = Builders<BsonDocument>.Filter.Eq("is_backfill", false);
        await foreach (var change in EnumerateAsync(filter, cancellationToken))
        {
            yield return change with { IsBackfill = false };
        }
    }

    public async IAsyncEnumerable<GraphChangeEvent> ReadBackfillAsync([System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken)
    {
        var filter = Builders<BsonDocument>.Filter.Eq("is_backfill", true);
        await foreach (var change in EnumerateAsync(filter, cancellationToken))
        {
            yield return change with { IsBackfill = true };
        }
    }

    private async IAsyncEnumerable<GraphChangeEvent> EnumerateAsync(FilterDefinition<BsonDocument> filter, [System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken)
    {
        var sort = Builders<BsonDocument>.Sort.Ascending(_options.SequenceFieldName);
        using var cursor = await _collection.FindAsync(filter, new FindOptions<BsonDocument> { Sort = sort, BatchSize = _options.MaxBatchSize }, cancellationToken).ConfigureAwait(false);

        while (await cursor.MoveNextAsync(cancellationToken).ConfigureAwait(false))
        {
            foreach (var doc in cursor.Current)
            {
                cancellationToken.ThrowIfCancellationRequested();

                var tenant = doc.GetValue("tenant", string.Empty).AsString;
                var snapshotId = doc.GetValue("snapshot_id", string.Empty).AsString;
                var sequence = doc.GetValue(_options.SequenceFieldName, string.Empty).AsString;

                var nodes = doc.TryGetValue(_options.NodeArrayFieldName, out var nodesValue) && nodesValue is BsonArray nodeArray
                    ? BsonJsonConverter.ToJsonArray(nodeArray).Select(n => (JsonObject)n!).ToImmutableArray()
                    : ImmutableArray<JsonObject>.Empty;

                var edges = doc.TryGetValue(_options.EdgeArrayFieldName, out var edgesValue) && edgesValue is BsonArray edgeArray
                    ? BsonJsonConverter.ToJsonArray(edgeArray).Select(n => (JsonObject)n!).ToImmutableArray()
                    : ImmutableArray<JsonObject>.Empty;

                yield return new GraphChangeEvent(
                    tenant,
                    snapshotId,
                    sequence,
                    nodes,
                    edges,
                    doc.GetValue("is_backfill", false).ToBoolean());
            }
        }
    }
}
@@ -1,34 +0,0 @@
using MongoDB.Bson;
using MongoDB.Driver;

namespace StellaOps.Graph.Indexer.Incremental;

public sealed class MongoIdempotencyStore : IIdempotencyStore
{
    private readonly IMongoCollection<BsonDocument> _collection;

    public MongoIdempotencyStore(IMongoDatabase database, MongoIdempotencyStoreOptions? options = null)
    {
        ArgumentNullException.ThrowIfNull(database);
        var resolved = options ?? new MongoIdempotencyStoreOptions();
        _collection = database.GetCollection<BsonDocument>(resolved.CollectionName);
    }

    public async Task<bool> HasSeenAsync(string sequenceToken, CancellationToken cancellationToken)
    {
        cancellationToken.ThrowIfCancellationRequested();
        var filter = Builders<BsonDocument>.Filter.Eq("sequence_token", sequenceToken);
        return await _collection.Find(filter).AnyAsync(cancellationToken).ConfigureAwait(false);
    }

    public async Task MarkSeenAsync(string sequenceToken, CancellationToken cancellationToken)
    {
        cancellationToken.ThrowIfCancellationRequested();
        var filter = Builders<BsonDocument>.Filter.Eq("sequence_token", sequenceToken);
        var update = Builders<BsonDocument>.Update.Set("sequence_token", sequenceToken)
            .SetOnInsert("recorded_at", DateTimeOffset.UtcNow.UtcDateTime);

        await _collection.UpdateOneAsync(filter, update, new UpdateOptions { IsUpsert = true }, cancellationToken)
            .ConfigureAwait(false);
    }
}
@@ -1,6 +0,0 @@
namespace StellaOps.Graph.Indexer.Incremental;

public sealed class MongoIdempotencyStoreOptions
{
    public string CollectionName { get; set; } = "graph_change_idempotency";
}
@@ -0,0 +1,23 @@
using System.Collections.Immutable;
using System.Runtime.CompilerServices;
using System.Text.Json.Nodes;

namespace StellaOps.Graph.Indexer.Incremental;

/// <summary>
/// No-op change/backfill source used when no external change feed is configured.
/// </summary>
public sealed class NoOpGraphChangeEventSource : IGraphChangeEventSource, IGraphBackfillSource
{
    public IAsyncEnumerable<GraphChangeEvent> ReadAsync(CancellationToken cancellationToken) =>
        ReadInternalAsync(cancellationToken);

    public IAsyncEnumerable<GraphChangeEvent> ReadBackfillAsync(CancellationToken cancellationToken) =>
        ReadInternalAsync(cancellationToken);

    private static async IAsyncEnumerable<GraphChangeEvent> ReadInternalAsync([EnumeratorCancellation] CancellationToken cancellationToken)
    {
        await Task.CompletedTask.ConfigureAwait(false);
        yield break;
    }
}
@@ -1,21 +0,0 @@
using System.Text.Json.Nodes;
using MongoDB.Bson;

namespace StellaOps.Graph.Indexer.Infrastructure;

internal static class BsonJsonConverter
{
    public static JsonObject ToJsonObject(BsonDocument document)
    {
        ArgumentNullException.ThrowIfNull(document);
        var parsed = JsonNode.Parse(document.ToJson());
        return parsed as JsonObject ?? new JsonObject();
    }

    public static JsonArray ToJsonArray(BsonArray array)
    {
        ArgumentNullException.ThrowIfNull(array);
        var parsed = JsonNode.Parse(array.ToJson());
        return parsed as JsonArray ?? new JsonArray();
    }
}
@@ -1,7 +0,0 @@
namespace StellaOps.Graph.Indexer.Infrastructure;

public sealed class MongoDatabaseOptions
{
    public string ConnectionString { get; set; } = string.Empty;
    public string DatabaseName { get; set; } = "stellaops-graph";
}
@@ -1,48 +0,0 @@
using System;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
using MongoDB.Driver;

namespace StellaOps.Graph.Indexer.Infrastructure;

public static class MongoServiceCollectionExtensions
{
    public static IServiceCollection AddGraphMongoDatabase(
        this IServiceCollection services,
        Action<MongoDatabaseOptions> configure)
    {
        ArgumentNullException.ThrowIfNull(services);
        ArgumentNullException.ThrowIfNull(configure);

        services.Configure(configure);

        services.AddSingleton<IMongoClient>(sp =>
        {
            var opts = sp.GetRequiredService<IOptions<MongoDatabaseOptions>>().Value;
            Validate(opts);
            return new MongoClient(opts.ConnectionString);
        });

        services.AddSingleton<IMongoDatabase>(sp =>
        {
            var opts = sp.GetRequiredService<IOptions<MongoDatabaseOptions>>().Value;
            Validate(opts);
            return sp.GetRequiredService<IMongoClient>().GetDatabase(opts.DatabaseName);
        });

        return services;
    }

    private static void Validate(MongoDatabaseOptions options)
    {
        if (string.IsNullOrWhiteSpace(options.ConnectionString))
        {
            throw new InvalidOperationException("Mongo connection string must be provided.");
        }

        if (string.IsNullOrWhiteSpace(options.DatabaseName))
        {
            throw new InvalidOperationException("Mongo database name must be provided.");
        }
    }
}
@@ -1,84 +0,0 @@
using System;
using System.Collections.Generic;
using System.Text.Encodings.Web;
using System.Text.Json;
using System.Text.Json.Nodes;
using StellaOps.Graph.Indexer.Ingestion.Sbom;
using System.Threading;
using System.Threading.Tasks;
using MongoDB.Bson;
using MongoDB.Driver;

namespace StellaOps.Graph.Indexer.Ingestion.Advisory;

public sealed class MongoGraphDocumentWriter : IGraphDocumentWriter
{
    private static readonly JsonSerializerOptions SerializerOptions = new()
    {
        Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping,
        WriteIndented = false
    };

    private readonly IMongoCollection<BsonDocument> _nodes;
    private readonly IMongoCollection<BsonDocument> _edges;

    public MongoGraphDocumentWriter(IMongoDatabase database, MongoGraphDocumentWriterOptions? options = null)
    {
        ArgumentNullException.ThrowIfNull(database);

        var resolved = options ?? new MongoGraphDocumentWriterOptions();
        _nodes = database.GetCollection<BsonDocument>(resolved.NodeCollectionName);
        _edges = database.GetCollection<BsonDocument>(resolved.EdgeCollectionName);
    }

    public async Task WriteAsync(GraphBuildBatch batch, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(batch);
        cancellationToken.ThrowIfCancellationRequested();

        if (batch.Nodes.Length > 0)
        {
            var nodeModels = CreateReplaceModels(_nodes, batch.Nodes);
            if (nodeModels.Count > 0)
            {
                await _nodes.BulkWriteAsync(nodeModels, new BulkWriteOptions { IsOrdered = false }, cancellationToken)
                    .ConfigureAwait(false);
            }
        }

        if (batch.Edges.Length > 0)
        {
            var edgeModels = CreateReplaceModels(_edges, batch.Edges);
            if (edgeModels.Count > 0)
            {
                await _edges.BulkWriteAsync(edgeModels, new BulkWriteOptions { IsOrdered = false }, cancellationToken)
                    .ConfigureAwait(false);
            }
        }
    }

    private static List<WriteModel<BsonDocument>> CreateReplaceModels(IMongoCollection<BsonDocument> collection, IReadOnlyList<JsonObject> documents)
    {
        var models = new List<WriteModel<BsonDocument>>(documents.Count);
        foreach (var document in documents)
        {
            if (!document.TryGetPropertyValue("id", out var idNode) || idNode is null)
            {
                continue;
            }

            var id = idNode.GetValue<string>();
            var filter = Builders<BsonDocument>.Filter.Eq("id", id);
            var bsonDocument = ToBsonDocument(document);
            models.Add(new ReplaceOneModel<BsonDocument>(filter, bsonDocument) { IsUpsert = true });
        }

        return models;
    }

    private static BsonDocument ToBsonDocument(JsonObject json)
    {
        var jsonString = json.ToJsonString(SerializerOptions);
        return BsonDocument.Parse(jsonString);
    }
}
@@ -1,7 +0,0 @@
namespace StellaOps.Graph.Indexer.Ingestion.Advisory;

public sealed class MongoGraphDocumentWriterOptions
{
    public string NodeCollectionName { get; init; } = "graph_nodes";
    public string EdgeCollectionName { get; init; } = "graph_edges";
}
@@ -11,6 +11,7 @@ public static class InspectorIngestServiceCollectionExtensions
    {
        ArgumentNullException.ThrowIfNull(services);

        services.TryAddSingleton<Ingestion.Sbom.IGraphDocumentWriter, Ingestion.Sbom.InMemoryGraphDocumentWriter>();
        services.TryAddSingleton<GraphInspectorTransformer>();
        services.TryAddSingleton<GraphInspectorProcessor>(provider =>
        {

@@ -0,0 +1,30 @@
using System.Collections.Concurrent;
using System.Collections.Immutable;
using System.Text.Json.Nodes;

namespace StellaOps.Graph.Indexer.Ingestion.Sbom;

/// <summary>
/// In-memory graph document writer used as a Mongo-free fallback.
/// </summary>
public sealed class InMemoryGraphDocumentWriter : IGraphDocumentWriter
{
    private readonly ConcurrentBag<GraphBuildBatch> _batches = new();

    public IReadOnlyCollection<GraphBuildBatch> Batches => _batches.ToArray();

    public Task WriteAsync(GraphBuildBatch batch, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(batch);
        _batches.Add(CloneBatch(batch));
        return Task.CompletedTask;
    }

    private static GraphBuildBatch CloneBatch(GraphBuildBatch source)
    {
        static JsonObject CloneNode(JsonObject node) => (JsonObject)node.DeepClone();
        return new GraphBuildBatch(
            ImmutableArray.CreateRange(source.Nodes.Select(CloneNode)),
            ImmutableArray.CreateRange(source.Edges.Select(CloneNode)));
    }
}
@@ -22,6 +22,7 @@ public static class SbomIngestServiceCollectionExtensions
            services.Configure(configure);
        }

        services.TryAddSingleton<IGraphDocumentWriter, InMemoryGraphDocumentWriter>();
        services.TryAddSingleton<SbomIngestTransformer>();
        services.TryAddSingleton<ISbomIngestMetrics, SbomIngestMetrics>();


@@ -13,8 +13,6 @@
    <PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
    <PackageReference Include="MongoDB.Driver" Version="3.5.0" />
    <PackageReference Include="MongoDB.Bson" Version="3.5.0" />
    <PackageReference Include="Microsoft.Extensions.Options.ConfigurationExtensions" Version="10.0.0" />
  </ItemGroup>
</Project>

@@ -1,125 +0,0 @@
using System.Collections.Immutable;
using System.Text.Json.Nodes;
using Mongo2Go;
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.Graph.Indexer.Analytics;
using StellaOps.Graph.Indexer.Incremental;

namespace StellaOps.Graph.Indexer.Tests;

public sealed class MongoProviderIntegrationTests : IAsyncLifetime
{
    private readonly MongoDbRunner _runner;
    private IMongoDatabase _database = default!;

    public MongoProviderIntegrationTests()
    {
        _runner = MongoDbRunner.Start(singleNodeReplSet: true);
    }

    public Task InitializeAsync()
    {
        var client = new MongoClient(_runner.ConnectionString);
        _database = client.GetDatabase("graph-indexer-tests");
        return Task.CompletedTask;
    }

    public Task DisposeAsync()
    {
        _runner.Dispose();
        return Task.CompletedTask;
    }

    [Fact]
    public async Task SnapshotProvider_ReadsPendingSnapshots()
    {
        var snapshots = _database.GetCollection<BsonDocument>("graph_snapshots");
        var nodes = new BsonArray
        {
            new BsonDocument
            {
                { "id", "gn:tenant-a:component:1" },
                { "kind", "component" },
                { "attributes", new BsonDocument { { "purl", "pkg:npm/a@1.0.0" } } }
            }
        };

        var edges = new BsonArray();

        await snapshots.InsertOneAsync(new BsonDocument
        {
            { "tenant", "tenant-a" },
            { "snapshot_id", "snap-1" },
            { "generated_at", DateTime.UtcNow },
            { "nodes", nodes },
            { "edges", edges }
        });

        var provider = new MongoGraphSnapshotProvider(_database);
        var pending = await provider.GetPendingSnapshotsAsync(CancellationToken.None);

        Assert.Single(pending);
        Assert.Equal("snap-1", pending[0].SnapshotId);
        Assert.Single(pending[0].Nodes);

        await provider.MarkProcessedAsync("tenant-a", "snap-1", CancellationToken.None);
        var none = await provider.GetPendingSnapshotsAsync(CancellationToken.None);
        Assert.Empty(none);
    }

    [Fact]
    public async Task ChangeEventSource_EnumeratesAndHonorsIdempotency()
    {
        var changes = _database.GetCollection<BsonDocument>("graph_change_events");
        await changes.InsertManyAsync(new[]
        {
            new BsonDocument
            {
                { "tenant", "tenant-a" },
                { "snapshot_id", "snap-1" },
                { "sequence_token", "seq-1" },
                { "is_backfill", false },
                { "nodes", new BsonArray { new BsonDocument { { "id", "gn:1" }, { "kind", "component" } } } },
                { "edges", new BsonArray() }
            },
            new BsonDocument
            {
                { "tenant", "tenant-a" },
                { "snapshot_id", "snap-1" },
                { "sequence_token", "seq-2" },
                { "is_backfill", false },
                { "nodes", new BsonArray { new BsonDocument { { "id", "gn:2" }, { "kind", "component" } } } },
                { "edges", new BsonArray() }
            }
        });

        var source = new MongoGraphChangeEventSource(_database);
        var idempotency = new MongoIdempotencyStore(_database);

        var events = new List<GraphChangeEvent>();
        await foreach (var change in source.ReadAsync(CancellationToken.None))
        {
            if (await idempotency.HasSeenAsync(change.SequenceToken, CancellationToken.None))
            {
                continue;
            }

            events.Add(change);
            await idempotency.MarkSeenAsync(change.SequenceToken, CancellationToken.None);
        }

        Assert.Equal(2, events.Count);

        var secondPass = new List<GraphChangeEvent>();
        await foreach (var change in source.ReadAsync(CancellationToken.None))
        {
            if (!await idempotency.HasSeenAsync(change.SequenceToken, CancellationToken.None))
            {
                secondPass.Add(change);
            }
        }

        Assert.Empty(secondPass);
    }
}
@@ -1,44 +0,0 @@
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Graph.Indexer.Infrastructure;
using Mongo2Go;
using MongoDB.Driver;

namespace StellaOps.Graph.Indexer.Tests;

public sealed class MongoServiceCollectionExtensionsTests : IAsyncLifetime
{
    private MongoDbRunner _runner = default!;

    public Task InitializeAsync()
    {
        _runner = MongoDbRunner.Start(singleNodeReplSet: true);
        return Task.CompletedTask;
    }

    public Task DisposeAsync()
    {
        _runner.Dispose();
        return Task.CompletedTask;
    }

    [Fact]
    public void AddGraphMongoDatabase_RegistersClientAndDatabase()
    {
        var services = new ServiceCollection();

        services.AddGraphMongoDatabase(options =>
        {
            options.ConnectionString = _runner.ConnectionString;
            options.DatabaseName = "graph-indexer-ext-tests";
        });

        var provider = services.BuildServiceProvider();

        var client = provider.GetService<IMongoClient>();
        var database = provider.GetService<IMongoDatabase>();

        Assert.NotNull(client);
        Assert.NotNull(database);
        Assert.Equal("graph-indexer-ext-tests", database!.DatabaseNamespace.DatabaseName);
    }
}
@@ -12,6 +12,5 @@
    <PackageReference Include="xunit" Version="2.9.2" />
    <PackageReference Include="xunit.runner.visualstudio" Version="2.8.2" />
    <PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.11.1" />
    <PackageReference Include="Mongo2Go" Version="3.1.3" />
  </ItemGroup>
</Project>

@@ -27,4 +27,4 @@ Host signed Task Pack bundles with provenance and RBAC for Epic 12. Ensure pac
- 4. Coordinate doc updates, tests, and cross-guild communication whenever contracts or workflows change.
- 5. Revert to `TODO` if you pause the task without shipping changes; leave notes in commit/PR descriptions for context.
- 6. Registry API expectations: require `X-API-Key` when configured and tenant scoping via `X-StellaOps-Tenant` (or `tenantId` on upload). Content/provenance downloads must emit digest headers (`X-Content-Digest`, `X-Provenance-Digest`) and respect tenant allowlists.
- 7. Lifecycle/parity/signature rotation endpoints require tenant headers; offline seed export supports per-tenant filtering and deterministic zip output. All mutating calls emit audit log entries (file `audit.ndjson` or Mongo `packs_audit_log`).
- 7. Lifecycle/parity/signature rotation endpoints require tenant headers; offline seed export supports per-tenant filtering and deterministic zip output. All mutating calls emit audit log entries (file `audit.ndjson` or file-backed audit logs).

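A client honouring the expectations in item 6 might look like the sketch below; the host, route shape, and variable names are assumptions, and only the header names come from the notes above:

```csharp
// Hypothetical client call; only the header names are taken from the expectations above.
using var client = new HttpClient { BaseAddress = new Uri("https://registry.example.internal") }; // placeholder host
client.DefaultRequestHeaders.Add("X-API-Key", apiKey);              // required when the registry has a key configured
client.DefaultRequestHeaders.Add("X-StellaOps-Tenant", "tenant-a"); // tenant scoping

// "/packs/{packId}/content" is an assumed route shape, not taken from this diff.
using var response = await client.GetAsync($"/packs/{packId}/content", cancellationToken);
response.EnsureSuccessStatusCode();

// Content downloads emit a digest header the caller can verify against.
var contentDigest = response.Headers.TryGetValues("X-Content-Digest", out var values)
    ? values.FirstOrDefault()
    : null;
```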
@@ -1,84 +0,0 @@
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.PacksRegistry.Core.Contracts;
using StellaOps.PacksRegistry.Core.Models;
using StellaOps.PacksRegistry.Infrastructure.Options;

namespace StellaOps.PacksRegistry.Infrastructure.Mongo;

public sealed class MongoAttestationRepository : IAttestationRepository
{
    private readonly IMongoCollection<AttestationDocument> _index;
    private readonly IMongoCollection<AttestationBlob> _blobs;

    public MongoAttestationRepository(IMongoDatabase database, MongoOptions options)
    {
        _index = database.GetCollection<AttestationDocument>(options.AttestationCollection ?? "packs_attestations");
        _blobs = database.GetCollection<AttestationBlob>(options.AttestationBlobsCollection ?? "packs_attestation_blobs");
        _index.Indexes.CreateOne(new CreateIndexModel<AttestationDocument>(Builders<AttestationDocument>.IndexKeys.Ascending(x => x.PackId).Ascending(x => x.Type), new CreateIndexOptions { Unique = true }));
        _blobs.Indexes.CreateOne(new CreateIndexModel<AttestationBlob>(Builders<AttestationBlob>.IndexKeys.Ascending(x => x.Digest), new CreateIndexOptions { Unique = true }));
    }

    public async Task UpsertAsync(AttestationRecord record, byte[] content, CancellationToken cancellationToken = default)
    {
        var doc = AttestationDocument.From(record);
        await _index.ReplaceOneAsync(x => x.PackId == record.PackId && x.Type == record.Type, doc, new ReplaceOptions { IsUpsert = true }, cancellationToken).ConfigureAwait(false);

        var blob = new AttestationBlob { Digest = record.Digest, Content = content };
        await _blobs.ReplaceOneAsync(x => x.Digest == blob.Digest, blob, new ReplaceOptions { IsUpsert = true }, cancellationToken).ConfigureAwait(false);
    }

    public async Task<AttestationRecord?> GetAsync(string packId, string type, CancellationToken cancellationToken = default)
    {
        var doc = await _index.Find(x => x.PackId == packId && x.Type == type).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
        return doc?.ToModel();
    }

    public async Task<IReadOnlyList<AttestationRecord>> ListAsync(string packId, CancellationToken cancellationToken = default)
    {
        var docs = await _index.Find(x => x.PackId == packId).SortBy(x => x.Type).ToListAsync(cancellationToken).ConfigureAwait(false);
        return docs.Select(d => d.ToModel()).ToList();
    }

    public async Task<byte[]?> GetContentAsync(string packId, string type, CancellationToken cancellationToken = default)
    {
        var record = await GetAsync(packId, type, cancellationToken).ConfigureAwait(false);
        if (record is null)
        {
            return null;
        }

        var blob = await _blobs.Find(x => x.Digest == record.Digest).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
        return blob?.Content;
    }

    private sealed class AttestationDocument
    {
        public ObjectId Id { get; set; }
        public string PackId { get; set; } = default!;
        public string TenantId { get; set; } = default!;
        public string Type { get; set; } = default!;
        public string Digest { get; set; } = default!;
        public DateTimeOffset CreatedAtUtc { get; set; }
        public string? Notes { get; set; }

        public AttestationRecord ToModel() => new(PackId, TenantId, Type, Digest, CreatedAtUtc, Notes);
        public static AttestationDocument From(AttestationRecord record) => new()
        {
            PackId = record.PackId,
            TenantId = record.TenantId,
            Type = record.Type,
            Digest = record.Digest,
            CreatedAtUtc = record.CreatedAtUtc,
            Notes = record.Notes
        };
    }

    private sealed class AttestationBlob
    {
        public ObjectId Id { get; set; }
        public string Digest { get; set; } = default!;
        public byte[] Content { get; set; } = default!;
    }
}

@@ -1,66 +0,0 @@
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.PacksRegistry.Core.Contracts;
using StellaOps.PacksRegistry.Core.Models;
using StellaOps.PacksRegistry.Infrastructure.Options;

namespace StellaOps.PacksRegistry.Infrastructure.Mongo;

public sealed class MongoAuditRepository : IAuditRepository
{
    private readonly IMongoCollection<AuditDocument> _collection;

    public MongoAuditRepository(IMongoDatabase database, MongoOptions options)
    {
        _collection = database.GetCollection<AuditDocument>(options.AuditCollection ?? "packs_audit_log");
        var indexKeys = Builders<AuditDocument>.IndexKeys
            .Ascending(x => x.TenantId)
            .Ascending(x => x.PackId)
            .Ascending(x => x.OccurredAtUtc);
        _collection.Indexes.CreateOne(new CreateIndexModel<AuditDocument>(indexKeys));
    }

    public async Task AppendAsync(AuditRecord record, CancellationToken cancellationToken = default)
    {
        var doc = AuditDocument.From(record);
        await _collection.InsertOneAsync(doc, cancellationToken: cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<AuditRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
    {
        var filter = string.IsNullOrWhiteSpace(tenantId)
            ? Builders<AuditDocument>.Filter.Empty
            : Builders<AuditDocument>.Filter.Eq(x => x.TenantId, tenantId);

        var docs = await _collection.Find(filter)
            .SortBy(x => x.OccurredAtUtc)
            .ThenBy(x => x.PackId)
            .ToListAsync(cancellationToken)
            .ConfigureAwait(false);

        return docs.Select(d => d.ToModel()).ToList();
    }

    private sealed class AuditDocument
    {
        public ObjectId Id { get; set; }
        public string? PackId { get; set; }
        public string TenantId { get; set; } = default!;
        public string Event { get; set; } = default!;
        public DateTimeOffset OccurredAtUtc { get; set; }
        public string? Actor { get; set; }
        public string? Notes { get; set; }

        public AuditRecord ToModel() => new(PackId, TenantId, Event, OccurredAtUtc, Actor, Notes);

        public static AuditDocument From(AuditRecord record) => new()
        {
            PackId = record.PackId,
            TenantId = record.TenantId,
            Event = record.Event,
            OccurredAtUtc = record.OccurredAtUtc,
            Actor = record.Actor,
            Notes = record.Notes
        };
    }
}
@@ -1,64 +0,0 @@
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.PacksRegistry.Core.Contracts;
using StellaOps.PacksRegistry.Core.Models;
using StellaOps.PacksRegistry.Infrastructure.Options;

namespace StellaOps.PacksRegistry.Infrastructure.Mongo;

public sealed class MongoLifecycleRepository : ILifecycleRepository
{
    private readonly IMongoCollection<LifecycleDocument> _collection;

    public MongoLifecycleRepository(IMongoDatabase database, MongoOptions options)
    {
        _collection = database.GetCollection<LifecycleDocument>(options.LifecycleCollection ?? "packs_lifecycle");
        _collection.Indexes.CreateOne(new CreateIndexModel<LifecycleDocument>(Builders<LifecycleDocument>.IndexKeys.Ascending(x => x.PackId), new CreateIndexOptions { Unique = true }));
    }

    public async Task UpsertAsync(LifecycleRecord record, CancellationToken cancellationToken = default)
    {
        var doc = LifecycleDocument.From(record);
        await _collection.ReplaceOneAsync(x => x.PackId == record.PackId, doc, new ReplaceOptions { IsUpsert = true }, cancellationToken).ConfigureAwait(false);
    }

    public async Task<LifecycleRecord?> GetAsync(string packId, CancellationToken cancellationToken = default)
    {
        var doc = await _collection.Find(x => x.PackId == packId).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
        return doc?.ToModel();
    }

    public async Task<IReadOnlyList<LifecycleRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
    {
        var filter = string.IsNullOrWhiteSpace(tenantId)
            ? Builders<LifecycleDocument>.Filter.Empty
            : Builders<LifecycleDocument>.Filter.Eq(x => x.TenantId, tenantId);

        var docs = await _collection.Find(filter)
            .SortBy(x => x.PackId)
            .ToListAsync(cancellationToken)
            .ConfigureAwait(false);

        return docs.Select(d => d.ToModel()).ToList();
    }

    private sealed class LifecycleDocument
    {
        public ObjectId Id { get; set; }
        public string PackId { get; set; } = default!;
        public string TenantId { get; set; } = default!;
        public string State { get; set; } = default!;
        public string? Notes { get; set; }
        public DateTimeOffset UpdatedAtUtc { get; set; }

        public LifecycleRecord ToModel() => new(PackId, TenantId, State, Notes, UpdatedAtUtc);
        public static LifecycleDocument From(LifecycleRecord record) => new()
        {
            PackId = record.PackId,
            TenantId = record.TenantId,
            State = record.State,
            Notes = record.Notes,
            UpdatedAtUtc = record.UpdatedAtUtc
        };
    }
}
@@ -1,67 +0,0 @@
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.PacksRegistry.Core.Contracts;
using StellaOps.PacksRegistry.Core.Models;
using StellaOps.PacksRegistry.Infrastructure.Options;

namespace StellaOps.PacksRegistry.Infrastructure.Mongo;

public sealed class MongoMirrorRepository : IMirrorRepository
{
    private readonly IMongoCollection<MirrorDocument> _collection;

    public MongoMirrorRepository(IMongoDatabase database, MongoOptions options)
    {
        _collection = database.GetCollection<MirrorDocument>(options.MirrorCollection ?? "packs_mirrors");
        _collection.Indexes.CreateOne(new CreateIndexModel<MirrorDocument>(Builders<MirrorDocument>.IndexKeys.Ascending(x => x.Id), new CreateIndexOptions { Unique = true }));
    }

    public async Task UpsertAsync(MirrorSourceRecord record, CancellationToken cancellationToken = default)
    {
        var doc = MirrorDocument.From(record);
        await _collection.ReplaceOneAsync(x => x.Id == record.Id, doc, new ReplaceOptions { IsUpsert = true }, cancellationToken).ConfigureAwait(false);
    }

    public async Task<IReadOnlyList<MirrorSourceRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
    {
        var filter = string.IsNullOrWhiteSpace(tenantId)
            ? Builders<MirrorDocument>.Filter.Empty
            : Builders<MirrorDocument>.Filter.Eq(x => x.TenantId, tenantId);

        var docs = await _collection.Find(filter).SortBy(x => x.Id).ToListAsync(cancellationToken).ConfigureAwait(false);
        return docs.Select(d => d.ToModel()).ToList();
    }

    public async Task<MirrorSourceRecord?> GetAsync(string id, CancellationToken cancellationToken = default)
    {
        var doc = await _collection.Find(x => x.Id == id).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
        return doc?.ToModel();
    }

    private sealed class MirrorDocument
    {
        public ObjectId InternalId { get; set; }
        public string Id { get; set; } = default!;
        public string TenantId { get; set; } = default!;
        public string Upstream { get; set; } = default!;
        public bool Enabled { get; set; }
        public string Status { get; set; } = default!;
        public DateTimeOffset UpdatedAtUtc { get; set; }
        public string? Notes { get; set; }
        public DateTimeOffset? LastSuccessfulSyncUtc { get; set; }

        public MirrorSourceRecord ToModel() => new(Id, TenantId, new Uri(Upstream), Enabled, Status, UpdatedAtUtc, Notes, LastSuccessfulSyncUtc);
        public static MirrorDocument From(MirrorSourceRecord record) => new()
        {
            Id = record.Id,
            TenantId = record.TenantId,
            Upstream = record.UpstreamUri.ToString(),
            Enabled = record.Enabled,
            Status = record.Status,
            UpdatedAtUtc = record.UpdatedAtUtc,
            Notes = record.Notes,
            LastSuccessfulSyncUtc = record.LastSuccessfulSyncUtc
        };
    }
}

@@ -1,123 +0,0 @@
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.PacksRegistry.Core.Contracts;
|
||||
using StellaOps.PacksRegistry.Core.Models;
|
||||
using StellaOps.PacksRegistry.Infrastructure.Options;
|
||||
|
||||
namespace StellaOps.PacksRegistry.Infrastructure.Mongo;
|
||||
|
||||
public sealed class MongoPackRepository : IPackRepository
|
||||
{
|
||||
private readonly IMongoCollection<PackDocument> _packs;
|
||||
private readonly IMongoCollection<PackContentDocument> _contents;
|
||||
|
||||
public MongoPackRepository(IMongoDatabase database, MongoOptions options)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(database);
|
||||
_packs = database.GetCollection<PackDocument>(options.PacksCollection);
|
||||
_contents = database.GetCollection<PackContentDocument>(options.BlobsCollection);
|
||||
|
||||
_packs.Indexes.CreateOne(new CreateIndexModel<PackDocument>(Builders<PackDocument>.IndexKeys.Ascending(x => x.PackId), new CreateIndexOptions { Unique = true }));
|
||||
_packs.Indexes.CreateOne(new CreateIndexModel<PackDocument>(Builders<PackDocument>.IndexKeys.Ascending(x => x.TenantId).Ascending(x => x.Name).Ascending(x => x.Version)));
|
||||
_contents.Indexes.CreateOne(new CreateIndexModel<PackContentDocument>(Builders<PackContentDocument>.IndexKeys.Ascending(x => x.Digest), new CreateIndexOptions { Unique = true }));
|
||||
}
|
||||
|
||||
public async Task UpsertAsync(PackRecord record, byte[] content, byte[]? provenance, CancellationToken cancellationToken = default)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(record);
|
||||
ArgumentNullException.ThrowIfNull(content);
|
||||
|
||||
var packDoc = PackDocument.From(record);
|
||||
await _packs.ReplaceOneAsync(x => x.PackId == record.PackId, packDoc, new ReplaceOptions { IsUpsert = true }, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var blob = new PackContentDocument
|
||||
{
|
||||
Digest = record.Digest,
|
||||
Content = content,
|
||||
ProvenanceDigest = record.ProvenanceDigest,
|
||||
Provenance = provenance
|
||||
};
|
||||
|
||||
await _contents.ReplaceOneAsync(x => x.Digest == record.Digest, blob, new ReplaceOptions { IsUpsert = true }, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public async Task<PackRecord?> GetAsync(string packId, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var doc = await _packs.Find(x => x.PackId == packId).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
|
||||
return doc?.ToModel();
|
||||
}
|
||||
|
||||
public async Task<IReadOnlyList<PackRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var filter = string.IsNullOrWhiteSpace(tenantId)
|
||||
? Builders<PackDocument>.Filter.Empty
|
||||
: Builders<PackDocument>.Filter.Eq(x => x.TenantId, tenantId);
|
||||
|
||||
var docs = await _packs.Find(filter).SortBy(x => x.TenantId).ThenBy(x => x.Name).ThenBy(x => x.Version).ToListAsync(cancellationToken).ConfigureAwait(false);
|
||||
return docs.Select(d => d.ToModel()).ToArray();
|
||||
}
|
||||
|
||||
public async Task<byte[]?> GetContentAsync(string packId, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var pack = await GetAsync(packId, cancellationToken).ConfigureAwait(false);
|
||||
if (pack is null)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
var blob = await _contents.Find(x => x.Digest == pack.Digest).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
|
||||
return blob?.Content;
|
||||
}
|
||||
|
||||
public async Task<byte[]?> GetProvenanceAsync(string packId, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var pack = await GetAsync(packId, cancellationToken).ConfigureAwait(false);
|
||||
if (pack is null || string.IsNullOrWhiteSpace(pack.ProvenanceDigest))
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
var blob = await _contents.Find(x => x.Digest == pack.Digest).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
|
||||
return blob?.Provenance;
|
||||
}
|
||||
|
||||
private sealed class PackDocument
|
||||
{
|
||||
public ObjectId Id { get; set; }
|
||||
public string PackId { get; set; } = default!;
|
||||
public string Name { get; set; } = default!;
|
||||
public string Version { get; set; } = default!;
|
||||
public string TenantId { get; set; } = default!;
|
||||
public string Digest { get; set; } = default!;
|
||||
public string? Signature { get; set; }
|
||||
public string? ProvenanceUri { get; set; }
|
||||
public string? ProvenanceDigest { get; set; }
|
||||
public DateTimeOffset CreatedAtUtc { get; set; }
|
||||
public Dictionary<string, string>? Metadata { get; set; }
|
||||
|
||||
public PackRecord ToModel() => new(PackId, Name, Version, TenantId, Digest, Signature, ProvenanceUri, ProvenanceDigest, CreatedAtUtc, Metadata);
|
||||
|
||||
public static PackDocument From(PackRecord model) => new()
|
||||
{
|
||||
PackId = model.PackId,
|
||||
Name = model.Name,
|
||||
Version = model.Version,
|
||||
TenantId = model.TenantId,
|
||||
Digest = model.Digest,
|
||||
Signature = model.Signature,
|
||||
ProvenanceUri = model.ProvenanceUri,
|
||||
ProvenanceDigest = model.ProvenanceDigest,
|
||||
CreatedAtUtc = model.CreatedAtUtc,
|
||||
Metadata = model.Metadata?.ToDictionary(kv => kv.Key, kv => kv.Value)
|
||||
};
|
||||
}
|
||||
|
||||
private sealed class PackContentDocument
|
||||
{
|
||||
public ObjectId Id { get; set; }
|
||||
public string Digest { get; set; } = default!;
|
||||
public byte[] Content { get; set; } = Array.Empty<byte>();
|
||||
public string? ProvenanceDigest { get; set; }
|
||||
public byte[]? Provenance { get; set; }
|
||||
}
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.PacksRegistry.Core.Contracts;
|
||||
using StellaOps.PacksRegistry.Core.Models;
|
||||
using StellaOps.PacksRegistry.Infrastructure.Options;
|
||||
|
||||
namespace StellaOps.PacksRegistry.Infrastructure.Mongo;
|
||||
|
||||
public sealed class MongoParityRepository : IParityRepository
|
||||
{
|
||||
private readonly IMongoCollection<ParityDocument> _collection;
|
||||
|
||||
public MongoParityRepository(IMongoDatabase database, MongoOptions options)
|
||||
{
|
||||
_collection = database.GetCollection<ParityDocument>(options.ParityCollection ?? "packs_parity_matrix");
|
||||
_collection.Indexes.CreateOne(new CreateIndexModel<ParityDocument>(Builders<ParityDocument>.IndexKeys.Ascending(x => x.PackId), new CreateIndexOptions { Unique = true }));
|
||||
}
|
||||
|
||||
public async Task UpsertAsync(ParityRecord record, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var doc = ParityDocument.From(record);
|
||||
await _collection.ReplaceOneAsync(x => x.PackId == record.PackId, doc, new ReplaceOptions { IsUpsert = true }, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public async Task<ParityRecord?> GetAsync(string packId, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var doc = await _collection.Find(x => x.PackId == packId).FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false);
|
||||
return doc?.ToModel();
|
||||
}
|
||||
|
||||
public async Task<IReadOnlyList<ParityRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var filter = string.IsNullOrWhiteSpace(tenantId)
|
||||
? Builders<ParityDocument>.Filter.Empty
|
||||
: Builders<ParityDocument>.Filter.Eq(x => x.TenantId, tenantId);
|
||||
|
||||
var docs = await _collection.Find(filter)
|
||||
.SortBy(x => x.PackId)
|
||||
.ToListAsync(cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
return docs.Select(d => d.ToModel()).ToList();
|
||||
}
|
||||
|
||||
private sealed class ParityDocument
|
||||
{
|
||||
public ObjectId Id { get; set; }
|
||||
public string PackId { get; set; } = default!;
|
||||
public string TenantId { get; set; } = default!;
|
||||
public string Status { get; set; } = default!;
|
||||
public string? Notes { get; set; }
|
||||
public DateTimeOffset UpdatedAtUtc { get; set; }
|
||||
|
||||
public ParityRecord ToModel() => new(PackId, TenantId, Status, Notes, UpdatedAtUtc);
|
||||
public static ParityDocument From(ParityRecord record) => new()
|
||||
{
|
||||
PackId = record.PackId,
|
||||
TenantId = record.TenantId,
|
||||
Status = record.Status,
|
||||
Notes = record.Notes,
|
||||
UpdatedAtUtc = record.UpdatedAtUtc
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -1,109 +0,0 @@
|
||||
using Microsoft.Extensions.Hosting;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.PacksRegistry.Infrastructure.Options;
|
||||
|
||||
namespace StellaOps.PacksRegistry.Infrastructure.Mongo;
|
||||
|
||||
/// <summary>
|
||||
/// Ensures Mongo collections and indexes exist for packs, blobs, and parity matrix.
|
||||
/// </summary>
|
||||
public sealed class PacksMongoInitializer : IHostedService
|
||||
{
|
||||
private readonly IMongoDatabase _database;
|
||||
private readonly MongoOptions _options;
|
||||
private readonly ILogger<PacksMongoInitializer> _logger;
|
||||
|
||||
public PacksMongoInitializer(IMongoDatabase database, MongoOptions options, ILogger<PacksMongoInitializer> logger)
|
||||
{
|
||||
_database = database ?? throw new ArgumentNullException(nameof(database));
|
||||
_options = options ?? throw new ArgumentNullException(nameof(options));
|
||||
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
|
||||
}
|
||||
|
||||
public async Task StartAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
await EnsurePacksIndexAsync(cancellationToken).ConfigureAwait(false);
|
||||
await EnsureBlobsIndexAsync(cancellationToken).ConfigureAwait(false);
|
||||
await EnsureParityMatrixAsync(cancellationToken).ConfigureAwait(false);
|
||||
await EnsureLifecycleAsync(cancellationToken).ConfigureAwait(false);
|
||||
await EnsureAuditAsync(cancellationToken).ConfigureAwait(false);
|
||||
await EnsureAttestationsAsync(cancellationToken).ConfigureAwait(false);
|
||||
await EnsureMirrorsAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public Task StopAsync(CancellationToken cancellationToken) => Task.CompletedTask;
|
||||
|
||||
private async Task EnsurePacksIndexAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
        var packs = _database.GetCollection<BsonDocument>(_options.PacksCollection);
        var indexKeys = Builders<BsonDocument>.IndexKeys.Ascending("packId");
        await packs.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(indexKeys, new CreateIndexOptions { Unique = true }), cancellationToken: cancellationToken).ConfigureAwait(false);

        var secondary = Builders<BsonDocument>.IndexKeys.Ascending("tenantId").Ascending("name").Ascending("version");
        await packs.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(secondary), cancellationToken: cancellationToken).ConfigureAwait(false);
    }

    private async Task EnsureBlobsIndexAsync(CancellationToken cancellationToken)
    {
        var blobs = _database.GetCollection<BsonDocument>(_options.BlobsCollection);
        var indexKeys = Builders<BsonDocument>.IndexKeys.Ascending("digest");
        await blobs.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(indexKeys, new CreateIndexOptions { Unique = true }), cancellationToken: cancellationToken).ConfigureAwait(false);
    }

    private async Task EnsureParityMatrixAsync(CancellationToken cancellationToken)
    {
        var parityName = _options.ParityCollection ?? "packs_parity_matrix";
        var parity = _database.GetCollection<BsonDocument>(parityName);
        var indexKeys = Builders<BsonDocument>.IndexKeys.Ascending("packId");
        await parity.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(indexKeys, new CreateIndexOptions { Unique = true }), cancellationToken: cancellationToken).ConfigureAwait(false);

        _logger.LogInformation("Mongo collections ensured: {Packs}, {Blobs}, {Parity}", _options.PacksCollection, _options.BlobsCollection, parityName);
    }

    private async Task EnsureLifecycleAsync(CancellationToken cancellationToken)
    {
        var lifecycleName = _options.LifecycleCollection ?? "packs_lifecycle";
        var lifecycle = _database.GetCollection<BsonDocument>(lifecycleName);
        var indexKeys = Builders<BsonDocument>.IndexKeys.Ascending("packId");
        await lifecycle.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(indexKeys, new CreateIndexOptions { Unique = true }), cancellationToken: cancellationToken).ConfigureAwait(false);
        _logger.LogInformation("Mongo lifecycle collection ensured: {Lifecycle}", lifecycleName);
    }

    private async Task EnsureAuditAsync(CancellationToken cancellationToken)
    {
        var auditName = _options.AuditCollection ?? "packs_audit_log";
        var audit = _database.GetCollection<BsonDocument>(auditName);
        var indexKeys = Builders<BsonDocument>.IndexKeys
            .Ascending("tenantId")
            .Ascending("packId")
            .Ascending("occurredAtUtc");
        await audit.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(indexKeys), cancellationToken: cancellationToken).ConfigureAwait(false);
        _logger.LogInformation("Mongo audit collection ensured: {Audit}", auditName);
    }

    private async Task EnsureAttestationsAsync(CancellationToken cancellationToken)
    {
        var attestName = _options.AttestationCollection ?? "packs_attestations";
        var attest = _database.GetCollection<BsonDocument>(attestName);
        var indexKeys = Builders<BsonDocument>.IndexKeys.Ascending("packId").Ascending("type");
        await attest.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(indexKeys, new CreateIndexOptions { Unique = true }), cancellationToken: cancellationToken).ConfigureAwait(false);

        var blobsName = _options.AttestationBlobsCollection ?? "packs_attestation_blobs";
        var blobs = _database.GetCollection<BsonDocument>(blobsName);
        var blobIndex = Builders<BsonDocument>.IndexKeys.Ascending("digest");
        await blobs.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(blobIndex, new CreateIndexOptions { Unique = true }), cancellationToken: cancellationToken).ConfigureAwait(false);

        _logger.LogInformation("Mongo attestation collections ensured: {Attest} / {AttestBlobs}", attestName, blobsName);
    }

    private async Task EnsureMirrorsAsync(CancellationToken cancellationToken)
    {
        var mirrorName = _options.MirrorCollection ?? "packs_mirrors";
        var mirrors = _database.GetCollection<BsonDocument>(mirrorName);
        var indexKeys = Builders<BsonDocument>.IndexKeys.Ascending("id");
        await mirrors.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(indexKeys, new CreateIndexOptions { Unique = true }), cancellationToken: cancellationToken).ConfigureAwait(false);
        _logger.LogInformation("Mongo mirror collection ensured: {Mirror}", mirrorName);
    }
}
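All of the `Ensure*` methods above are idempotent: re-running `CreateOneAsync` with an identical key specification is effectively a no-op, and the unique `packId`/`digest` keys are what let writers upsert safely. A minimal sketch of an upsert that leans on those indexes (the `packs` handle and `packDocument` are illustrative, not part of this change):

```csharp
// Replace-or-insert keyed on the unique "packId" index; a racing duplicate
// insert surfaces as a duplicate-key error instead of a second document.
var filter = Builders<BsonDocument>.Filter.Eq("packId", packId);
await packs.ReplaceOneAsync(
    filter,
    packDocument,
    new ReplaceOptions { IsUpsert = true },
    cancellationToken).ConfigureAwait(false);
```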
@@ -1,15 +0,0 @@
namespace StellaOps.PacksRegistry.Infrastructure.Options;

public sealed class MongoOptions
{
    public string? ConnectionString { get; set; }
    public string Database { get; set; } = "packs_registry";
    public string PacksCollection { get; set; } = "packs_index";
    public string BlobsCollection { get; set; } = "packs_blobs";
    public string? ParityCollection { get; set; } = "packs_parity_matrix";
    public string? LifecycleCollection { get; set; } = "packs_lifecycle";
    public string? AuditCollection { get; set; } = "packs_audit_log";
    public string? AttestationCollection { get; set; } = "packs_attestations";
    public string? AttestationBlobsCollection { get; set; } = "packs_attestation_blobs";
    public string? MirrorCollection { get; set; } = "packs_mirrors";
}
@@ -12,7 +12,6 @@
  </ItemGroup>

  <ItemGroup>
    <PackageReference Include="MongoDB.Driver" Version="3.5.0" />
    <PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
@@ -3,15 +3,11 @@ using StellaOps.PacksRegistry.Core.Contracts;
using StellaOps.PacksRegistry.Core.Models;
using StellaOps.PacksRegistry.Core.Services;
using StellaOps.PacksRegistry.Infrastructure.FileSystem;
using StellaOps.PacksRegistry.Infrastructure.InMemory;
using StellaOps.PacksRegistry.Infrastructure.Verification;
using StellaOps.PacksRegistry.Infrastructure.Mongo;
using StellaOps.PacksRegistry.Infrastructure.Options;
using StellaOps.PacksRegistry.WebService;
using StellaOps.PacksRegistry.WebService.Contracts;
using StellaOps.PacksRegistry.WebService.Options;
using Microsoft.Extensions.FileProviders;
using MongoDB.Driver;

var builder = WebApplication.CreateBuilder(args);

@@ -22,32 +18,14 @@ builder.Services.ConfigureHttpJsonOptions(options =>
});

builder.Services.AddOpenApi();
var dataDir = builder.Configuration.GetValue<string>("PacksRegistry:DataDir");
var mongoOptions = builder.Configuration.GetSection("PacksRegistry:Mongo").Get<MongoOptions>() ?? new MongoOptions();
mongoOptions.ConnectionString ??= builder.Configuration.GetConnectionString("packs-registry");
var dataDir = builder.Configuration.GetValue<string>("PacksRegistry:DataDir") ?? Path.Combine("data", "packs");

if (!string.IsNullOrWhiteSpace(mongoOptions.ConnectionString))
{
    builder.Services.AddSingleton(mongoOptions);
    builder.Services.AddSingleton<IMongoClient>(_ => new MongoClient(mongoOptions.ConnectionString));
    builder.Services.AddSingleton(sp => sp.GetRequiredService<IMongoClient>().GetDatabase(mongoOptions.Database));
    builder.Services.AddSingleton<IPackRepository, MongoPackRepository>();
    builder.Services.AddSingleton<IParityRepository, MongoParityRepository>();
    builder.Services.AddSingleton<ILifecycleRepository, MongoLifecycleRepository>();
    builder.Services.AddSingleton<IAuditRepository, MongoAuditRepository>();
    builder.Services.AddSingleton<IAttestationRepository, MongoAttestationRepository>();
    builder.Services.AddSingleton<IMirrorRepository, MongoMirrorRepository>();
    builder.Services.AddHostedService<PacksMongoInitializer>();
}
else
{
    builder.Services.AddSingleton<IPackRepository>(_ => new FilePackRepository(dataDir ?? "data/packs"));
    builder.Services.AddSingleton<IParityRepository>(_ => new FileParityRepository(dataDir ?? "data/packs"));
    builder.Services.AddSingleton<ILifecycleRepository>(_ => new FileLifecycleRepository(dataDir ?? "data/packs"));
    builder.Services.AddSingleton<IAuditRepository>(_ => new FileAuditRepository(dataDir ?? "data/packs"));
    builder.Services.AddSingleton<IAttestationRepository>(_ => new FileAttestationRepository(dataDir ?? "data/packs"));
    builder.Services.AddSingleton<IMirrorRepository>(_ => new FileMirrorRepository(dataDir ?? "data/packs"));
}
builder.Services.AddSingleton<IPackRepository>(_ => new FilePackRepository(dataDir));
builder.Services.AddSingleton<IParityRepository>(_ => new FileParityRepository(dataDir));
builder.Services.AddSingleton<ILifecycleRepository>(_ => new FileLifecycleRepository(dataDir));
builder.Services.AddSingleton<IAuditRepository>(_ => new FileAuditRepository(dataDir));
builder.Services.AddSingleton<IAttestationRepository>(_ => new FileAttestationRepository(dataDir));
builder.Services.AddSingleton<IMirrorRepository>(_ => new FileMirrorRepository(dataDir));

var verificationSection = builder.Configuration.GetSection("PacksRegistry:Verification");
builder.Services.Configure<VerificationOptions>(verificationSection);
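Read together with the deleted `MongoOptions` file and the dropped `MongoDB.Driver` package reference above, this hunk removes the Mongo branch from the web service: repository selection no longer hinges on a connection string, and `dataDir` gains an explicit default. A sketch of the resulting resolution (in-memory configuration for illustration only):

```csharp
using Microsoft.Extensions.Configuration;

var config = new ConfigurationBuilder()
    .AddInMemoryCollection(new Dictionary<string, string?>())
    .Build();

// With "PacksRegistry:DataDir" unset, the fallback kicks in and every
// File* repository roots its storage under data/packs.
var dataDir = config.GetValue<string>("PacksRegistry:DataDir") ?? Path.Combine("data", "packs");
```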
@@ -5,7 +5,7 @@ Stand up the Policy Engine runtime host that evaluates organization policies aga

## Scope
- Minimal API host & background workers for policy runs (full, incremental, simulate).
- Mongo persistence for `policies`, `policy_runs`, and `effective_finding_*` collections.
- PostgreSQL persistence via `StellaOps.Policy.Storage.Postgres` for packs, runs, receipts, and overlays; in-memory fallbacks for dev/test.
- Change stream listeners and scheduler integration for incremental re-evaluation.
- Authority integration enforcing new `policy:*` and `effective:write` scopes.
- Observability: metrics, traces, structured logs, trace sampling.
@@ -4,8 +4,9 @@ using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Engine.Options;
using StellaOps.Policy.Engine.Storage.Mongo.Repositories;
using StellaOps.Policy.Engine.Telemetry;
using StellaOps.Policy.Storage.Postgres.Models;
using StellaOps.Policy.Storage.Postgres.Repositories;
using StackExchange.Redis;

namespace StellaOps.Policy.Engine.ExceptionCache;
@@ -347,73 +348,37 @@ internal sealed class RedisExceptionEffectiveCache : IExceptionEffectiveCache

        try
        {
            // Get all active exceptions from repository
            var exceptions = await _repository.ListExceptionsAsync(
            var exceptions = await _repository.GetAllAsync(
                tenantId,
                new ExceptionQueryOptions
                {
                    Statuses = ImmutableArray.Create("active"),
                    IncludeExpired = false,
                    Limit = _options.MaxEntriesPerTenant,
                },
                cancellationToken).ConfigureAwait(false);
                ExceptionStatus.Active,
                limit: _options.MaxEntriesPerTenant,
                offset: 0,
                cancellationToken: cancellationToken).ConfigureAwait(false);

            if (exceptions.Length == 0)
            if (exceptions.Count == 0)
            {
                _logger.LogDebug("No active exceptions to warm for tenant {TenantId}", tenantId);
                return;
            }

            // Get bindings for all exceptions
            var entries = new List<ExceptionCacheEntry>();

            foreach (var exception in exceptions)
            {
                var bindings = await _repository.GetBindingsForExceptionAsync(
                    tenantId, exception.Id, cancellationToken).ConfigureAwait(false);

                foreach (var binding in bindings.Where(b => b.Status == "active"))
                entries.Add(new ExceptionCacheEntry
                {
                    entries.Add(new ExceptionCacheEntry
                    {
                        ExceptionId = exception.Id,
                        AssetId = binding.AssetId,
                        AdvisoryId = binding.AdvisoryId,
                        CveId = binding.CveId,
                        DecisionOverride = binding.DecisionOverride,
                        ExceptionType = exception.ExceptionType,
                        Priority = exception.Priority,
                        EffectiveFrom = binding.EffectiveFrom,
                        ExpiresAt = binding.ExpiresAt ?? exception.ExpiresAt,
                        CachedAt = now,
                        ExceptionName = exception.Name,
                    });
                }

                // Also add entries for scope-based exceptions without explicit bindings
                if (exception.Scope.ApplyToAll || exception.Scope.AssetIds.Count > 0)
                {
                    foreach (var assetId in exception.Scope.AssetIds)
                    {
                        foreach (var advisoryId in exception.Scope.AdvisoryIds.DefaultIfEmpty(null!))
                        {
                            entries.Add(new ExceptionCacheEntry
                            {
                                ExceptionId = exception.Id,
                                AssetId = assetId,
                                AdvisoryId = advisoryId,
                                CveId = null,
                                DecisionOverride = "allow",
                                ExceptionType = exception.ExceptionType,
                                Priority = exception.Priority,
                                EffectiveFrom = exception.EffectiveFrom ?? exception.CreatedAt,
                                ExpiresAt = exception.ExpiresAt,
                                CachedAt = now,
                                ExceptionName = exception.Name,
                            });
                        }
                    }
                }
                ExceptionId = exception.Id.ToString(),
                AssetId = string.IsNullOrWhiteSpace(exception.ProjectId) ? "*" : exception.ProjectId!,
                AdvisoryId = null,
                CveId = null,
                DecisionOverride = "allow",
                ExceptionType = "waiver",
                Priority = 0,
                EffectiveFrom = exception.CreatedAt,
                ExpiresAt = exception.ExpiresAt,
                CachedAt = now,
                ExceptionName = exception.Name,
            });
        }

        if (entries.Count > 0)
@@ -430,7 +395,7 @@ internal sealed class RedisExceptionEffectiveCache : IExceptionEffectiveCache

        _logger.LogInformation(
            "Warmed cache with {Count} entries from {ExceptionCount} exceptions for tenant {TenantId} in {Duration}ms",
            entries.Count, exceptions.Length, tenantId, sw.ElapsedMilliseconds);
            entries.Count, exceptions.Count, tenantId, sw.ElapsedMilliseconds);
    }
    catch (Exception ex)
    {
@@ -584,7 +549,6 @@ internal sealed class RedisExceptionEffectiveCache : IExceptionEffectiveCache
        switch (exceptionEvent.EventType.ToLowerInvariant())
        {
            case "activated":
                // Warm the cache with the new exception
                await WarmExceptionAsync(exceptionEvent.TenantId, exceptionEvent.ExceptionId, cancellationToken)
                    .ConfigureAwait(false);
                break;
@@ -592,13 +556,11 @@ internal sealed class RedisExceptionEffectiveCache : IExceptionEffectiveCache
            case "expired":
            case "revoked":
            case "deleted":
                // Invalidate cache entries for this exception
                await InvalidateExceptionAsync(exceptionEvent.TenantId, exceptionEvent.ExceptionId, cancellationToken)
                    .ConfigureAwait(false);
                break;

            case "updated":
                // Invalidate and re-warm
                await InvalidateExceptionAsync(exceptionEvent.TenantId, exceptionEvent.ExceptionId, cancellationToken)
                    .ConfigureAwait(false);
                await WarmExceptionAsync(exceptionEvent.TenantId, exceptionEvent.ExceptionId, cancellationToken)
@@ -606,14 +568,8 @@ internal sealed class RedisExceptionEffectiveCache : IExceptionEffectiveCache
                break;

            case "created":
                // Only warm if already active
                var exception = await _repository.GetExceptionAsync(
                    exceptionEvent.TenantId, exceptionEvent.ExceptionId, cancellationToken).ConfigureAwait(false);
                if (exception?.Status == "active")
                {
                    await WarmExceptionAsync(exceptionEvent.TenantId, exceptionEvent.ExceptionId, cancellationToken)
                        .ConfigureAwait(false);
                }
                await WarmExceptionAsync(exceptionEvent.TenantId, exceptionEvent.ExceptionId, cancellationToken)
                    .ConfigureAwait(false);
                break;

            default:
@@ -626,10 +582,16 @@ internal sealed class RedisExceptionEffectiveCache : IExceptionEffectiveCache

    private async Task WarmExceptionAsync(string tenantId, string exceptionId, CancellationToken cancellationToken)
    {
        var exception = await _repository.GetExceptionAsync(tenantId, exceptionId, cancellationToken)
        if (!Guid.TryParse(exceptionId, out var exceptionGuid))
        {
            _logger.LogWarning("Unable to parse exception id {ExceptionId} for tenant {TenantId}", exceptionId, tenantId);
            return;
        }

        var exception = await _repository.GetByIdAsync(tenantId, exceptionGuid, cancellationToken)
            .ConfigureAwait(false);

        if (exception is null || exception.Status != "active")
        if (exception is null || exception.Status != ExceptionStatus.Active)
        {
            return;
        }
@@ -637,31 +599,22 @@ internal sealed class RedisExceptionEffectiveCache : IExceptionEffectiveCache
        var now = _timeProvider.GetUtcNow();
        var entries = new List<ExceptionCacheEntry>();

        var bindings = await _repository.GetBindingsForExceptionAsync(tenantId, exceptionId, cancellationToken)
            .ConfigureAwait(false);

        foreach (var binding in bindings.Where(b => b.Status == "active"))
        entries.Add(new ExceptionCacheEntry
        {
            entries.Add(new ExceptionCacheEntry
            {
                ExceptionId = exception.Id,
                AssetId = binding.AssetId,
                AdvisoryId = binding.AdvisoryId,
                CveId = binding.CveId,
                DecisionOverride = binding.DecisionOverride,
                ExceptionType = exception.ExceptionType,
                Priority = exception.Priority,
                EffectiveFrom = binding.EffectiveFrom,
                ExpiresAt = binding.ExpiresAt ?? exception.ExpiresAt,
                CachedAt = now,
                ExceptionName = exception.Name,
            });
        }
            ExceptionId = exception.Id.ToString(),
            AssetId = string.IsNullOrWhiteSpace(exception.ProjectId) ? "*" : exception.ProjectId!,
            AdvisoryId = null,
            CveId = null,
            DecisionOverride = "allow",
            ExceptionType = "waiver",
            Priority = 0,
            EffectiveFrom = exception.CreatedAt,
            ExpiresAt = exception.ExpiresAt,
            CachedAt = now,
            ExceptionName = exception.Name,
        });

        if (entries.Count > 0)
        {
            await SetBatchAsync(tenantId, entries, cancellationToken).ConfigureAwait(false);
        }
        await SetBatchAsync(tenantId, entries, cancellationToken).ConfigureAwait(false);

        _logger.LogDebug(
            "Warmed cache with {Count} entries for exception {ExceptionId}",
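For reference while reading the two warm-up paths above, this is the entry shape they populate, inferred from the object initializers (the real `ExceptionCacheEntry` may carry more members):

```csharp
// Inferred from the initializers above; "*" stands in for tenant-wide scope.
internal sealed class ExceptionCacheEntry
{
    public required string ExceptionId { get; init; }
    public required string AssetId { get; init; }           // projectId or "*"
    public string? AdvisoryId { get; init; }
    public string? CveId { get; init; }
    public required string DecisionOverride { get; init; }  // e.g. "allow"
    public required string ExceptionType { get; init; }     // e.g. "waiver"
    public int Priority { get; init; }
    public DateTimeOffset EffectiveFrom { get; init; }
    public DateTimeOffset? ExpiresAt { get; init; }
    public DateTimeOffset CachedAt { get; init; }
    public string? ExceptionName { get; init; }
}
```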
@@ -1,29 +1,27 @@
using System.Collections.ObjectModel;
using StellaOps.Auth.Abstractions;
using StellaOps.Policy.Engine.Caching;
using StellaOps.Policy.Engine.EffectiveDecisionMap;
using StellaOps.Policy.Engine.ExceptionCache;
using StellaOps.Policy.Engine.ReachabilityFacts;
using StellaOps.Policy.Engine.Telemetry;

namespace StellaOps.Policy.Engine.Options;

/// <summary>
/// Root configuration for the Policy Engine host.
/// </summary>
public sealed class PolicyEngineOptions
{
    public const string SectionName = "PolicyEngine";

    public PolicyEngineAuthorityOptions Authority { get; } = new();

    public PolicyEngineStorageOptions Storage { get; } = new();

    public PolicyEngineWorkerOptions Workers { get; } = new();

    public PolicyEngineResourceServerOptions ResourceServer { get; } = new();

    public PolicyEngineCompilationOptions Compilation { get; } = new();

    public PolicyEngineActivationOptions Activation { get; } = new();

@@ -42,11 +40,10 @@ public sealed class PolicyEngineOptions
    public ExceptionCacheOptions ExceptionCache { get; } = new();

    public PolicyEngineExceptionLifecycleOptions ExceptionLifecycle { get; } = new();

    public void Validate()
    {
        Authority.Validate();
        Storage.Validate();
        Workers.Validate();
        ResourceServer.Validate();
        Compilation.Validate();
@@ -57,196 +54,167 @@ public sealed class PolicyEngineOptions
        ExceptionLifecycle.Validate();
    }
}
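The net effect of the hunks above is that `Storage` drops out of the options root and its `Validate()` chain, leaving persistence wiring to `StellaOps.Policy.Storage.Postgres` per the scope notes earlier. A sketch of the usual bind-and-fail-fast pattern for this root (host wiring itself is not part of this hunk):

```csharp
// Bind the "PolicyEngine" section and validate eagerly so misconfiguration
// stops startup rather than surfacing mid-run.
var options = builder.Configuration
    .GetSection(PolicyEngineOptions.SectionName)
    .Get<PolicyEngineOptions>() ?? new PolicyEngineOptions();
options.Validate();
builder.Services.AddSingleton(options);
```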
public sealed class PolicyEngineAuthorityOptions
{
    public bool Enabled { get; set; } = true;

    public string Issuer { get; set; } = "https://authority.stella-ops.local";

    public string ClientId { get; set; } = "policy-engine";

    public string? ClientSecret { get; set; }

    public IList<string> Scopes { get; } = new List<string>
    {
        StellaOpsScopes.PolicyRun,
        StellaOpsScopes.FindingsRead,
        StellaOpsScopes.EffectiveWrite
    };

    public int BackchannelTimeoutSeconds { get; set; } = 30;

    public void Validate()
    {
        if (!Enabled)
        {
            return;
        }

        if (string.IsNullOrWhiteSpace(Issuer))
        {
            throw new InvalidOperationException("Policy Engine authority configuration requires an issuer.");
        }

        if (!Uri.TryCreate(Issuer, UriKind.Absolute, out var issuerUri) || !issuerUri.IsAbsoluteUri)
        {
            throw new InvalidOperationException("Policy Engine authority issuer must be an absolute URI.");
        }

        if (issuerUri.Scheme != Uri.UriSchemeHttps && !issuerUri.IsLoopback)
        {
            throw new InvalidOperationException("Policy Engine authority issuer must use HTTPS unless targeting loopback.");
        }

        if (string.IsNullOrWhiteSpace(ClientId))
        {
            throw new InvalidOperationException("Policy Engine authority configuration requires a clientId.");
        }

        if (Scopes.Count == 0)
        {
            throw new InvalidOperationException("Policy Engine authority configuration requires at least one scope.");
        }

        if (BackchannelTimeoutSeconds <= 0)
        {
            throw new InvalidOperationException("Policy Engine authority backchannel timeout must be greater than zero.");
        }
    }
}

public sealed class PolicyEngineStorageOptions
{
    public string ConnectionString { get; set; } = "mongodb://localhost:27017/policy-engine";

    public string DatabaseName { get; set; } = "policy_engine";

    public int CommandTimeoutSeconds { get; set; } = 30;

    public void Validate()
    {
        if (string.IsNullOrWhiteSpace(ConnectionString))
        {
            throw new InvalidOperationException("Policy Engine storage configuration requires a MongoDB connection string.");
        }

        if (string.IsNullOrWhiteSpace(DatabaseName))
        {
            throw new InvalidOperationException("Policy Engine storage configuration requires a database name.");
        }

        if (CommandTimeoutSeconds <= 0)
        {
            throw new InvalidOperationException("Policy Engine storage command timeout must be greater than zero.");
        }
    }

    public TimeSpan CommandTimeout => TimeSpan.FromSeconds(CommandTimeoutSeconds);
}

public sealed class PolicyEngineWorkerOptions
{
    public int SchedulerIntervalSeconds { get; set; } = 15;

    public int MaxConcurrentEvaluations { get; set; } = 4;

    public void Validate()
    {
        if (SchedulerIntervalSeconds <= 0)
        {
            throw new InvalidOperationException("Policy Engine worker interval must be greater than zero.");
        }

        if (MaxConcurrentEvaluations <= 0)
        {
            throw new InvalidOperationException("Policy Engine worker concurrency must be greater than zero.");
        }
    }
}

public sealed class PolicyEngineResourceServerOptions
{
    public string Authority { get; set; } = "https://authority.stella-ops.local";

    public IList<string> Audiences { get; } = new List<string> { "api://policy-engine" };

    public IList<string> RequiredScopes { get; } = new List<string> { StellaOpsScopes.PolicyRun };

    public IList<string> RequiredTenants { get; } = new List<string>();

    public IList<string> BypassNetworks { get; } = new List<string> { "127.0.0.1/32", "::1/128" };

    public bool RequireHttpsMetadata { get; set; } = true;

    public void Validate()
    {
        if (string.IsNullOrWhiteSpace(Authority))
        {
            throw new InvalidOperationException("Resource server configuration requires an Authority URL.");
        }

        if (!Uri.TryCreate(Authority.Trim(), UriKind.Absolute, out var uri))
        {
            throw new InvalidOperationException("Resource server Authority URL must be absolute.");
        }

        if (RequireHttpsMetadata && !uri.IsLoopback && !string.Equals(uri.Scheme, Uri.UriSchemeHttps, StringComparison.OrdinalIgnoreCase))
        {
            throw new InvalidOperationException("Resource server Authority URL must use HTTPS when HTTPS metadata is required.");
        }
    }
}

public sealed class PolicyEngineCompilationOptions
{
    /// <summary>
    /// Maximum allowed complexity score for compiled policies. Set to <c><= 0</c> to disable.
    /// </summary>
    public double MaxComplexityScore { get; set; } = 750d;

    /// <summary>
    /// Maximum allowed compilation wall-clock duration in milliseconds. Set to <c><= 0</c> to disable.
    /// </summary>
    public int MaxDurationMilliseconds { get; set; } = 1500;

    public bool EnforceComplexity => MaxComplexityScore > 0;

    public bool EnforceDuration => MaxDurationMilliseconds > 0;

    public void Validate()
    {
        if (MaxComplexityScore < 0)
        {
            throw new InvalidOperationException("Compilation.maxComplexityScore must be greater than or equal to zero.");
        }

        if (MaxDurationMilliseconds < 0)
        {
            throw new InvalidOperationException("Compilation.maxDurationMilliseconds must be greater than or equal to zero.");
        }
    }
}

public sealed class PolicyEngineActivationOptions
{
    /// <summary>
    /// Forces two distinct approvals for every activation regardless of the request payload.
    /// </summary>
    public bool ForceTwoPersonApproval { get; set; } = false;

    /// <summary>
    /// Default value applied when callers omit <c>requiresTwoPersonApproval</c>.
    /// </summary>
    public bool DefaultRequiresTwoPersonApproval { get; set; } = false;

    /// <summary>
    /// Emits structured audit logs for every activation attempt.
    /// </summary>
    public bool EmitAuditLogs { get; set; } = true;

    public void Validate()
    {
    }
}
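The compilation limits above are opt-out rather than validated upper bounds: a non-positive value disables enforcement via `EnforceComplexity`/`EnforceDuration`. A hedged sketch of the guard a compiler host would apply (`complexityScore` and `stopwatch` are illustrative names):

```csharp
// Enforce the configured budgets only when enabled (> 0).
if (options.Compilation.EnforceComplexity &&
    complexityScore > options.Compilation.MaxComplexityScore)
{
    throw new InvalidOperationException(
        $"Policy complexity {complexityScore} exceeds limit {options.Compilation.MaxComplexityScore}.");
}

if (options.Compilation.EnforceDuration &&
    stopwatch.ElapsedMilliseconds > options.Compilation.MaxDurationMilliseconds)
{
    throw new InvalidOperationException("Policy compilation exceeded its wall-clock budget.");
}
```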
@@ -319,128 +287,128 @@ public sealed class PolicyEngineEntropyOptions

public sealed class PolicyEngineRiskProfileOptions
{
    /// <summary>
    /// Enables risk profile integration for policy evaluation.
    /// </summary>
    public bool Enabled { get; set; } = true;

    /// <summary>
    /// Default profile ID to use when no profile is specified.
    /// </summary>
    public string DefaultProfileId { get; set; } = "default";

    /// <summary>
    /// Directory containing risk profile JSON files.
    /// </summary>
    public string? ProfileDirectory { get; set; }

    /// <summary>
    /// Maximum inheritance depth for profile resolution.
    /// </summary>
    public int MaxInheritanceDepth { get; set; } = 10;

    /// <summary>
    /// Whether to validate profiles against the JSON schema on load.
    /// </summary>
    public bool ValidateOnLoad { get; set; } = true;

    /// <summary>
    /// Whether to cache resolved profiles in memory.
    /// </summary>
    public bool CacheResolvedProfiles { get; set; } = true;

    /// <summary>
    /// Inline profile definitions (for config-based profiles).
    /// </summary>
    public List<RiskProfileDefinition> Profiles { get; } = new();

    public void Validate()
    {
        if (MaxInheritanceDepth <= 0)
        {
            throw new InvalidOperationException("RiskProfile.MaxInheritanceDepth must be greater than zero.");
        }

        if (string.IsNullOrWhiteSpace(DefaultProfileId))
        {
            throw new InvalidOperationException("RiskProfile.DefaultProfileId is required.");
        }
    }
}

/// <summary>
/// Inline risk profile definition in configuration.
/// </summary>
public sealed class RiskProfileDefinition
{
    /// <summary>
    /// Profile identifier.
    /// </summary>
    public required string Id { get; set; }

    /// <summary>
    /// Profile version (SemVer).
    /// </summary>
    public required string Version { get; set; }

    /// <summary>
    /// Human-readable description.
    /// </summary>
    public string? Description { get; set; }

    /// <summary>
    /// Parent profile ID for inheritance.
    /// </summary>
    public string? Extends { get; set; }

    /// <summary>
    /// Signal definitions for risk scoring.
    /// </summary>
    public List<RiskProfileSignalDefinition> Signals { get; } = new();

    /// <summary>
    /// Weight per signal name.
    /// </summary>
    public Dictionary<string, double> Weights { get; } = new();

    /// <summary>
    /// Optional metadata.
    /// </summary>
    public Dictionary<string, object?>? Metadata { get; set; }
}

/// <summary>
/// Inline signal definition in configuration.
/// </summary>
public sealed class RiskProfileSignalDefinition
{
    /// <summary>
    /// Signal name.
    /// </summary>
    public required string Name { get; set; }

    /// <summary>
    /// Signal source.
    /// </summary>
    public required string Source { get; set; }

    /// <summary>
    /// Signal type (boolean, numeric, categorical).
    /// </summary>
    public required string Type { get; set; }

    /// <summary>
    /// JSON Pointer path in evidence.
    /// </summary>
    public string? Path { get; set; }

    /// <summary>
    /// Optional transform expression.
    /// </summary>
    public string? Transform { get; set; }

    /// <summary>
    /// Optional unit for numeric signals.
    /// </summary>
    public string? Unit { get; set; }
}
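Since `Profiles` allows config-inlined definitions, here is an illustrative instance built against the shapes above (all values invented):

```csharp
// An inline profile that inherits "default" and adds one KEV-style signal.
var profile = new RiskProfileDefinition
{
    Id = "internet-facing",
    Version = "1.0.0",
    Description = "Weights known-exploited findings more heavily.",
    Extends = "default",
};
profile.Signals.Add(new RiskProfileSignalDefinition
{
    Name = "kev",
    Source = "cisa-kev",            // illustrative source id
    Type = "boolean",
    Path = "/flags/knownExploited", // JSON Pointer into evidence
});
profile.Weights["kev"] = 0.4;
riskProfileOptions.Profiles.Add(profile); // assuming a bound PolicyEngineRiskProfileOptions instance
```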
@@ -218,7 +218,6 @@ builder.Services.AddSingleton<StellaOps.Policy.Engine.Violations.ViolationEventS
builder.Services.AddSingleton<StellaOps.Policy.Engine.Violations.SeverityFusionService>();
builder.Services.AddSingleton<StellaOps.Policy.Engine.Violations.ConflictHandlingService>();
builder.Services.AddSingleton<StellaOps.Policy.Engine.Services.PolicyDecisionService>();
builder.Services.AddSingleton<IExceptionRepository, InMemoryExceptionRepository>();
builder.Services.AddSingleton<IReachabilityFactsStore, InMemoryReachabilityFactsStore>();
builder.Services.AddSingleton<IReachabilityFactsOverlayCache, InMemoryReachabilityFactsOverlayCache>();
builder.Services.AddSingleton<ReachabilityFactsJoiningService>();
@@ -1,14 +1,14 @@
# Policy Engine Host Template

This service hosts the Policy Engine APIs and background workers introduced in **Policy Engine v2**. The project currently ships a minimal bootstrap that validates configuration, registers Authority clients, and exposes readiness/health endpoints. Future tasks will extend it with compilation, evaluation, and persistence features.

## Compliance Checklist

- [x] Configuration loads from `policy-engine.yaml`/environment variables and validates on startup.
- [x] Authority client scaffolding enforces `policy:*` + `effective:write` scopes and respects back-channel timeouts.
- [x] Resource server authentication requires Policy Engine scopes with tenant-aware policies.
- [x] Health and readiness endpoints exist for platform probes.
- [x] Deterministic policy evaluation pipeline implemented (POLICY-ENGINE-20-002).
- [x] Mongo materialisation writers implemented (POLICY-ENGINE-20-004).
- [x] PostgreSQL materialisation writers implemented (POLICY-ENGINE-20-004).
- [x] Observability (metrics/traces/logs) completed (POLICY-ENGINE-20-007).
- [x] Comprehensive test suites and perf baselines established (POLICY-ENGINE-20-008).
@@ -160,7 +160,7 @@ public sealed class InMemoryReachabilityFactsStore : IReachabilityFactsStore
}

/// <summary>
/// Index definitions for MongoDB reachability_facts collection.
/// Index definitions for reachability_facts persistence (storage-agnostic hints).
/// </summary>
public static class ReachabilityFactsIndexes
{
@@ -180,7 +180,7 @@ public static class ReachabilityFactsIndexes
    public const string ExpirationIndex = "expires_at_ttl";

    /// <summary>
    /// Gets the index definitions for creating MongoDB indexes.
    /// Gets the index definitions for creating persistence indexes.
    /// </summary>
    public static IReadOnlyList<ReachabilityIndexDefinition> GetIndexDefinitions()
    {
@@ -204,7 +204,7 @@ public static class ReachabilityFactsIndexes
}

/// <summary>
/// Index definition for MongoDB collection.
/// Index definition for reachability_facts collection.
/// </summary>
public sealed record ReachabilityIndexDefinition(
    string Name,
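The record is only partially visible in this hunk (its components after `Name` are elided), and the reworded comments make the point that `GetIndexDefinitions()` now feeds any backend, not Mongo specifically. A sketch of an adapter consuming the hints, touching only what the diff shows:

```csharp
// A storage adapter consuming the storage-agnostic hints; only Name is
// visible in the hunk, so nothing else is assumed about the record.
foreach (var index in ReachabilityFactsIndexes.GetIndexDefinitions())
{
    logger.LogInformation("Ensuring reachability_facts index {Name}", index.Name);
    // Map the definition to the backend's native index-creation call here.
}
```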
@@ -10,7 +10,6 @@
  </PropertyGroup>

  <ItemGroup>
    <PackageReference Include="MongoDB.Driver" Version="3.5.0" />
    <PackageReference Include="StackExchange.Redis" Version="2.8.37" />
    <PackageReference Include="OpenTelemetry.Exporter.Console" Version="1.12.0" />
    <PackageReference Include="OpenTelemetry.Exporter.OpenTelemetryProtocol" Version="1.12.0" />
@@ -1,8 +1,8 @@
using System.Collections.Immutable;
using System.Collections.Concurrent;
using System.Linq;
using StellaOps.Policy.Engine.Storage.Mongo.Documents;
using StellaOps.Policy.Engine.Storage.Mongo.Repositories;
using System.Collections.Immutable;
using System.Text.RegularExpressions;
using StellaOps.Policy.Storage.Postgres.Models;
using StellaOps.Policy.Storage.Postgres.Repositories;

namespace StellaOps.Policy.Engine.Storage.InMemory;
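The rewrite below swaps the Mongo document model for Postgres entities and a much smaller surface. A quick exercise of the new shape (method names are taken from the hunk; the `ExceptionEntity` initializer is illustrative):

```csharp
IExceptionRepository repo = new InMemoryExceptionRepository();

var created = await repo.CreateAsync(new ExceptionEntity
{
    TenantId = "tenant-a",
    Name = "waive-cve-2025-0001",
    Status = ExceptionStatus.Active,
    ExpiresAt = DateTimeOffset.UtcNow.AddDays(30),
});

var active = await repo.GetAllAsync("tenant-a", ExceptionStatus.Active);
await repo.RevokeAsync("tenant-a", created.Id, revokedBy: "secops");
var swept = await repo.ExpireAsync("tenant-a"); // flips past-due actives to Expired
```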
@@ -12,340 +12,178 @@ namespace StellaOps.Policy.Engine.Storage.InMemory;
|
||||
/// </summary>
|
||||
public sealed class InMemoryExceptionRepository : IExceptionRepository
|
||||
{
|
||||
private readonly ConcurrentDictionary<(string Tenant, string Id), PolicyExceptionDocument> _exceptions = new();
|
||||
private readonly ConcurrentDictionary<(string Tenant, string Id), ExceptionBindingDocument> _bindings = new();
|
||||
private readonly ConcurrentDictionary<(string Tenant, Guid Id), ExceptionEntity> _exceptions = new();
|
||||
|
||||
public Task<PolicyExceptionDocument> CreateExceptionAsync(PolicyExceptionDocument exception, CancellationToken cancellationToken)
|
||||
public Task<ExceptionEntity> CreateAsync(ExceptionEntity exception, CancellationToken cancellationToken = default)
|
||||
{
|
||||
_exceptions[(exception.TenantId.ToLowerInvariant(), exception.Id)] = Clone(exception);
|
||||
return Task.FromResult(exception);
|
||||
var id = exception.Id == Guid.Empty ? Guid.NewGuid() : exception.Id;
|
||||
var stored = Copy(exception, id);
|
||||
_exceptions[(Normalize(exception.TenantId), id)] = stored;
|
||||
return Task.FromResult(stored);
|
||||
}
|
||||
|
||||
public Task<PolicyExceptionDocument?> GetExceptionAsync(string tenantId, string exceptionId, CancellationToken cancellationToken)
|
||||
public Task<ExceptionEntity?> GetByIdAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
|
||||
{
|
||||
_exceptions.TryGetValue((tenantId.ToLowerInvariant(), exceptionId), out var value);
|
||||
return Task.FromResult(value is null ? null : Clone(value));
|
||||
_exceptions.TryGetValue((Normalize(tenantId), id), out var entity);
|
||||
return Task.FromResult(entity is null ? null : Copy(entity));
|
||||
}
|
||||
|
||||
public Task<PolicyExceptionDocument?> UpdateExceptionAsync(PolicyExceptionDocument exception, CancellationToken cancellationToken)
|
||||
public Task<ExceptionEntity?> GetByNameAsync(string tenantId, string name, CancellationToken cancellationToken = default)
|
||||
{
|
||||
_exceptions[(exception.TenantId.ToLowerInvariant(), exception.Id)] = Clone(exception);
|
||||
return Task.FromResult<PolicyExceptionDocument?>(exception);
|
||||
var match = _exceptions
|
||||
.Where(kvp => kvp.Key.Tenant == Normalize(tenantId) && kvp.Value.Name.Equals(name, StringComparison.OrdinalIgnoreCase))
|
||||
.Select(kvp => Copy(kvp.Value))
|
||||
.FirstOrDefault();
|
||||
|
||||
return Task.FromResult(match);
|
||||
}
|
||||
|
||||
public Task<ImmutableArray<PolicyExceptionDocument>> ListExceptionsAsync(ExceptionQueryOptions options, CancellationToken cancellationToken)
|
||||
public Task<IReadOnlyList<ExceptionEntity>> GetAllAsync(
|
||||
string tenantId,
|
||||
ExceptionStatus? status = null,
|
||||
int limit = 100,
|
||||
int offset = 0,
|
||||
CancellationToken cancellationToken = default)
|
||||
{
|
||||
var query = _exceptions.Values.AsEnumerable();
|
||||
var query = _exceptions
|
||||
.Where(kvp => kvp.Key.Tenant == Normalize(tenantId))
|
||||
.Select(kvp => kvp.Value);
|
||||
|
||||
if (options.Statuses.Any())
|
||||
if (status.HasValue)
|
||||
{
|
||||
query = query.Where(e => options.Statuses.Contains(e.Status, StringComparer.OrdinalIgnoreCase));
|
||||
query = query.Where(e => e.Status == status.Value);
|
||||
}
|
||||
|
||||
if (options.Types.Any())
|
||||
{
|
||||
query = query.Where(e => options.Types.Contains(e.ExceptionType, StringComparer.OrdinalIgnoreCase));
|
||||
}
|
||||
var results = query
|
||||
.Skip(offset)
|
||||
.Take(limit)
|
||||
.Select(x => Copy(x))
|
||||
.ToList();
|
||||
|
||||
return Task.FromResult(query.Select(Clone).ToImmutableArray());
|
||||
return Task.FromResult<IReadOnlyList<ExceptionEntity>>(results);
|
||||
}
|
||||
|
||||
public Task<ImmutableArray<PolicyExceptionDocument>> ListExceptionsAsync(string tenantId, ExceptionQueryOptions options, CancellationToken cancellationToken)
|
||||
public Task<IReadOnlyList<ExceptionEntity>> GetActiveForProjectAsync(
|
||||
string tenantId,
|
||||
string projectId,
|
||||
CancellationToken cancellationToken = default)
|
||||
{
|
||||
var tenant = tenantId.ToLowerInvariant();
|
||||
var scoped = _exceptions.Values.Where(e => e.TenantId.Equals(tenant, StringComparison.OrdinalIgnoreCase)).ToList();
|
||||
var result = scoped.AsEnumerable();
|
||||
var results = _exceptions
|
||||
.Where(kvp => kvp.Key.Tenant == Normalize(tenantId))
|
||||
.Select(kvp => kvp.Value)
|
||||
.Where(e => e.Status == ExceptionStatus.Active)
|
||||
.Where(e => string.IsNullOrWhiteSpace(e.ProjectId) || string.Equals(e.ProjectId, projectId, StringComparison.OrdinalIgnoreCase))
|
||||
.Select(x => Copy(x))
|
||||
.ToList();
|
||||
|
||||
if (options.Statuses.Any())
|
||||
{
|
||||
result = result.Where(e => options.Statuses.Contains(e.Status, StringComparer.OrdinalIgnoreCase));
|
||||
}
|
||||
|
||||
if (options.Types.Any())
|
||||
{
|
||||
result = result.Where(e => options.Types.Contains(e.ExceptionType, StringComparer.OrdinalIgnoreCase));
|
||||
}
|
||||
|
||||
return Task.FromResult(result.Select(Clone).ToImmutableArray());
|
||||
return Task.FromResult<IReadOnlyList<ExceptionEntity>>(results);
|
||||
}
|
||||
|
||||
public Task<ImmutableArray<PolicyExceptionDocument>> FindApplicableExceptionsAsync(string tenantId, ExceptionQueryOptions options, CancellationToken cancellationToken)
|
||||
public Task<IReadOnlyList<ExceptionEntity>> GetActiveForRuleAsync(
|
||||
string tenantId,
|
||||
string ruleName,
|
||||
CancellationToken cancellationToken = default)
|
||||
{
|
||||
var tenant = tenantId.ToLowerInvariant();
|
||||
var results = _exceptions.Values
|
||||
.Where(e => e.TenantId.Equals(tenant, StringComparison.OrdinalIgnoreCase))
|
||||
.Where(e => e.Status.Equals("active", StringComparison.OrdinalIgnoreCase))
|
||||
.Select(Clone)
|
||||
.ToImmutableArray();
|
||||
var results = _exceptions
|
||||
.Where(kvp => kvp.Key.Tenant == Normalize(tenantId))
|
||||
.Select(kvp => kvp.Value)
|
||||
.Where(e => e.Status == ExceptionStatus.Active)
|
||||
.Where(e =>
|
||||
string.IsNullOrWhiteSpace(e.RulePattern) ||
|
||||
Regex.IsMatch(ruleName, e.RulePattern, RegexOptions.IgnoreCase))
|
||||
.Select(x => Copy(x))
|
||||
.ToList();
|
||||
|
||||
return Task.FromResult(results);
|
||||
return Task.FromResult<IReadOnlyList<ExceptionEntity>>(results);
|
||||
}
|
||||
|
||||
public Task<bool> UpdateExceptionStatusAsync(string tenantId, string exceptionId, string newStatus, DateTimeOffset timestamp, CancellationToken cancellationToken)
|
||||
public Task<bool> UpdateAsync(ExceptionEntity exception, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var key = (tenantId.ToLowerInvariant(), exceptionId);
|
||||
if (!_exceptions.TryGetValue(key, out var existing))
|
||||
var key = (Normalize(exception.TenantId), exception.Id);
|
||||
if (!_exceptions.ContainsKey(key))
|
||||
{
|
||||
return Task.FromResult(false);
|
||||
}
|
||||
|
||||
var updated = Clone(existing);
|
||||
updated.Status = newStatus;
|
||||
updated.UpdatedAt = timestamp;
|
||||
if (newStatus == "active")
|
||||
{
|
||||
updated.ActivatedAt = timestamp;
|
||||
}
|
||||
if (newStatus == "expired")
|
||||
{
|
||||
updated.RevokedAt = timestamp;
|
||||
}
|
||||
|
||||
_exceptions[key] = updated;
|
||||
_exceptions[key] = Copy(exception);
|
||||
return Task.FromResult(true);
|
||||
}
|
||||
|
||||
public Task<bool> RevokeExceptionAsync(string tenantId, string exceptionId, string revokedBy, string? reason, DateTimeOffset timestamp, CancellationToken cancellationToken)
|
||||
public Task<bool> ApproveAsync(string tenantId, Guid id, string approvedBy, CancellationToken cancellationToken = default)
|
||||
{
|
||||
return UpdateExceptionStatusAsync(tenantId, exceptionId, "revoked", timestamp, cancellationToken);
|
||||
// In-memory implementation treats approve as no-op since status enum has no pending state.
|
||||
return Task.FromResult(true);
|
||||
}
|
||||
|
||||
public Task<ImmutableArray<PolicyExceptionDocument>> GetExpiringExceptionsAsync(string tenantId, DateTimeOffset from, DateTimeOffset to, CancellationToken cancellationToken)
|
||||
public Task<bool> RevokeAsync(string tenantId, Guid id, string revokedBy, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var tenant = tenantId.ToLowerInvariant();
|
||||
var results = _exceptions.Values
|
||||
.Where(e => e.TenantId.Equals(tenant, StringComparison.OrdinalIgnoreCase))
|
||||
.Where(e => e.Status.Equals("active", StringComparison.OrdinalIgnoreCase))
|
||||
.Where(e => e.ExpiresAt is not null && e.ExpiresAt >= from && e.ExpiresAt <= to)
|
||||
.Select(Clone)
|
||||
.ToImmutableArray();
|
||||
|
||||
return Task.FromResult(results);
|
||||
}
|
||||
|
||||
public Task<ImmutableArray<PolicyExceptionDocument>> GetPendingActivationsAsync(string tenantId, DateTimeOffset asOf, CancellationToken cancellationToken)
|
||||
{
|
||||
var tenant = tenantId.ToLowerInvariant();
|
||||
var results = _exceptions.Values
|
||||
.Where(e => e.TenantId.Equals(tenant, StringComparison.OrdinalIgnoreCase))
|
||||
.Where(e => e.Status.Equals("approved", StringComparison.OrdinalIgnoreCase))
|
||||
.Where(e => e.EffectiveFrom is null || e.EffectiveFrom <= asOf)
|
||||
.Select(Clone)
|
||||
.ToImmutableArray();
|
||||
|
||||
return Task.FromResult(results);
|
||||
}
|
||||
|
||||
public Task<ExceptionReviewDocument> CreateReviewAsync(ExceptionReviewDocument review, CancellationToken cancellationToken)
|
||||
{
|
||||
return Task.FromResult(review);
|
||||
}
|
||||
|
||||
public Task<ExceptionReviewDocument?> GetReviewAsync(string tenantId, string reviewId, CancellationToken cancellationToken)
|
||||
{
|
||||
return Task.FromResult<ExceptionReviewDocument?>(null);
|
||||
}
|
||||
|
||||
public Task<ExceptionReviewDocument?> AddReviewDecisionAsync(string tenantId, string reviewId, ReviewDecisionDocument decision, CancellationToken cancellationToken)
|
||||
{
|
||||
return Task.FromResult<ExceptionReviewDocument?>(null);
|
||||
}
|
||||
|
||||
public Task<ExceptionReviewDocument?> CompleteReviewAsync(string tenantId, string reviewId, string finalStatus, DateTimeOffset completedAt, CancellationToken cancellationToken)
|
||||
{
|
||||
return Task.FromResult<ExceptionReviewDocument?>(null);
|
||||
}
|
||||
|
||||
public Task<ImmutableArray<ExceptionReviewDocument>> GetReviewsForExceptionAsync(string tenantId, string exceptionId, CancellationToken cancellationToken)
|
||||
{
|
||||
return Task.FromResult(ImmutableArray<ExceptionReviewDocument>.Empty);
|
||||
}
|
||||
|
||||
public Task<ImmutableArray<ExceptionReviewDocument>> GetPendingReviewsAsync(string tenantId, string? reviewerId, CancellationToken cancellationToken)
{
    return Task.FromResult(ImmutableArray<ExceptionReviewDocument>.Empty);
}

public Task<ExceptionBindingDocument> UpsertBindingAsync(ExceptionBindingDocument binding, CancellationToken cancellationToken)
{
    _bindings[(binding.TenantId.ToLowerInvariant(), binding.Id)] = Clone(binding);
    return Task.FromResult(binding);
}

public Task<ImmutableArray<ExceptionBindingDocument>> GetBindingsForExceptionAsync(string tenantId, string exceptionId, CancellationToken cancellationToken)
{
    var tenant = tenantId.ToLowerInvariant();
    var results = _bindings.Values
        .Where(b => b.TenantId.Equals(tenant, StringComparison.OrdinalIgnoreCase) && b.ExceptionId == exceptionId)
        .Select(Clone)
        .ToImmutableArray();
    return Task.FromResult(results);
}

public Task<ImmutableArray<ExceptionBindingDocument>> GetActiveBindingsForAssetAsync(string tenantId, string assetId, DateTimeOffset asOf, CancellationToken cancellationToken)
{
    var tenant = tenantId.ToLowerInvariant();
    var results = _bindings.Values
        .Where(b => b.TenantId.Equals(tenant, StringComparison.OrdinalIgnoreCase))
        .Where(b => b.AssetId == assetId)
        .Where(b => b.Status == "active")
        .Where(b => b.EffectiveFrom <= asOf && (b.ExpiresAt is null || b.ExpiresAt > asOf))
        .Select(Clone)
        .ToImmutableArray();
    return Task.FromResult(results);
}

public Task<long> DeleteBindingsForExceptionAsync(string tenantId, string exceptionId, CancellationToken cancellationToken)
{
    var tenant = tenantId.ToLowerInvariant();
    var removed = _bindings.Where(kvp => kvp.Key.Tenant == tenant && kvp.Value.ExceptionId == exceptionId).ToList();
    foreach (var kvp in removed)
    {
        _bindings.TryRemove(kvp.Key, out _);
    }

    return Task.FromResult((long)removed.Count);
}

public Task<ImmutableArray<ExceptionBindingDocument>> GetExpiredBindingsAsync(string tenantId, DateTimeOffset asOf, CancellationToken cancellationToken)
{
    var tenant = tenantId.ToLowerInvariant();
    var results = _bindings.Values
        .Where(b => b.TenantId.Equals(tenant, StringComparison.OrdinalIgnoreCase))
        .Where(b => b.Status == "active")
        .Where(b => b.ExpiresAt is not null && b.ExpiresAt < asOf)
        .Select(Clone)
        .ToImmutableArray();
    return Task.FromResult(results);
}

public Task<IReadOnlyDictionary<string, int>> GetExceptionCountsByStatusAsync(string tenantId, CancellationToken cancellationToken)
{
    var tenant = tenantId.ToLowerInvariant();
    var counts = _exceptions.Values
        .Where(e => e.TenantId.Equals(tenant, StringComparison.OrdinalIgnoreCase))
        .GroupBy(e => e.Status)
        .ToDictionary(g => g.Key, g => g.Count(), StringComparer.OrdinalIgnoreCase);

    return Task.FromResult((IReadOnlyDictionary<string, int>)counts);
}

public Task<ImmutableArray<ExceptionBindingDocument>> GetExpiredBindingsAsync(string tenantId, DateTimeOffset asOf, int limit, CancellationToken cancellationToken)
{
    var tenant = tenantId.ToLowerInvariant();
    var results = _bindings.Values
        .Where(b => string.Equals(b.TenantId, tenant, StringComparison.OrdinalIgnoreCase))
        .Where(b => b.Status == "active")
        .Where(b => b.ExpiresAt is not null && b.ExpiresAt < asOf)
        .Take(limit)
        .Select(Clone)
        .ToImmutableArray();
    return Task.FromResult(results);
}

public Task<bool> UpdateBindingStatusAsync(string tenantId, string bindingId, string newStatus, CancellationToken cancellationToken)
{
    var key = _bindings.Keys.FirstOrDefault(k => string.Equals(k.Tenant, tenantId, StringComparison.OrdinalIgnoreCase) && k.Id == bindingId);
    if (key == default)
    {
        return Task.FromResult(false);
    }

    if (_bindings.TryGetValue(key, out var binding))
    {
        var updated = Clone(binding);
        updated.Status = newStatus;
        _bindings[key] = updated;
        return Task.FromResult(true);
    }

    return Task.FromResult(false);
}

// Entity-based revoke path from the replacement repository. Its declaration was
// lost in the rendered diff; the signature below is an assumption inferred from
// the body.
public Task<bool> RevokeAsync(string tenantId, Guid id, string revokedBy, CancellationToken cancellationToken = default)
{
    var key = (Normalize(tenantId), id);
    if (_exceptions.TryGetValue(key, out var existing))
    {
        _exceptions[key] = Copy(
            existing,
            statusOverride: ExceptionStatus.Revoked,
            revokedAtOverride: DateTimeOffset.UtcNow,
            revokedByOverride: revokedBy);
        return Task.FromResult(true);
    }

    return Task.FromResult(false);
}
public Task<ImmutableArray<PolicyExceptionDocument>> FindApplicableExceptionsAsync(string tenantId, string assetId, string? advisoryId, DateTimeOffset asOf, CancellationToken cancellationToken)
{
    var tenant = tenantId.ToLowerInvariant();

    var activeExceptions = _exceptions.Values
        .Where(e => string.Equals(e.TenantId, tenant, StringComparison.OrdinalIgnoreCase))
        .Where(e => e.Status.Equals("active", StringComparison.OrdinalIgnoreCase))
        .Where(e => (e.EffectiveFrom is null || e.EffectiveFrom <= asOf) && (e.ExpiresAt is null || e.ExpiresAt > asOf))
        .ToDictionary(e => e.Id, Clone);

    if (activeExceptions.Count == 0)
    {
        return Task.FromResult(ImmutableArray<PolicyExceptionDocument>.Empty);
    }

    var matchingIds = _bindings.Values
        .Where(b => string.Equals(b.TenantId, tenant, StringComparison.OrdinalIgnoreCase))
        .Where(b => b.Status == "active")
        .Where(b => b.EffectiveFrom <= asOf && (b.ExpiresAt is null || b.ExpiresAt > asOf))
        .Where(b => b.AssetId == assetId)
        .Where(b => advisoryId is null || string.IsNullOrEmpty(b.AdvisoryId) || b.AdvisoryId == advisoryId)
        .Select(b => b.ExceptionId)
        .ToHashSet(StringComparer.OrdinalIgnoreCase);

    foreach (var ex in activeExceptions.Values)
    {
        if (ex.Scope.ApplyToAll)
        {
            matchingIds.Add(ex.Id);
        }
        else if (ex.Scope.AssetIds.Contains(assetId, StringComparer.OrdinalIgnoreCase))
        {
            matchingIds.Add(ex.Id);
        }
        else if (advisoryId is not null && ex.Scope.AdvisoryIds.Contains(advisoryId, StringComparer.OrdinalIgnoreCase))
        {
            matchingIds.Add(ex.Id);
        }
    }

    var result = matchingIds
        .Where(activeExceptions.ContainsKey)
        .Select(id => activeExceptions[id])
        .ToImmutableArray();

    return Task.FromResult(result);
}

public Task<int> ExpireAsync(string tenantId, CancellationToken cancellationToken = default)
{
    var now = DateTimeOffset.UtcNow;
    var normalizedTenant = Normalize(tenantId);
    var expired = 0;

    foreach (var kvp in _exceptions.Where(k => k.Key.Tenant == normalizedTenant))
    {
        if (kvp.Value.Status == ExceptionStatus.Active && kvp.Value.ExpiresAt is not null && kvp.Value.ExpiresAt <= now)
        {
            _exceptions[kvp.Key] = Copy(
                kvp.Value,
                statusOverride: ExceptionStatus.Expired,
                revokedAtOverride: now);
            expired++;
        }
    }

    return Task.FromResult(expired);
}

private static PolicyExceptionDocument Clone(PolicyExceptionDocument source)
{
    return new PolicyExceptionDocument
    {
        Id = source.Id,
        TenantId = source.TenantId,
        Name = source.Name,
        ExceptionType = source.ExceptionType,
        Status = source.Status,
        EffectiveFrom = source.EffectiveFrom,
        ExpiresAt = source.ExpiresAt,
        CreatedAt = source.CreatedAt,
        UpdatedAt = source.UpdatedAt,
        ActivatedAt = source.ActivatedAt,
        RevokedAt = source.RevokedAt,
        RevokedBy = source.RevokedBy,
        RevocationReason = source.RevocationReason,
        Scope = source.Scope,
        RiskAssessment = source.RiskAssessment,
        Tags = source.Tags,
    };
}

private static ExceptionBindingDocument Clone(ExceptionBindingDocument source)
{
    return new ExceptionBindingDocument
    {
        Id = source.Id,
        TenantId = source.TenantId,
        ExceptionId = source.ExceptionId,
        AssetId = source.AssetId,
        AdvisoryId = source.AdvisoryId,
        Status = source.Status,
        EffectiveFrom = source.EffectiveFrom,
        ExpiresAt = source.ExpiresAt,
    };
}

public Task<bool> DeleteAsync(string tenantId, Guid id, CancellationToken cancellationToken = default)
{
    var key = (Normalize(tenantId), id);
    return Task.FromResult(_exceptions.TryRemove(key, out _));
}

private static string Normalize(string value) => value.ToLowerInvariant();

private static ExceptionEntity Copy(
    ExceptionEntity source,
    Guid? idOverride = null,
    ExceptionStatus? statusOverride = null,
    DateTimeOffset? revokedAtOverride = null,
    string? revokedByOverride = null) => new()
    {
        Id = idOverride ?? source.Id,
        TenantId = source.TenantId,
        Name = source.Name,
        Description = source.Description,
        RulePattern = source.RulePattern,
        ResourcePattern = source.ResourcePattern,
        ArtifactPattern = source.ArtifactPattern,
        ProjectId = source.ProjectId,
        Reason = source.Reason,
        Status = statusOverride ?? source.Status,
        ExpiresAt = source.ExpiresAt,
        ApprovedBy = source.ApprovedBy,
        ApprovedAt = source.ApprovedAt,
        RevokedBy = revokedByOverride ?? source.RevokedBy,
        RevokedAt = revokedAtOverride ?? source.RevokedAt,
        Metadata = source.Metadata,
        CreatedAt = source.CreatedAt,
        CreatedBy = source.CreatedBy
    };
}
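For orientation, a minimal usage sketch of the new per-tenant expiry contract. Only ExpireAsync itself appears in the diff; the repository type name, the UpsertAsync helper, and the seeded entity are assumptions for illustration.

// Hypothetical: InMemoryExceptionRepository is assumed to be the class edited
// above, and UpsertAsync is an illustrative seeding helper.
var repository = new InMemoryExceptionRepository();
await repository.UpsertAsync(new ExceptionEntity
{
    TenantId = "tenant-a",
    Status = ExceptionStatus.Active,
    ExpiresAt = DateTimeOffset.UtcNow.AddMinutes(-5),
});

// Flips every active, past-expiry exception for the tenant to Expired and
// reports how many entries changed.
var expired = await repository.ExpireAsync("tenant-a");
Console.WriteLine(expired); // 1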
@@ -1,12 +1,7 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using System.Collections.Immutable;
using System.Linq;
using StellaOps.Policy.Engine.ExceptionCache;
using StellaOps.Policy.Engine.Events;
using StellaOps.Policy.Engine.Options;
using StellaOps.Policy.Engine.Storage.Mongo.Repositories;
using StellaOps.Policy.Engine.Telemetry;
using StellaOps.Policy.Storage.Postgres.Repositories;

namespace StellaOps.Policy.Engine.Workers;
@@ -17,111 +12,40 @@ namespace StellaOps.Policy.Engine.Workers;
internal sealed class ExceptionLifecycleService
{
    private readonly IExceptionRepository _repository;
    private readonly IExceptionEventPublisher _publisher;
    private readonly IOptions<PolicyEngineOptions> _options;
    private readonly TimeProvider _timeProvider;
    private readonly ILogger<ExceptionLifecycleService> _logger;

    public ExceptionLifecycleService(
        IExceptionRepository repository,
        IExceptionEventPublisher publisher,
        IOptions<PolicyEngineOptions> options,
        TimeProvider timeProvider,
        ILogger<ExceptionLifecycleService> logger)
    {
        _repository = repository ?? throw new ArgumentNullException(nameof(repository));
        _publisher = publisher ?? throw new ArgumentNullException(nameof(publisher));
        _options = options ?? throw new ArgumentNullException(nameof(options));
        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    public async Task ProcessOnceAsync(CancellationToken cancellationToken)
    {
-       var now = _timeProvider.GetUtcNow();
-       var lifecycle = _options.Value.ExceptionLifecycle;
+       var tenants = _options.Value.ResourceServer.RequiredTenants;

-       var pendingActivations = await _repository
-           .ListExceptionsAsync(new ExceptionQueryOptions
-           {
-               Statuses = ImmutableArray.Create("approved"),
-           }, cancellationToken)
-           .ConfigureAwait(false);
-
-       pendingActivations = pendingActivations
-           .Where(ex => ex.EffectiveFrom is null || ex.EffectiveFrom <= now)
-           .Take(lifecycle.MaxBatchSize)
-           .ToImmutableArray();
-
-       foreach (var ex in pendingActivations)
-       {
-           var activated = await _repository.UpdateExceptionStatusAsync(
-               ex.TenantId, ex.Id, "active", now, cancellationToken).ConfigureAwait(false);
-
-           if (!activated)
-           {
-               continue;
-           }
-
-           PolicyEngineTelemetry.RecordExceptionLifecycle(ex.TenantId, "activated");
-           await _publisher.PublishAsync(new ExceptionEvent
-           {
-               EventType = "activated",
-               TenantId = ex.TenantId,
-               ExceptionId = ex.Id,
-               ExceptionName = ex.Name,
-               ExceptionType = ex.ExceptionType,
-               OccurredAt = now,
-           }, cancellationToken).ConfigureAwait(false);
-
-           _logger.LogInformation(
-               "Activated exception {ExceptionId} for tenant {TenantId} (effective from {EffectiveFrom:o})",
-               ex.Id,
-               ex.TenantId,
-               ex.EffectiveFrom);
-       }
-
-       var expiryWindowStart = now - lifecycle.ExpiryLookback;
-       var expiryWindowEnd = now + lifecycle.ExpiryHorizon;
-
-       var expiring = await _repository
-           .ListExceptionsAsync(new ExceptionQueryOptions
-           {
-               Statuses = ImmutableArray.Create("active"),
-           }, cancellationToken)
-           .ConfigureAwait(false);
-
-       expiring = expiring
-           .Where(ex => ex.ExpiresAt is not null && ex.ExpiresAt >= expiryWindowStart && ex.ExpiresAt <= expiryWindowEnd)
-           .Take(lifecycle.MaxBatchSize)
-           .ToImmutableArray();
-
-       foreach (var ex in expiring)
-       {
-           var expired = await _repository.UpdateExceptionStatusAsync(
-               ex.TenantId, ex.Id, "expired", now, cancellationToken).ConfigureAwait(false);
-
-           if (!expired)
-           {
-               continue;
-           }
-
-           PolicyEngineTelemetry.RecordExceptionLifecycle(ex.TenantId, "expired");
-           await _publisher.PublishAsync(new ExceptionEvent
-           {
-               EventType = "expired",
-               TenantId = ex.TenantId,
-               ExceptionId = ex.Id,
-               ExceptionName = ex.Name,
-               ExceptionType = ex.ExceptionType,
-               OccurredAt = now,
-           }, cancellationToken).ConfigureAwait(false);
-
-           _logger.LogInformation(
-               "Expired exception {ExceptionId} for tenant {TenantId} at {ExpiresAt:o}",
-               ex.Id,
-               ex.TenantId,
-               ex.ExpiresAt);
-       }
+       if (tenants.Count == 0)
+       {
+           _logger.LogDebug("No tenants configured for exception lifecycle processing; skipping.");
+           return;
+       }
+
+       foreach (var tenant in tenants)
+       {
+           var expired = await _repository.ExpireAsync(tenant, cancellationToken).ConfigureAwait(false);
+
+           if (expired > 0)
+           {
+               _logger.LogInformation(
+                   "Expired {ExpiredCount} exceptions for tenant {TenantId}",
+                   expired,
+                   tenant);
+           }
+       }
    }
}
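A sketch of what now drives the reworked worker: it iterates only the tenants listed in configuration. The exact shape of PolicyEngineOptions is not shown in this diff, so the property chain below mirrors the code above but is otherwise an assumption.

// Assumed option shape, mirroring _options.Value.ResourceServer.RequiredTenants
// as read by ProcessOnceAsync above.
var options = new PolicyEngineOptions();
options.ResourceServer.RequiredTenants.Add("tenant-a");
options.ResourceServer.RequiredTenants.Add("tenant-b");

// With an empty list the worker logs a debug message and returns without
// touching the repository; otherwise it calls ExpireAsync once per tenant.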
@@ -29,9 +29,9 @@ internal sealed class PolicyEngineBootstrapWorker : BackgroundService

protected override Task ExecuteAsync(CancellationToken stoppingToken)
{
-   logger.LogInformation("Policy Engine bootstrap worker started. Authority issuer: {AuthorityIssuer}. Database: {Database}.",
-       options.Authority.Issuer,
-       options.Storage.DatabaseName);
+   logger.LogInformation(
+       "Policy Engine bootstrap worker started. Authority issuer: {AuthorityIssuer}. Storage: PostgreSQL (configured via Postgres:Policy).",
+       options.Authority.Issuer);

    if (options.RiskProfile.Enabled)
    {
@@ -1,156 +0,0 @@
using System.Net.Http.Json;
using FluentAssertions;
using Microsoft.AspNetCore.Mvc.Testing;
using Mongo2Go;
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.SbomService.Models;
using Xunit;

namespace StellaOps.SbomService.Tests;

public class SbomMongoStorageTests : IAsyncLifetime
{
    private readonly WebApplicationFactory<Program> _factory;
    private MongoDbRunner? _runner;

    public SbomMongoStorageTests(WebApplicationFactory<Program> factory)
    {
        _factory = factory;
    }

    [Fact]
    public async Task Console_catalog_reads_from_mongo_storage()
    {
        using var client = CreateClient();

        var response = await client.GetAsync("/console/sboms?artifact=mongo-api&limit=1");
        response.EnsureSuccessStatusCode();

        var payload = await response.Content.ReadFromJsonAsync<SbomCatalogResult>();
        payload.Should().NotBeNull();
        payload!.Items.Should().ContainSingle();
        payload.Items[0].Artifact.Should().Be("ghcr.io/stellaops/mongo-api");
        payload.Items[0].ProjectionHash.Should().Be("sha256:proj-mongo-2");
        payload.NextCursor.Should().Be("1");
    }

    [Fact]
    public async Task Component_lookup_returns_storage_results_and_cursor()
    {
        using var client = CreateClient();

        var response = await client.GetAsync("/components/lookup?purl=pkg:npm/mongo-lib@1.0.0&limit=1");
        response.EnsureSuccessStatusCode();

        var payload = await response.Content.ReadFromJsonAsync<ComponentLookupResult>();
        payload.Should().NotBeNull();
        payload!.CacheHint.Should().Be("storage");
        payload.Neighbors.Should().ContainSingle();
        payload.Neighbors[0].Purl.Should().Be("pkg:npm/express@4.18.2");
        payload.NextCursor.Should().Be("1");
    }

    public Task InitializeAsync()
    {
        _runner = MongoDbRunner.Start(singleNodeReplSet: false, additionalMongodArguments: "--quiet");
        return SeedMongoAsync();
    }

    public Task DisposeAsync()
    {
        _runner?.Dispose();
        return Task.CompletedTask;
    }

    private HttpClient CreateClient()
    {
        if (_runner is null)
        {
            throw new InvalidOperationException("Mongo runner not started");
        }

        var factory = _factory.WithWebHostBuilder(builder =>
        {
            builder.ConfigureAppConfiguration((_, config) =>
            {
                var settings = new Dictionary<string, string?>
                {
                    ["SbomService:Mongo:ConnectionString"] = _runner.ConnectionString,
                    ["SbomService:Mongo:Database"] = "sbom_console_tests"
                };

                config.AddInMemoryCollection(settings);
            });
        });

        return factory.CreateClient();
    }

    private async Task SeedMongoAsync()
    {
        if (_runner is null)
        {
            return;
        }

        var client = new MongoClient(_runner.ConnectionString);
        var database = client.GetDatabase("sbom_console_tests");

        var catalog = database.GetCollection<BsonDocument>("sbom_catalog");
        await catalog.DeleteManyAsync(FilterDefinition<BsonDocument>.Empty);
        await catalog.InsertManyAsync(new[]
        {
            new BsonDocument
            {
                { "artifact", "ghcr.io/stellaops/mongo-api" },
                { "sbomVersion", "2025.12.04.2" },
                { "digest", "sha256:bbb" },
                { "license", "Apache-2.0" },
                { "scope", "runtime" },
                { "assetTags", new BsonDocument { { "owner", "storage" }, { "env", "prod" } } },
                { "createdAt", new BsonDateTime(DateTime.SpecifyKind(new DateTime(2025, 12, 4, 12, 0, 0), DateTimeKind.Utc)) },
                { "projectionHash", "sha256:proj-mongo-2" },
                { "evaluationMetadata", "eval:storage" }
            },
            new BsonDocument
            {
                { "artifact", "ghcr.io/stellaops/mongo-api" },
                { "sbomVersion", "2025.12.04.1" },
                { "digest", "sha256:aaa" },
                { "license", "Apache-2.0" },
                { "scope", "runtime" },
                { "assetTags", new BsonDocument { { "owner", "storage" }, { "env", "prod" } } },
                { "createdAt", new BsonDateTime(DateTime.SpecifyKind(new DateTime(2025, 12, 4, 11, 0, 0), DateTimeKind.Utc)) },
                { "projectionHash", "sha256:proj-mongo-1" },
                { "evaluationMetadata", "eval:storage" }
            }
        });

        var components = database.GetCollection<BsonDocument>("sbom_component_neighbors");
        await components.DeleteManyAsync(FilterDefinition<BsonDocument>.Empty);
        await components.InsertManyAsync(new[]
        {
            new BsonDocument
            {
                { "artifact", "ghcr.io/stellaops/mongo-api" },
                { "purl", "pkg:npm/mongo-lib@1.0.0" },
                { "neighborPurl", "pkg:npm/express@4.18.2" },
                { "relationship", "DEPENDS_ON" },
                { "license", "MIT" },
                { "scope", "runtime" },
                { "runtimeFlag", true }
            },
            new BsonDocument
            {
                { "artifact", "ghcr.io/stellaops/mongo-api" },
                { "purl", "pkg:npm/mongo-lib@1.0.0" },
                { "neighborPurl", "pkg:npm/body-parser@1.20.2" },
                { "relationship", "DEPENDS_ON" },
                { "license", "MIT" },
                { "scope", "runtime" },
                { "runtimeFlag", true }
            }
        });
    }
}
@@ -31,8 +31,6 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Concelier.Normali
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Ingestion.Telemetry", "..\__Libraries\StellaOps.Ingestion.Telemetry\StellaOps.Ingestion.Telemetry.csproj", "{F921862B-2057-4E57-9765-2C34764BC226}"
EndProject
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Provenance.Mongo", "..\__Libraries\StellaOps.Provenance.Mongo\StellaOps.Provenance.Mongo.csproj", "{055EDD0B-F513-40C8-BAC0-80815BCE45E3}"
-EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Plugin", "..\__Libraries\StellaOps.Plugin\StellaOps.Plugin.csproj", "{872BE10D-03C8-4F6A-9D4C-F56FFDCC6B16}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Aoc", "..\Aoc\__Libraries\StellaOps.Aoc\StellaOps.Aoc.csproj", "{DA1297B3-5B0A-4B4F-A213-9D0E633233EE}"
@@ -215,18 +213,6 @@ Global
{F921862B-2057-4E57-9765-2C34764BC226}.Release|x64.Build.0 = Release|Any CPU
{F921862B-2057-4E57-9765-2C34764BC226}.Release|x86.ActiveCfg = Release|Any CPU
{F921862B-2057-4E57-9765-2C34764BC226}.Release|x86.Build.0 = Release|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Debug|Any CPU.Build.0 = Debug|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Debug|x64.ActiveCfg = Debug|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Debug|x64.Build.0 = Debug|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Debug|x86.ActiveCfg = Debug|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Debug|x86.Build.0 = Debug|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Release|Any CPU.ActiveCfg = Release|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Release|Any CPU.Build.0 = Release|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Release|x64.ActiveCfg = Release|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Release|x64.Build.0 = Release|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Release|x86.ActiveCfg = Release|Any CPU
-{055EDD0B-F513-40C8-BAC0-80815BCE45E3}.Release|x86.Build.0 = Release|Any CPU
{872BE10D-03C8-4F6A-9D4C-F56FFDCC6B16}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{872BE10D-03C8-4F6A-9D4C-F56FFDCC6B16}.Debug|Any CPU.Build.0 = Debug|Any CPU
{872BE10D-03C8-4F6A-9D4C-F56FFDCC6B16}.Debug|x64.ActiveCfg = Debug|Any CPU
@@ -1,15 +0,0 @@
namespace StellaOps.SbomService.Options;

/// <summary>
/// MongoDB configuration for SBOM Service storage-backed endpoints.
/// </summary>
public sealed class SbomMongoOptions
{
    public string? ConnectionString { get; set; }

    public string Database { get; set; } = "sbom_service";

    public string CatalogCollection { get; set; } = "sbom_catalog";

    public string ComponentLookupCollection { get; set; } = "sbom_component_neighbors";
}
@@ -2,9 +2,7 @@ using System.Globalization;
using System.Diagnostics.Metrics;
using Microsoft.AspNetCore.Mvc;
-using Microsoft.Extensions.Options;
-using MongoDB.Driver;
using StellaOps.SbomService.Models;
using StellaOps.SbomService.Options;
using StellaOps.SbomService.Services;
using StellaOps.SbomService.Observability;
using StellaOps.SbomService.Repositories;
@@ -20,72 +18,38 @@ builder.Configuration
builder.Services.AddOptions();
builder.Services.AddLogging();

-var mongoSection = builder.Configuration.GetSection("SbomService:Mongo");
-builder.Services.Configure<SbomMongoOptions>(mongoSection);
-var mongoConnectionString = mongoSection.GetValue<string>("ConnectionString");
-var mongoConfigured = !string.IsNullOrWhiteSpace(mongoConnectionString);
-
-// Register SBOM query services (Mongo when configured; otherwise file-backed fixtures when present; fallback to in-memory seeds).
-if (mongoConfigured)
-{
-    builder.Services.AddSingleton<IMongoClient>(sp =>
-    {
-        var options = sp.GetRequiredService<IOptions<SbomMongoOptions>>().Value;
-        var url = new MongoUrl(options.ConnectionString!);
-        var settings = MongoClientSettings.FromUrl(url);
-        settings.ServerSelectionTimeout = TimeSpan.FromSeconds(5);
-        settings.RetryWrites = false;
-        return new MongoClient(settings);
-    });
-
-    builder.Services.AddSingleton<IMongoDatabase>(sp =>
-    {
-        var options = sp.GetRequiredService<IOptions<SbomMongoOptions>>().Value;
-        var client = sp.GetRequiredService<IMongoClient>();
-        var url = new MongoUrl(options.ConnectionString!);
-        var databaseName = string.IsNullOrWhiteSpace(options.Database)
-            ? url.DatabaseName ?? "sbom_service"
-            : options.Database;
-        return client.GetDatabase(databaseName);
-    });
-
-    builder.Services.AddSingleton<IComponentLookupRepository, MongoComponentLookupRepository>();
-    builder.Services.AddSingleton<ICatalogRepository, MongoCatalogRepository>();
-}
-else
-{
-    builder.Services.AddSingleton<IComponentLookupRepository>(sp =>
-    {
-        var config = sp.GetRequiredService<IConfiguration>();
-        var env = sp.GetRequiredService<IHostEnvironment>();
-        var configured = config.GetValue<string>("SbomService:ComponentLookupPath");
-        if (!string.IsNullOrWhiteSpace(configured) && File.Exists(configured))
-        {
-            return new FileComponentLookupRepository(configured!);
-        }
-
-        var candidate = FindFixture(env, "component_lookup.json");
-        return candidate is not null
-            ? new FileComponentLookupRepository(candidate)
-            : new InMemoryComponentLookupRepository();
-    });
-
-    builder.Services.AddSingleton<ICatalogRepository>(sp =>
-    {
-        var config = sp.GetRequiredService<IConfiguration>();
-        var env = sp.GetRequiredService<IHostEnvironment>();
-        var configured = config.GetValue<string>("SbomService:CatalogPath");
-        if (!string.IsNullOrWhiteSpace(configured) && File.Exists(configured))
-        {
-            return new FileCatalogRepository(configured!);
-        }
-
-        var candidate = FindFixture(env, "catalog.json");
-        return candidate is not null
-            ? new FileCatalogRepository(candidate)
-            : new InMemoryCatalogRepository();
-    });
-}
+// Register SBOM query services using file-backed fixtures when present; fallback to in-memory seeds.
+builder.Services.AddSingleton<IComponentLookupRepository>(sp =>
+{
+    var config = sp.GetRequiredService<IConfiguration>();
+    var env = sp.GetRequiredService<IHostEnvironment>();
+    var configured = config.GetValue<string>("SbomService:ComponentLookupPath");
+    if (!string.IsNullOrWhiteSpace(configured) && File.Exists(configured))
+    {
+        return new FileComponentLookupRepository(configured!);
+    }
+
+    var candidate = FindFixture(env, "component_lookup.json");
+    return candidate is not null
+        ? new FileComponentLookupRepository(candidate)
+        : new InMemoryComponentLookupRepository();
+});
+
+builder.Services.AddSingleton<ICatalogRepository>(sp =>
+{
+    var config = sp.GetRequiredService<IConfiguration>();
+    var env = sp.GetRequiredService<IHostEnvironment>();
+    var configured = config.GetValue<string>("SbomService:CatalogPath");
+    if (!string.IsNullOrWhiteSpace(configured) && File.Exists(configured))
+    {
+        return new FileCatalogRepository(configured!);
+    }
+
+    var candidate = FindFixture(env, "catalog.json");
+    return candidate is not null
+        ? new FileCatalogRepository(candidate)
+        : new InMemoryCatalogRepository();
+});
builder.Services.AddSingleton<IClock, SystemClock>();
builder.Services.AddSingleton<ISbomEventStore, InMemorySbomEventStore>();
builder.Services.AddSingleton<ISbomEventPublisher>(sp => sp.GetRequiredService<ISbomEventStore>());
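The fixture-backed registration above is driven by two configuration keys. A minimal sketch of pointing them at local files; the keys are taken verbatim from the code, while the paths are invented examples:

// Paths are illustrative; when a key is unset or the file is missing, the
// service falls back to FindFixture(...) and finally to in-memory seeds.
builder.Configuration.AddInMemoryCollection(new Dictionary<string, string?>
{
    ["SbomService:CatalogPath"] = "/var/lib/stellaops/fixtures/catalog.json",
    ["SbomService:ComponentLookupPath"] = "/var/lib/stellaops/fixtures/component_lookup.json",
});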
@@ -1,138 +0,0 @@
using System.Text.RegularExpressions;
using Microsoft.Extensions.Options;
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.SbomService.Models;
using StellaOps.SbomService.Options;

namespace StellaOps.SbomService.Repositories;

public sealed class MongoCatalogRepository : ICatalogRepository
{
    private readonly IMongoCollection<CatalogDocument> _collection;
    private readonly Collation _caseInsensitive = new("en", strength: CollationStrength.Secondary);

    public MongoCatalogRepository(IMongoDatabase database, IOptions<SbomMongoOptions> options)
    {
        ArgumentNullException.ThrowIfNull(database);
        ArgumentNullException.ThrowIfNull(options);

        var opts = options.Value;
        var collectionName = string.IsNullOrWhiteSpace(opts.CatalogCollection)
            ? "sbom_catalog"
            : opts.CatalogCollection;

        _collection = database.GetCollection<CatalogDocument>(collectionName);
        EnsureIndexes();
    }

    public async Task<IReadOnlyList<CatalogRecord>> ListAsync(CancellationToken cancellationToken)
    {
        var items = await _collection
            .Find(FilterDefinition<CatalogDocument>.Empty)
            .SortByDescending(c => c.CreatedAt)
            .ThenBy(c => c.Artifact)
            .ToListAsync(cancellationToken);

        return items.Select(Map).ToList();
    }

    public async Task<(IReadOnlyList<CatalogRecord> Items, int Total)> QueryAsync(SbomCatalogQuery query, CancellationToken cancellationToken)
    {
        var filter = BuildFilter(query);

        var total = (int)await _collection.CountDocumentsAsync(filter, cancellationToken: cancellationToken);

        var items = await _collection
            .Find(filter, new FindOptions { Collation = _caseInsensitive })
            .SortByDescending(c => c.CreatedAt)
            .ThenBy(c => c.Artifact)
            .Skip(query.Offset)
            .Limit(query.Limit)
            .ToListAsync(cancellationToken);

        return (items.Select(Map).ToList(), total);
    }

    private FilterDefinition<CatalogDocument> BuildFilter(SbomCatalogQuery query)
    {
        var filter = Builders<CatalogDocument>.Filter.Empty;

        if (!string.IsNullOrWhiteSpace(query.Artifact))
        {
            filter &= Builders<CatalogDocument>.Filter.Regex(c => c.Artifact, new BsonRegularExpression(query.Artifact, "i"));
        }

        if (!string.IsNullOrWhiteSpace(query.License))
        {
            var escaped = Regex.Escape(query.License);
            filter &= Builders<CatalogDocument>.Filter.Regex(c => c.License, new BsonRegularExpression($"^{escaped}$", "i"));
        }

        if (!string.IsNullOrWhiteSpace(query.Scope))
        {
            filter &= Builders<CatalogDocument>.Filter.Eq(c => c.Scope, query.Scope);
        }

        if (!string.IsNullOrWhiteSpace(query.AssetTag))
        {
            filter &= Builders<CatalogDocument>.Filter.Exists($"AssetTags.{query.AssetTag}");
        }

        return filter;
    }

    private void EnsureIndexes()
    {
        var timestampIdx = Builders<CatalogDocument>.IndexKeys
            .Descending(c => c.CreatedAt)
            .Ascending(c => c.Artifact);

        _collection.Indexes.CreateOne(new CreateIndexModel<CatalogDocument>(timestampIdx, new CreateIndexOptions
        {
            Name = "catalog_created_artifact"
        }));

        var filterIdx = Builders<CatalogDocument>.IndexKeys
            .Ascending(c => c.Artifact)
            .Ascending(c => c.Scope)
            .Ascending(c => c.License);

        _collection.Indexes.CreateOne(new CreateIndexModel<CatalogDocument>(filterIdx, new CreateIndexOptions
        {
            Name = "catalog_filters"
        }));

        var assetTagIdx = Builders<CatalogDocument>.IndexKeys
            .Ascending("AssetTags");

        _collection.Indexes.CreateOne(new CreateIndexModel<CatalogDocument>(assetTagIdx, new CreateIndexOptions
        {
            Name = "catalog_asset_tags"
        }));
    }

    private static CatalogRecord Map(CatalogDocument doc) => new(
        doc.Artifact,
        doc.SbomVersion,
        doc.Digest,
        doc.License,
        doc.Scope,
        doc.AssetTags ?? new Dictionary<string, string>(StringComparer.Ordinal),
        doc.CreatedAt,
        doc.ProjectionHash,
        doc.EvaluationMetadata ?? string.Empty);

    private sealed class CatalogDocument
    {
        public string Artifact { get; set; } = string.Empty;
        public string SbomVersion { get; set; } = string.Empty;
        public string Digest { get; set; } = string.Empty;
        public string? License { get; set; }
        public string Scope { get; set; } = string.Empty;
        public Dictionary<string, string>? AssetTags { get; set; }
        public DateTimeOffset CreatedAt { get; set; }
        public string ProjectionHash { get; set; } = string.Empty;
        public string? EvaluationMetadata { get; set; }
    }
}
@@ -1,80 +0,0 @@
using Microsoft.Extensions.Options;
using MongoDB.Driver;
using StellaOps.SbomService.Models;
using StellaOps.SbomService.Options;

namespace StellaOps.SbomService.Repositories;

public sealed class MongoComponentLookupRepository : IComponentLookupRepository
{
    private readonly IMongoCollection<ComponentDocument> _collection;
    private readonly Collation _caseInsensitive = new("en", strength: CollationStrength.Secondary);

    public MongoComponentLookupRepository(IMongoDatabase database, IOptions<SbomMongoOptions> options)
    {
        ArgumentNullException.ThrowIfNull(database);
        ArgumentNullException.ThrowIfNull(options);

        var opts = options.Value;
        var collectionName = string.IsNullOrWhiteSpace(opts.ComponentLookupCollection)
            ? "sbom_component_neighbors"
            : opts.ComponentLookupCollection;

        _collection = database.GetCollection<ComponentDocument>(collectionName);
        EnsureIndexes();
    }

    public async Task<(IReadOnlyList<ComponentLookupRecord> Items, int Total)> QueryAsync(ComponentLookupQuery query, CancellationToken cancellationToken)
    {
        var filter = Builders<ComponentDocument>.Filter.Eq(c => c.Purl, query.Purl);

        if (!string.IsNullOrWhiteSpace(query.Artifact))
        {
            filter &= Builders<ComponentDocument>.Filter.Eq(c => c.Artifact, query.Artifact);
        }

        var total = (int)await _collection.CountDocumentsAsync(filter, cancellationToken: cancellationToken);

        var items = await _collection
            .Find(filter, new FindOptions { Collation = _caseInsensitive })
            .SortBy(c => c.Artifact)
            .ThenBy(c => c.NeighborPurl)
            .Skip(query.Offset)
            .Limit(query.Limit)
            .ToListAsync(cancellationToken);

        return (items.Select(Map).ToList(), total);
    }

    private void EnsureIndexes()
    {
        var purlArtifact = Builders<ComponentDocument>.IndexKeys
            .Ascending(c => c.Purl)
            .Ascending(c => c.Artifact);

        _collection.Indexes.CreateOne(new CreateIndexModel<ComponentDocument>(purlArtifact, new CreateIndexOptions
        {
            Name = "component_lookup_purl_artifact"
        }));
    }

    private static ComponentLookupRecord Map(ComponentDocument doc) => new(
        doc.Artifact,
        doc.Purl,
        doc.NeighborPurl,
        doc.Relationship,
        doc.License,
        doc.Scope,
        doc.RuntimeFlag);

    private sealed class ComponentDocument
    {
        public string Artifact { get; set; } = string.Empty;
        public string Purl { get; set; } = string.Empty;
        public string NeighborPurl { get; set; } = string.Empty;
        public string Relationship { get; set; } = string.Empty;
        public string? License { get; set; }
        public string Scope { get; set; } = string.Empty;
        public bool RuntimeFlag { get; set; }
    }
}
@@ -15,6 +15,5 @@
  </ItemGroup>

  <ItemGroup>
-   <PackageReference Include="MongoDB.Driver" Version="3.5.0" />
  </ItemGroup>
</Project>
@@ -2,31 +2,31 @@ using System;
using System.Collections.Generic;
using StellaOps.Configuration;
using StellaOps.Scanner.Storage;

namespace StellaOps.Scanner.WebService.Options;

/// <summary>
/// Strongly typed configuration for the Scanner WebService host.
/// </summary>
public sealed class ScannerWebServiceOptions
{
    public const string SectionName = "scanner";

    /// <summary>
    /// Schema version for configuration consumers to coordinate breaking changes.
    /// </summary>
    public int SchemaVersion { get; set; } = 1;

    /// <summary>
-   /// Mongo storage configuration used for catalog and job state.
+   /// PostgreSQL storage configuration used for catalog and job state.
    /// </summary>
    public StorageOptions Storage { get; set; } = new();

    /// <summary>
    /// Queue configuration used to enqueue scan jobs.
    /// </summary>
    public QueueOptions Queue { get; set; } = new();

    /// <summary>
    /// Object store configuration for SBOM artefacts.
    /// </summary>
@@ -36,32 +36,32 @@ public sealed class ScannerWebServiceOptions
    /// Registry credential configuration for report/export operations.
    /// </summary>
    public RegistryOptions Registry { get; set; } = new();

    /// <summary>
    /// Feature flags toggling optional behaviours.
    /// </summary>
    public FeatureFlagOptions Features { get; set; } = new();

    /// <summary>
    /// Plug-in loader configuration.
    /// </summary>
    public PluginOptions Plugins { get; set; } = new();

    /// <summary>
    /// Telemetry configuration for logs, metrics, traces.
    /// </summary>
    public TelemetryOptions Telemetry { get; set; } = new();

    /// <summary>
    /// Authority / authentication configuration.
    /// </summary>
    public AuthorityOptions Authority { get; set; } = new();

    /// <summary>
    /// Signing configuration for report envelopes and attestations.
    /// </summary>
    public SigningOptions Signing { get; set; } = new();

    /// <summary>
    /// API-specific settings such as base path.
    /// </summary>
@@ -91,39 +91,42 @@ public sealed class ScannerWebServiceOptions
    /// Deterministic execution switches for tests and replay.
    /// </summary>
    public DeterminismOptions Determinism { get; set; } = new();

    public sealed class StorageOptions
    {
-       public string Driver { get; set; } = "mongo";
+       public string Driver { get; set; } = "postgres";

        public string Dsn { get; set; } = string.Empty;

+       /// <summary>
+       /// Optional schema name for scanner tables. Defaults to <c>scanner</c> when not provided.
+       /// </summary>
        public string? Database { get; set; }

        public int CommandTimeoutSeconds { get; set; } = 30;

        public int HealthCheckTimeoutSeconds { get; set; } = 5;

        public IList<string> Migrations { get; set; } = new List<string>();
    }

    public sealed class QueueOptions
    {
        public string Driver { get; set; } = "redis";

        public string Dsn { get; set; } = string.Empty;

        public string Namespace { get; set; } = "scanner";

        public int VisibilityTimeoutSeconds { get; set; } = 300;

        public int LeaseHeartbeatSeconds { get; set; } = 30;

        public int MaxDeliveryAttempts { get; set; } = 5;

        public IDictionary<string, string> DriverSettings { get; set; } = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
    }

    public sealed class ArtifactStoreOptions
    {
        public string Driver { get; set; } = "rustfs";
@@ -159,114 +162,114 @@ public sealed class ScannerWebServiceOptions

        public IDictionary<string, string> Headers { get; set; } = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
    }

    public sealed class FeatureFlagOptions
    {
        public bool AllowAnonymousScanSubmission { get; set; }

        public bool EnableSignedReports { get; set; } = true;

        public bool EnablePolicyPreview { get; set; } = true;

        public IDictionary<string, bool> Experimental { get; set; } = new Dictionary<string, bool>(StringComparer.OrdinalIgnoreCase);
    }

    public sealed class PluginOptions
    {
        public string? BaseDirectory { get; set; }

        public string? Directory { get; set; }

        public IList<string> SearchPatterns { get; set; } = new List<string>();

        public IList<string> OrderedPlugins { get; set; } = new List<string>();
    }

    public sealed class TelemetryOptions
    {
        public bool Enabled { get; set; } = true;

        public bool EnableTracing { get; set; } = true;

        public bool EnableMetrics { get; set; } = true;

        public bool EnableLogging { get; set; } = true;

        public bool EnableRequestLogging { get; set; } = true;

        public string MinimumLogLevel { get; set; } = "Information";

        public string? ServiceName { get; set; }

        public string? OtlpEndpoint { get; set; }

        public IDictionary<string, string> OtlpHeaders { get; set; } = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);

        public IDictionary<string, string> ResourceAttributes { get; set; } = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
    }

    public sealed class AuthorityOptions
    {
        public bool Enabled { get; set; }

        public bool AllowAnonymousFallback { get; set; } = true;

        public string Issuer { get; set; } = string.Empty;

        public string? MetadataAddress { get; set; }

        public bool RequireHttpsMetadata { get; set; } = true;

        public int BackchannelTimeoutSeconds { get; set; } = 30;

        public int TokenClockSkewSeconds { get; set; } = 60;

        public IList<string> Audiences { get; set; } = new List<string>();

        public IList<string> RequiredScopes { get; set; } = new List<string>();

        public IList<string> BypassNetworks { get; set; } = new List<string>();

        public string? ClientId { get; set; }

        public string? ClientSecret { get; set; }

        public string? ClientSecretFile { get; set; }

        public IList<string> ClientScopes { get; set; } = new List<string>();

        public ResilienceOptions Resilience { get; set; } = new();

        public sealed class ResilienceOptions
        {
            public bool? EnableRetries { get; set; }

            public IList<TimeSpan> RetryDelays { get; set; } = new List<TimeSpan>();

            public bool? AllowOfflineCacheFallback { get; set; }

            public TimeSpan? OfflineCacheTolerance { get; set; }
        }
    }

    public sealed class SigningOptions
    {
        public bool Enabled { get; set; } = false;

        public string KeyId { get; set; } = string.Empty;

        public string Algorithm { get; set; } = "ed25519";

        public string? Provider { get; set; }

        public string? KeyPem { get; set; }

        public string? KeyPemFile { get; set; }

        public string? CertificatePem { get; set; }

        public string? CertificatePemFile { get; set; }

        public string? CertificateChainPem { get; set; }

        public string? CertificateChainPemFile { get; set; }
@@ -305,7 +308,7 @@ public sealed class ScannerWebServiceOptions

        public string? Email { get; set; }
    }

    public sealed class ApiOptions
    {
        public string BasePath { get; set; } = "/api/v1";
@@ -333,13 +336,13 @@ public sealed class ScannerWebServiceOptions
    public sealed class EventsOptions
    {
        public bool Enabled { get; set; }

        public string Driver { get; set; } = "redis";

        public string Dsn { get; set; } = string.Empty;

        public string Stream { get; set; } = "stella.events";

        public double PublishTimeoutSeconds { get; set; } = 5;

        public long MaxStreamLength { get; set; } = 10000;
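Given the new defaults above, a scanner host binds its storage section to PostgreSQL rather than Mongo. A hedged sketch of the equivalent flat configuration; the section name "scanner" and the property names come from the options class, while the DSN value is an invented example:

// Sketch only: binds the reworked StorageOptions shown above.
var configuration = new ConfigurationBuilder()
    .AddInMemoryCollection(new Dictionary<string, string?>
    {
        ["scanner:storage:driver"] = "postgres",
        ["scanner:storage:dsn"] = "Host=postgres;Database=stellaops;Username=scanner",
        // Under the Postgres driver, "database" is reinterpreted as the schema
        // name and defaults to "scanner" when left empty.
        ["scanner:storage:database"] = "scanner",
    })
    .Build();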
@@ -38,7 +38,6 @@ using StellaOps.Scanner.WebService.Security;
using StellaOps.Scanner.WebService.Replay;
using StellaOps.Scanner.Storage;
using StellaOps.Scanner.Storage.Extensions;
-using StellaOps.Scanner.Storage.Mongo;
using StellaOps.Scanner.WebService.Endpoints;
using StellaOps.Scanner.WebService.Options;
@@ -138,15 +137,12 @@ else
builder.Services.AddSingleton<IReportEventDispatcher, ReportEventDispatcher>();
builder.Services.AddScannerStorage(storageOptions =>
{
-   storageOptions.Mongo.ConnectionString = bootstrapOptions.Storage.Dsn;
-   if (!string.IsNullOrWhiteSpace(bootstrapOptions.Storage.Database))
-   {
-       storageOptions.Mongo.DatabaseName = bootstrapOptions.Storage.Database;
-   }
-
-   storageOptions.Mongo.CommandTimeout = TimeSpan.FromSeconds(bootstrapOptions.Storage.CommandTimeoutSeconds);
-   storageOptions.Mongo.UseMajorityReadConcern = true;
-   storageOptions.Mongo.UseMajorityWriteConcern = true;
+   storageOptions.Postgres.ConnectionString = bootstrapOptions.Storage.Dsn;
+   storageOptions.Postgres.SchemaName = string.IsNullOrWhiteSpace(bootstrapOptions.Storage.Database)
+       ? ScannerStorageDefaults.DefaultSchemaName
+       : bootstrapOptions.Storage.Database!.Trim();
+   storageOptions.Postgres.CommandTimeoutSeconds = bootstrapOptions.Storage.CommandTimeoutSeconds;
+   storageOptions.Postgres.AutoMigrate = true;

    storageOptions.ObjectStore.Headers.Clear();
    foreach (var header in bootstrapOptions.ArtifactStore.Headers)
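One behavioural nuance of the rewiring above: the pre-existing Storage.Database setting now feeds the Postgres schema name rather than a Mongo database name. A small sketch of the fallback, assuming ScannerStorageDefaults.DefaultSchemaName is "scanner":

// Mirrors the mapping in the AddScannerStorage callback above; the literal
// "scanner" stands in for ScannerStorageDefaults.DefaultSchemaName.
static string ResolveSchemaName(string? database) =>
    string.IsNullOrWhiteSpace(database) ? "scanner" : database.Trim();

// ResolveSchemaName(null)        -> "scanner"
// ResolveSchemaName(" catalog ") -> "catalog"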
@@ -335,12 +331,6 @@ if (authorityConfigured && resolvedOptions.Authority.AllowAnonymousFallback)
        "Scanner authority authentication is enabled but anonymous fallback remains allowed. Disable fallback before production rollout.");
}

-using (var scope = app.Services.CreateScope())
-{
-   var bootstrapper = scope.ServiceProvider.GetRequiredService<MongoBootstrapper>();
-   await bootstrapper.InitializeAsync(CancellationToken.None).ConfigureAwait(false);
-}
-
if (resolvedOptions.Telemetry.EnableLogging && resolvedOptions.Telemetry.EnableRequestLogging)
{
    app.UseSerilogRequestLogging(options =>
@@ -184,7 +184,7 @@ internal sealed class RecordModeService : IRecordModeService
{
    var manifest = new ReplayManifest
    {
-       SchemaVersion = ReplayManifestVersions.V1,
+       SchemaVersion = ReplayManifestVersions.V2,
        Scan = new ReplayScanMetadata
        {
            Id = request.ScanId,
Some files were not shown because too many files have changed in this diff.