Add unit tests and implementations for MongoDB index models and OpenAPI metadata
- Implemented `MongoIndexModelTests` to verify index models for various stores. - Created `OpenApiMetadataFactory` with methods to generate OpenAPI metadata. - Added tests for `OpenApiMetadataFactory` to ensure expected defaults and URL overrides. - Introduced `ObserverSurfaceSecrets` and `WebhookSurfaceSecrets` for managing secrets. - Developed `RuntimeSurfaceFsClient` and `WebhookSurfaceFsClient` for manifest retrieval. - Added dependency injection tests for `SurfaceEnvironmentRegistration` in both Observer and Webhook contexts. - Implemented tests for secret resolution in `ObserverSurfaceSecretsTests` and `WebhookSurfaceSecretsTests`. - Created `EnsureLinkNotMergeCollectionsMigrationTests` to validate MongoDB migration logic. - Added project files for MongoDB tests and NuGet package mirroring.
This commit is contained in:
501
docs/api/notify-openapi.yaml
Normal file
501
docs/api/notify-openapi.yaml
Normal file
@@ -0,0 +1,501 @@
|
||||
# OpenAPI 3.1 specification for StellaOps Notifier WebService (draft)
|
||||
openapi: 3.1.0
|
||||
info:
|
||||
title: StellaOps Notifier API
|
||||
version: 0.6.0-draft
|
||||
description: |
|
||||
Contract for Notifications Studio (Notifier) covering rules, templates, incidents,
|
||||
and quiet hours. Uses the platform error envelope and tenant header `X-StellaOps-Tenant`.
|
||||
servers:
|
||||
- url: https://api.stellaops.example.com
|
||||
description: Production
|
||||
- url: https://api.dev.stellaops.example.com
|
||||
description: Development
|
||||
security:
|
||||
- oauth2: [notify.viewer]
|
||||
- oauth2: [notify.operator]
|
||||
- oauth2: [notify.admin]
|
||||
paths:
|
||||
/api/v1/notify/rules:
|
||||
get:
|
||||
summary: List notification rules
|
||||
tags: [Rules]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/PageSize'
|
||||
- $ref: '#/components/parameters/PageToken'
|
||||
responses:
|
||||
'200':
|
||||
description: Paginated rule list
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
items:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/NotifyRule' }
|
||||
nextPageToken:
|
||||
type: string
|
||||
examples:
|
||||
default:
|
||||
value:
|
||||
items:
|
||||
- ruleId: rule-critical
|
||||
tenantId: tenant-dev
|
||||
name: Critical scanner verdicts
|
||||
enabled: true
|
||||
match:
|
||||
eventKinds: [scanner.report.ready]
|
||||
minSeverity: critical
|
||||
actions:
|
||||
- actionId: act-slack-critical
|
||||
channel: chn-slack-soc
|
||||
template: tmpl-critical
|
||||
digest: instant
|
||||
nextPageToken: null
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
post:
|
||||
summary: Create a notification rule
|
||||
tags: [Rules]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyRule' }
|
||||
examples:
|
||||
create-rule:
|
||||
value:
|
||||
ruleId: rule-attest-fail
|
||||
tenantId: tenant-dev
|
||||
name: Attestation failures → SOC
|
||||
enabled: true
|
||||
match:
|
||||
eventKinds: [attestor.verification.failed]
|
||||
actions:
|
||||
- actionId: act-soc
|
||||
channel: chn-webhook-soc
|
||||
template: tmpl-attest-verify-fail
|
||||
responses:
|
||||
'201':
|
||||
description: Rule created
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyRule' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/rules/{ruleId}:
|
||||
get:
|
||||
summary: Fetch a rule
|
||||
tags: [Rules]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/RuleId'
|
||||
responses:
|
||||
'200':
|
||||
description: Rule
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyRule' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
patch:
|
||||
summary: Update a rule (partial)
|
||||
tags: [Rules]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/RuleId'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
description: JSON Merge Patch
|
||||
responses:
|
||||
'200':
|
||||
description: Updated rule
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyRule' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/templates:
|
||||
get:
|
||||
summary: List templates
|
||||
tags: [Templates]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- name: key
|
||||
in: query
|
||||
description: Filter by template key
|
||||
schema: { type: string }
|
||||
responses:
|
||||
'200':
|
||||
description: Templates
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
post:
|
||||
summary: Create a template
|
||||
tags: [Templates]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
responses:
|
||||
'201':
|
||||
description: Template created
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/templates/{templateId}:
|
||||
get:
|
||||
summary: Fetch a template
|
||||
tags: [Templates]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/TemplateId'
|
||||
responses:
|
||||
'200':
|
||||
description: Template
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
patch:
|
||||
summary: Update a template (partial)
|
||||
tags: [Templates]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/TemplateId'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
description: JSON Merge Patch
|
||||
responses:
|
||||
'200':
|
||||
description: Updated template
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/incidents:
|
||||
get:
|
||||
summary: List incidents (paged)
|
||||
tags: [Incidents]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/PageSize'
|
||||
- $ref: '#/components/parameters/PageToken'
|
||||
responses:
|
||||
'200':
|
||||
description: Incident page
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
items:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/Incident' }
|
||||
nextPageToken: { type: string }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
post:
|
||||
summary: Raise an incident (ops/toggle/override)
|
||||
tags: [Incidents]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/Incident' }
|
||||
examples:
|
||||
start-incident:
|
||||
value:
|
||||
incidentId: inc-telemetry-outage
|
||||
kind: outage
|
||||
severity: major
|
||||
startedAt: 2025-11-17T04:02:00Z
|
||||
shortDescription: "Telemetry pipeline degraded; burn-rate breach"
|
||||
metadata:
|
||||
source: slo-evaluator
|
||||
responses:
|
||||
'202':
|
||||
description: Incident accepted
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/incidents/{incidentId}/ack:
|
||||
post:
|
||||
summary: Acknowledge an incident notification
|
||||
tags: [Incidents]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/IncidentId'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
ackToken:
|
||||
type: string
|
||||
description: DSSE-signed acknowledgement token
|
||||
responses:
|
||||
'204':
|
||||
description: Acknowledged
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/quiet-hours:
|
||||
get:
|
||||
summary: Get quiet-hours schedule
|
||||
tags: [QuietHours]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
responses:
|
||||
'200':
|
||||
description: Quiet hours schedule
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/QuietHours' }
|
||||
examples:
|
||||
current:
|
||||
value:
|
||||
quietHoursId: qh-default
|
||||
windows:
|
||||
- timezone: UTC
|
||||
days: [Mon, Tue, Wed, Thu, Fri]
|
||||
start: "22:00"
|
||||
end: "06:00"
|
||||
exemptions:
|
||||
- eventKinds: [attestor.verification.failed]
|
||||
reason: "Always alert for attestation failures"
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
post:
|
||||
summary: Set quiet-hours schedule
|
||||
tags: [QuietHours]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/QuietHours' }
|
||||
responses:
|
||||
'200':
|
||||
description: Updated quiet hours
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/QuietHours' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
components:
|
||||
securitySchemes:
|
||||
oauth2:
|
||||
type: oauth2
|
||||
flows:
|
||||
clientCredentials:
|
||||
tokenUrl: https://auth.stellaops.example.com/oauth/token
|
||||
scopes:
|
||||
notify.viewer: Read-only Notifier access
|
||||
notify.operator: Manage rules/templates/incidents within tenant
|
||||
notify.admin: Tenant-scoped administration
|
||||
parameters:
|
||||
Tenant:
|
||||
name: X-StellaOps-Tenant
|
||||
in: header
|
||||
required: true
|
||||
description: Tenant slug
|
||||
schema: { type: string }
|
||||
PageSize:
|
||||
name: pageSize
|
||||
in: query
|
||||
schema: { type: integer, minimum: 1, maximum: 200, default: 50 }
|
||||
PageToken:
|
||||
name: pageToken
|
||||
in: query
|
||||
schema: { type: string }
|
||||
RuleId:
|
||||
name: ruleId
|
||||
in: path
|
||||
required: true
|
||||
schema: { type: string }
|
||||
TemplateId:
|
||||
name: templateId
|
||||
in: path
|
||||
required: true
|
||||
schema: { type: string }
|
||||
IncidentId:
|
||||
name: incidentId
|
||||
in: path
|
||||
required: true
|
||||
schema: { type: string }
|
||||
|
||||
responses:
|
||||
Error:
|
||||
description: Standard error envelope
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/ErrorEnvelope' }
|
||||
examples:
|
||||
validation:
|
||||
value:
|
||||
error:
|
||||
code: validation_failed
|
||||
message: "quietHours.windows[0].start must be HH:mm"
|
||||
traceId: "f62f3c2b9c8e4c53"
|
||||
|
||||
schemas:
|
||||
ErrorEnvelope:
|
||||
type: object
|
||||
required: [error]
|
||||
properties:
|
||||
error:
|
||||
type: object
|
||||
required: [code, message, traceId]
|
||||
properties:
|
||||
code: { type: string }
|
||||
message: { type: string }
|
||||
traceId: { type: string }
|
||||
|
||||
NotifyRule:
|
||||
type: object
|
||||
required: [ruleId, tenantId, name, match, actions]
|
||||
properties:
|
||||
ruleId: { type: string }
|
||||
tenantId: { type: string }
|
||||
name: { type: string }
|
||||
description: { type: string }
|
||||
enabled: { type: boolean, default: true }
|
||||
match: { $ref: '#/components/schemas/RuleMatch' }
|
||||
actions:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/RuleAction' }
|
||||
labels:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
metadata:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
|
||||
RuleMatch:
|
||||
type: object
|
||||
properties:
|
||||
eventKinds:
|
||||
type: array
|
||||
items: { type: string }
|
||||
minSeverity: { type: string, enum: [info, low, medium, high, critical] }
|
||||
verdicts:
|
||||
type: array
|
||||
items: { type: string }
|
||||
labels:
|
||||
type: array
|
||||
items: { type: string }
|
||||
kevOnly: { type: boolean }
|
||||
|
||||
RuleAction:
|
||||
type: object
|
||||
required: [actionId, channel]
|
||||
properties:
|
||||
actionId: { type: string }
|
||||
channel: { type: string }
|
||||
template: { type: string }
|
||||
digest: { type: string, description: "Digest window key e.g. instant|5m|15m|1h|1d" }
|
||||
throttle: { type: string, description: "ISO-8601 duration, e.g. PT5M" }
|
||||
locale: { type: string }
|
||||
enabled: { type: boolean, default: true }
|
||||
metadata:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
|
||||
NotifyTemplate:
|
||||
type: object
|
||||
required: [templateId, tenantId, key, channelType, locale, body, renderMode, format]
|
||||
properties:
|
||||
templateId: { type: string }
|
||||
tenantId: { type: string }
|
||||
key: { type: string }
|
||||
channelType: { type: string, enum: [slack, teams, email, webhook, custom] }
|
||||
locale: { type: string, description: "BCP-47, lower-case" }
|
||||
renderMode: { type: string, enum: [Markdown, Html, AdaptiveCard, PlainText, Json] }
|
||||
format: { type: string, enum: [slack, teams, email, webhook, json] }
|
||||
description: { type: string }
|
||||
body: { type: string }
|
||||
metadata:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
|
||||
Incident:
|
||||
type: object
|
||||
required: [incidentId, kind, severity, startedAt]
|
||||
properties:
|
||||
incidentId: { type: string }
|
||||
kind: { type: string, description: "outage|degradation|security|ops-drill" }
|
||||
severity: { type: string, enum: [minor, major, critical] }
|
||||
startedAt: { type: string, format: date-time }
|
||||
endedAt: { type: string, format: date-time }
|
||||
shortDescription: { type: string }
|
||||
description: { type: string }
|
||||
metadata:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
|
||||
QuietHours:
|
||||
type: object
|
||||
required: [quietHoursId, windows]
|
||||
properties:
|
||||
quietHoursId: { type: string }
|
||||
windows:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/QuietHoursWindow' }
|
||||
exemptions:
|
||||
type: array
|
||||
description: Event kinds that bypass quiet hours
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
eventKinds:
|
||||
type: array
|
||||
items: { type: string }
|
||||
reason: { type: string }
|
||||
|
||||
QuietHoursWindow:
|
||||
type: object
|
||||
required: [timezone, days, start, end]
|
||||
properties:
|
||||
timezone: { type: string, description: "IANA TZ, e.g., UTC" }
|
||||
days:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
enum: [Mon, Tue, Wed, Thu, Fri, Sat, Sun]
|
||||
start: { type: string, description: "HH:mm" }
|
||||
end: { type: string, description: "HH:mm" }
|
||||
122
docs/api/notify-pack-approvals.yaml
Normal file
122
docs/api/notify-pack-approvals.yaml
Normal file
@@ -0,0 +1,122 @@
|
||||
openapi: 3.1.0
|
||||
info:
|
||||
title: Notifier Pack Approvals Ingestion (fragment)
|
||||
version: 0.1.0-draft
|
||||
description: >
|
||||
Contract for ingesting pack approval/policy decisions emitted by Task Runner and Policy Engine.
|
||||
Served under Notifier WebService.
|
||||
paths:
|
||||
/api/v1/notify/pack-approvals:
|
||||
post:
|
||||
summary: Ingest pack approval decision
|
||||
operationId: ingestPackApproval
|
||||
tags: [PackApprovals]
|
||||
security:
|
||||
- oauth2: [notify.operator]
|
||||
- hmac: []
|
||||
parameters:
|
||||
- name: X-StellaOps-Tenant
|
||||
in: header
|
||||
required: true
|
||||
schema: { type: string }
|
||||
- name: Idempotency-Key
|
||||
in: header
|
||||
required: true
|
||||
description: Stable UUID to dedupe retries.
|
||||
schema: { type: string, format: uuid }
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/PackApprovalEvent' }
|
||||
examples:
|
||||
approval-granted:
|
||||
value:
|
||||
eventId: "20e4e5fe-3d4a-4f57-9f9b-b1a1c1111111"
|
||||
issuedAt: "2025-11-17T16:00:00Z"
|
||||
kind: "pack.approval.granted"
|
||||
packId: "offline-kit-2025-11"
|
||||
policy:
|
||||
id: "policy-123"
|
||||
version: "v5"
|
||||
decision: "approved"
|
||||
actor: "task-runner"
|
||||
resumeToken: "rt-abc123"
|
||||
summary: "All required attestations verified."
|
||||
labels:
|
||||
environment: "prod"
|
||||
approver: "ops"
|
||||
responses:
|
||||
'202':
|
||||
description: Accepted; durable write queued for processing.
|
||||
headers:
|
||||
X-Resume-After:
|
||||
description: Resume token echo or replacement
|
||||
schema: { type: string }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
components:
|
||||
securitySchemes:
|
||||
oauth2:
|
||||
type: oauth2
|
||||
flows:
|
||||
clientCredentials:
|
||||
tokenUrl: https://auth.stellaops.example.com/oauth/token
|
||||
scopes:
|
||||
notify.operator: Ingest approval events
|
||||
hmac:
|
||||
type: http
|
||||
scheme: bearer
|
||||
description: Pre-shared HMAC token (air-gap friendly) referenced by secretRef.
|
||||
|
||||
schemas:
|
||||
PackApprovalEvent:
|
||||
type: object
|
||||
required:
|
||||
- eventId
|
||||
- issuedAt
|
||||
- kind
|
||||
- packId
|
||||
- decision
|
||||
- actor
|
||||
properties:
|
||||
eventId: { type: string, format: uuid }
|
||||
issuedAt: { type: string, format: date-time }
|
||||
kind:
|
||||
type: string
|
||||
enum: [pack.approval.granted, pack.approval.denied, pack.policy.override]
|
||||
packId: { type: string }
|
||||
policy:
|
||||
type: object
|
||||
properties:
|
||||
id: { type: string }
|
||||
version: { type: string }
|
||||
decision:
|
||||
type: string
|
||||
enum: [approved, denied, overridden]
|
||||
actor: { type: string }
|
||||
resumeToken:
|
||||
type: string
|
||||
description: Opaque token for at-least-once resume.
|
||||
summary: { type: string }
|
||||
labels:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
|
||||
responses:
|
||||
Error:
|
||||
description: Error envelope
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
required: [error]
|
||||
properties:
|
||||
error:
|
||||
type: object
|
||||
required: [code, message, traceId]
|
||||
properties:
|
||||
code: { type: string }
|
||||
message: { type: string }
|
||||
traceId: { type: string }
|
||||
137
docs/api/notify-sdk-examples.md
Normal file
137
docs/api/notify-sdk-examples.md
Normal file
@@ -0,0 +1,137 @@
|
||||
# Notifier SDK Usage Examples (rules, incidents, quiet hours)
|
||||
|
||||
> Apply these conventions consistently across every SDK example in this document. Keep examples air-gap friendly and deterministic.
|
||||
|
||||
## Prerequisites
|
||||
- Token with scopes: `notify.viewer` for reads, `notify.operator` for writes (tenant-scoped).
|
||||
- Tenant header: `X-StellaOps-Tenant: <tenant-id>`.
|
||||
- Base URL: `https://api.stellaops.example.com`.
|
||||
- OpenAPI document: `/.well-known/openapi` (served by Notifier).
|
||||
|
||||
## Rules CRUD
|
||||
### cURL
|
||||
```bash
|
||||
# Create rule
|
||||
curl -X POST "$BASE/api/v1/notify/rules" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "X-StellaOps-Tenant: acme-prod" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"ruleId": "rule-attest-fail",
|
||||
"tenantId": "acme-prod",
|
||||
"name": "Attestation failures to SOC",
|
||||
"match": { "eventKinds": ["attestor.verification.failed"] },
|
||||
"actions": [{
|
||||
"actionId": "act-soc",
|
||||
"channel": "chn-soc-webhook",
|
||||
"template": "tmpl-attest-verify-fail",
|
||||
"digest": "instant"
|
||||
}]
|
||||
}'
|
||||
|
||||
# List rules (paginated)
|
||||
curl -H "Authorization: Bearer $TOKEN" \
|
||||
-H "X-StellaOps-Tenant: acme-prod" \
|
||||
"$BASE/api/v1/notify/rules?pageSize=50"
|
||||
```
|
||||
|
||||
### TypeScript (OpenAPI-generated client)
|
||||
```ts
|
||||
import { RulesApi, Configuration } from "./generated/notify-client";
|
||||
|
||||
const api = new RulesApi(new Configuration({
|
||||
basePath: process.env.BASE,
|
||||
accessToken: process.env.TOKEN
|
||||
}));
|
||||
|
||||
await api.createRule({
|
||||
xStellaOpsTenant: "acme-prod",
|
||||
notifyRule: {
|
||||
ruleId: "rule-attest-fail",
|
||||
tenantId: "acme-prod",
|
||||
name: "Attestation failures to SOC",
|
||||
match: { eventKinds: ["attestor.verification.failed"] },
|
||||
actions: [{
|
||||
actionId: "act-soc",
|
||||
channel: "chn-soc-webhook",
|
||||
template: "tmpl-attest-verify-fail",
|
||||
digest: "instant"
|
||||
}]
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Python (OpenAPI-generated client)
|
||||
```python
|
||||
from notify_client import RulesApi, Configuration, ApiClient
|
||||
|
||||
config = Configuration(host=BASE, access_token=TOKEN)
|
||||
with ApiClient(config) as client:
|
||||
api = RulesApi(client)
|
||||
api.create_rule(
|
||||
x_stella_ops_tenant="acme-prod",
|
||||
notify_rule={
|
||||
"ruleId": "rule-attest-fail",
|
||||
"tenantId": "acme-prod",
|
||||
"name": "Attestation failures to SOC",
|
||||
"match": {"eventKinds": ["attestor.verification.failed"]},
|
||||
"actions": [{
|
||||
"actionId": "act-soc",
|
||||
"channel": "chn-soc-webhook",
|
||||
"template": "tmpl-attest-verify-fail",
|
||||
"digest": "instant"
|
||||
}]
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
## Incident acknowledge
|
||||
### cURL
|
||||
```bash
|
||||
curl -X POST "$BASE/api/v1/notify/incidents/inc-telemetry/ack" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "X-StellaOps-Tenant: acme-prod" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"ackToken":"<dsse-token>"}' \
|
||||
-i
|
||||
```
|
||||
|
||||
### TypeScript
|
||||
```ts
|
||||
import { IncidentsApi } from "./generated/notify-client";
|
||||
await new IncidentsApi(config).ackIncident({
|
||||
incidentId: "inc-telemetry",
|
||||
xStellaOpsTenant: "acme-prod",
|
||||
inlineObject: { ackToken: process.env.ACK_TOKEN }
|
||||
});
|
||||
```
|
||||
|
||||
## Quiet hours
|
||||
### cURL
|
||||
```bash
|
||||
curl -X POST "$BASE/api/v1/notify/quiet-hours" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "X-StellaOps-Tenant: acme-prod" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"quietHoursId": "qh-default",
|
||||
"windows": [{
|
||||
"timezone": "UTC",
|
||||
"days": ["Mon","Tue","Wed","Thu","Fri"],
|
||||
"start": "22:00",
|
||||
"end": "06:00"
|
||||
}],
|
||||
"exemptions": [{
|
||||
"eventKinds": ["attestor.verification.failed"],
|
||||
"reason": "Always alert on attestation failures"
|
||||
}]
|
||||
}'
|
||||
```
|
||||
|
||||
## Smoke-test recipe (SDK CI)
|
||||
- Generate client from `/.well-known/openapi` (ts/python/go) with deterministic options.
|
||||
- Run:
|
||||
1) create rule → list rules → delete rule.
|
||||
2) set quiet hours → get quiet hours.
|
||||
3) ack incident with dummy token (expect 2xx or validation error envelope).
|
||||
- Assert deterministic headers: `X-OpenAPI-Scope=notify`, `ETag` stable for identical spec bytes.
|
||||
91
docs/implplan/SPRINT_0110_0001_0001_ingestion_evidence.md
Normal file
91
docs/implplan/SPRINT_0110_0001_0001_ingestion_evidence.md
Normal file
@@ -0,0 +1,91 @@
|
||||
# Sprint 0110-0001-0001 · Ingestion & Evidence (Phase 110)
|
||||
|
||||
## Topic & Scope
|
||||
- Finalise Advisory AI guardrail evidence (docs, SBOM feeds, policy knobs) without blocking customer rollout.
|
||||
- Land Concelier structured caching + telemetry so Link-Not-Merge schemas feed consoles, air-gap bundles, and attestations.
|
||||
- Prepare Excititor chunk API/telemetry/attestation contracts for deterministic VEX evidence delivery.
|
||||
- Staff and kick off Mirror assembler (DSSE/TUF metadata, OCI/time anchors, CLI/Export Center automation).
|
||||
- Working directories: `src/AdvisoryAI`, `src/Concelier`, `src/Excititor`, `ops/devops` (Mirror assembler).
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Upstream: Sprint 0100.A (Attestor) must stay green; Link-Not-Merge schema set (`CONCELIER-LNM-21-*`, `CARTO-GRAPH-21-002`) gates Concelier/Excititor work. Advisory AI docs depend on SBOM/CLI/Policy/DevOps artefacts (`SBOM-AIAI-31-001`, `CLI-VULN-29-001`, `CLI-VEX-30-001`, `POLICY-ENGINE-31-001`, `DEVOPS-AIAI-31-001`).
|
||||
- Parallelism: Sprints in the 0110 decade must remain independent; avoid new intra-decade dependencies.
|
||||
- Evidence Locker contract and Mirror staffing decisions gate attestation work and Mirror tracks respectively.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- docs/modules/advisory-ai/architecture.md
|
||||
- docs/modules/concelier/architecture.md
|
||||
- docs/modules/excititor/architecture.md
|
||||
- docs/modules/export-center/architecture.md
|
||||
- docs/modules/airgap/architecture.md (timeline + bundle requirements)
|
||||
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | DOCS-AIAI-31-004 | DOING | CONSOLE-VULN-29-001; CONSOLE-VEX-30-001; SBOM-AIAI-31-001/003 | Docs Guild · Console Guild | Guardrail console doc; screenshots + SBOM evidence pending. |
|
||||
| 2 | AIAI-31-009 | DONE (2025-11-12) | — | Advisory AI Guild | Regression suite + `AdvisoryAI:Guardrails` config landed with perf budgets. |
|
||||
| 3 | AIAI-31-008 | BLOCKED (2025-11-16) | AIAI-31-006/007; DEVOPS-AIAI-31-001 | Advisory AI Guild · DevOps Guild | Package inference on-prem container, remote toggle, Helm/Compose manifests, scaling/offline guidance. |
|
||||
| 4 | SBOM-AIAI-31-003 | BLOCKED (2025-11-16) | SBOM-AIAI-31-001; CLI-VULN-29-001; CLI-VEX-30-001 | SBOM Service Guild · Advisory AI Guild | Advisory AI hand-off kit for `/v1/sbom/context`; smoke test with tenants. |
|
||||
| 5 | DOCS-AIAI-31-005/006/008/009 | BLOCKED | CLI-VULN-29-001; CLI-VEX-30-001; POLICY-ENGINE-31-001; DEVOPS-AIAI-31-001 | Docs Guild | CLI/policy/ops docs paused pending upstream artefacts. |
|
||||
| 6 | CONCELIER-AIAI-31-002 | DOING | CONCELIER-GRAPH-21-001/002; CARTO-GRAPH-21-002 (Link-Not-Merge) | Concelier Core · WebService Guilds | LNM schema drafted (`docs/modules/concelier/link-not-merge-schema.md`) + sample payloads; wiring can proceed while review runs. |
|
||||
| 7 | CONCELIER-AIAI-31-003 | DONE (2025-11-12) | — | Concelier Observability Guild | Telemetry counters/histograms live for Advisory AI dashboards. |
|
||||
| 8 | CONCELIER-AIRGAP-56-001..58-001 | BLOCKED | Link-Not-Merge schema; Evidence Locker contract | Concelier Core · AirGap Guilds | Mirror/offline provenance chain. |
|
||||
| 9 | CONCELIER-CONSOLE-23-001..003 | BLOCKED | Link-Not-Merge schema | Concelier Console Guild | Console advisory aggregation/search helpers. |
|
||||
| 10 | CONCELIER-ATTEST-73-001/002 | BLOCKED | CONCELIER-AIAI-31-002; Evidence Locker contract | Concelier Core · Evidence Locker Guild | Attestation inputs + transparency metadata. |
|
||||
| 11 | FEEDCONN-ICSCISA-02-012 / KISA-02-008 | BLOCKED | Feed owner remediation plan | Concelier Feed Owners | Overdue provenance refreshes. |
|
||||
| 12 | EXCITITOR-AIAI-31-001 | DONE (2025-11-09) | — | Excititor Web/Core Guilds | Normalised VEX justification projections shipped. |
|
||||
| 13 | EXCITITOR-AIAI-31-002 | BLOCKED | Link-Not-Merge schema; Evidence Locker contract | Excititor Web/Core Guilds | Chunk API for Advisory AI feeds. |
|
||||
| 14 | EXCITITOR-AIAI-31-003 | BLOCKED | EXCITITOR-AIAI-31-002 | Excititor Observability Guild | Telemetry gated on chunk API. |
|
||||
| 15 | EXCITITOR-AIAI-31-004 | BLOCKED | EXCITITOR-AIAI-31-002 | Docs Guild · Excititor Guild | Chunk API docs. |
|
||||
| 16 | EXCITITOR-ATTEST-01-003 / 73-001 / 73-002 | BLOCKED | EXCITITOR-AIAI-31-002; Evidence Locker contract | Excititor Guild · Evidence Locker Guild | Attestation scope + payloads. |
|
||||
| 17 | EXCITITOR-AIRGAP-56/57/58 · CONN-TRUST-01-001 | BLOCKED | Link-Not-Merge schema; attestation plan | Excititor Guild · AirGap Guilds | Air-gap ingest + connector trust tasks. |
|
||||
| 18 | MIRROR-CRT-56-001 | BLOCKED | Staffing decision overdue | Mirror Creator Guild | Kickoff slipped past 2025-11-15. |
|
||||
| 19 | MIRROR-CRT-56-002 | BLOCKED | MIRROR-CRT-56-001; PROV-OBS-53-001 | Mirror Creator · Security Guilds | Needs assembler owner first. |
|
||||
| 20 | MIRROR-CRT-57-001/002 | BLOCKED | MIRROR-CRT-56-001; AIRGAP-TIME-57-001 | Mirror Creator Guild · AirGap Time Guild | Waiting on staffing. |
|
||||
| 21 | MIRROR-CRT-58-001/002 | BLOCKED | MIRROR-CRT-56-001; EXPORT-OBS-54-001; CLI-AIRGAP-56-001 | Mirror Creator · CLI · Exporter Guilds | Requires assembler staffing + upstream contracts. |
|
||||
| 22 | EXPORT-OBS-51-001 / 54-001 · AIRGAP-TIME-57-001 · CLI-AIRGAP-56-001 · PROV-OBS-53-001 | BLOCKED | MIRROR-CRT-56-001 ownership | Exporter Guild · AirGap Time · CLI Guild | Blocked until assembler staffed. |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-11-09 | Captured initial wave scope, interlocks, risks for SBOM/CLI/Policy/DevOps artefacts, Link-Not-Merge schemas, Excititor justification backlog, Mirror commitments. | Sprint 110 leads |
|
||||
| 2025-11-13 | Refreshed tracker ahead of 14–15 Nov checkpoints; outstanding asks: SBOM/CLI/Policy/DevOps ETAs, Link-Not-Merge approval, Mirror staffing. | Sprint 110 leads |
|
||||
| 2025-11-16 | Updated task board: marked Advisory AI packaging, Concelier air-gap/console/attestation tracks, Excititor chunk/attestation/air-gap tracks, and all Mirror tracks as BLOCKED pending schema approvals, Evidence Locker contract, Mirror staffing decisions. | Implementer |
|
||||
| 2025-11-16 | Drafted LNM schema + samples (`docs/modules/concelier/link-not-merge-schema.md`, `docs/samples/lnm/*`); moved CONCELIER-AIAI-31-002 to DOING pending review; added migration + tests to Mongo storage. | Implementer |
|
||||
| 2025-11-17 | Wired LNM ingestion writes: observations+linksets persisted via Mongo sinks, WebService DI updated, build green. Next: expose read APIs and backfill. | Implementer |
|
||||
| 2025-11-17 | Added cursor-paged `/linksets` API with normalized purls/versions; implemented linkset lookup/paging + unit test coverage. | Implementer |
|
||||
| 2025-11-17 | Persisted normalized linksets (purls/versions) in ingestion/backfill; added /linksets integration tests for normalized fields and cursor paging. Full solution test run aborted mid-build; rerun targeted Concelier WebService tests. | Implementer |
|
||||
| 2025-11-17 | Targeted `/linksets` WebService tests invoked; `dotnet test` fails early with MSBuild switch `--no-restore,workdir:` injected by toolchain, so tests remain pending until runner is fixed. | Implementer |
|
||||
| 2025-11-16 | Normalised sprint file to standard template and renamed from `SPRINT_110_ingestion_evidence.md` to `SPRINT_0110_0001_0001_ingestion_evidence.md`; no semantic changes. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
### Decisions in flight
|
||||
| Decision | Blocking work | Accountable owner(s) | Due date |
|
||||
| --- | --- | --- | --- |
|
||||
| Confirm SBOM/CLI/Policy/DevOps delivery dates | DOCS-AIAI backlog, SBOM-AIAI-31-003, AIAI-31-008 | SBOM Service · CLI · Policy · DevOps guild leads | 2025-11-14 |
|
||||
| Approve Link-Not-Merge schema (`CONCELIER-GRAPH-21-001/002`, `CARTO-GRAPH-21-002`) | CONCELIER-AIAI-31-002; EXCITITOR-AIAI-31-002/003/004; air-gap + attestation tasks | Concelier Core · Cartographer Guild · SBOM Service Guild | 2025-11-14 |
|
||||
| Review & ratify drafted LNM schema doc (`docs/modules/concelier/link-not-merge-schema.md`) | CONCELIER-AIAI-31-002 | Concelier Core · Architecture Guild | 2025-11-17 |
|
||||
| Assign MIRROR-CRT-56-001 owner | Entire Mirror wave + Export Center + AirGap Time automation | Mirror Creator Guild · Exporter Guild · AirGap Time Guild | 2025-11-15 |
|
||||
| Evidence Locker attestation scope sign-off | EXCITITOR-ATTEST-01-003/73-001/73-002; CONCELIER-ATTEST-73-001/002 | Evidence Locker Guild · Excititor Guild · Concelier Guild | 2025-11-15 |
|
||||
| Approve DOCS-AIAI-31-004 screenshot plan | Publication of console guardrail doc | Docs Guild · Console Guild | 2025-11-15 |
|
||||
|
||||
### Risk outlook (2025-11-13)
|
||||
| Risk | Impact | Mitigation / owner |
|
||||
| --- | --- | --- |
|
||||
| SBOM/CLI/Policy/DevOps artefacts slip past 2025-11-14 | Advisory AI docs + SBOM feeds stay blocked, delaying rollout & dependent sprints. | Lock ETAs during 14 Nov interlock; escalate to Advisory AI leadership if commitments slip. |
|
||||
| Link-Not-Merge schema approval delayed | Concelier/Excititor APIs, console overlays, air-gap bundles remain gated. | Close 14 Nov review with migration notes; unblock tasks immediately after approval. |
|
||||
| Excititor attestation backlog stalls | VEX evidence + air-gap parity cannot progress; Mirror support drifts. | Use 15 Nov sequencing session to lock order and reserve engineering capacity. |
|
||||
| MIRROR-CRT-56-001 remains unstaffed | DSSE/TUF, OCI/time-anchor, CLI, Export Center automation cannot start (Sprint 0125 slips). | Assign owner at kickoff; reallocate Export/AirGap engineers if needed. |
|
||||
| Connector refreshes (ICSCISA/KISA) remain overdue | Advisory AI may serve stale advisories; telemetry accuracy suffers. | Feed owners to publish remediation plan + interim mitigations by 2025-11-15 stand-up. |
|
||||
| Concelier WebService tests blocked by injected MSBuild switch `workdir:` | Cannot validate new `/linksets` integration; release confidence reduced. | Fix runner/tooling or execute tests in environment that does not append `workdir:` to MSBuild args. |
|
||||
|
||||
## Next Checkpoints
|
||||
| Date (UTC) | Session | Goal | Impacted wave(s) | Prep owner(s) |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| 2025-11-14 | Advisory AI customer surfaces follow-up | Capture SBOM/CLI/Policy/DevOps ETAs to restart DOCS/SBOM work. | 110.A | Advisory AI · SBOM · CLI · Policy · DevOps guild leads |
|
||||
| 2025-11-14 | Link-Not-Merge schema review | Approve schema payloads + migration notes. | 110.B · 110.C | Concelier Core · Cartographer Guild · SBOM Service Guild |
|
||||
| 2025-11-15 | Excititor attestation sequencing | Lock Evidence Locker contract + backlog order. | 110.C | Excititor Web/Core · Evidence Locker Guild |
|
||||
| 2025-11-15 | Mirror evidence kickoff | Assign MIRROR-CRT-56-001 owner; confirm staffing; outline DSSE/TUF + OCI milestones. | 110.D | Mirror Creator · Exporter · AirGap Time · Security guilds |
|
||||
|
||||
## Appendix
|
||||
- Detailed coordination artefacts, contingency playbook, and historical notes live at `docs/implplan/archived/SPRINT_110_ingestion_evidence_2025-11-13.md`.
|
||||
64
docs/implplan/SPRINT_0111_0001_0001_advisoryai.md
Normal file
64
docs/implplan/SPRINT_0111_0001_0001_advisoryai.md
Normal file
@@ -0,0 +1,64 @@
|
||||
# Sprint 0111-0001-0001 · Advisory AI — Ingestion & Evidence (Phase 110.A)
|
||||
|
||||
## Topic & Scope
|
||||
- Advance Advisory AI docs, packaging, and SBOM hand-off while keeping upstream console/CLI/policy dependencies explicit.
|
||||
- Maintain Link-Not-Merge alignment for advisory evidence feeding Advisory AI surfaces.
|
||||
- Working directory: `src/AdvisoryAI` and `docs` (Advisory AI docs).
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Depends on Sprint 0100.A (Attestor) remaining green.
|
||||
- Console/CLI/SBOM/DevOps artefacts: `CONSOLE-VULN-29-001`, `CONSOLE-VEX-30-001`, `EXCITITOR-CONSOLE-23-001`, `SBOM-AIAI-31-001`, `CLI-VULN-29-001`, `CLI-VEX-30-001`, `DEVOPS-AIAI-31-001`.
|
||||
- Link-Not-Merge schema (`CONCELIER-LNM-21-*`) provides canonical advisory evidence; keep sequencing with Concelier sprints.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- docs/README.md; docs/07_HIGH_LEVEL_ARCHITECTURE.md
|
||||
- docs/modules/platform/architecture-overview.md
|
||||
- docs/modules/advisory-ai/architecture.md
|
||||
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | DOCS-AIAI-31-006 | DONE (2025-11-13) | — | Docs Guild · Policy Guild (`docs`) | `docs/policy/assistant-parameters.md` documents inference modes, guardrail phrases, budgets, cache/queue knobs (POLICY-ENGINE-31-001 inputs via `AdvisoryAiServiceOptions`). |
|
||||
| 2 | DOCS-AIAI-31-008 | BLOCKED (2025-11-03) | SBOM-AIAI-31-001 | Docs Guild · SBOM Service Guild (`docs`) | Publish `/docs/sbom/remediation-heuristics.md` (feasibility scoring, blast radius). |
|
||||
| 3 | DOCS-AIAI-31-009 | BLOCKED (2025-11-03) | DEVOPS-AIAI-31-001 | Docs Guild · DevOps Guild (`docs`) | Create `/docs/runbooks/assistant-ops.md` for warmup, cache priming, outages, scaling. |
|
||||
| 4 | SBOM-AIAI-31-003 | BLOCKED (2025-11-16) | SBOM-AIAI-31-001 | SBOM Service Guild · Advisory AI Guild (`src/SbomService/StellaOps.SbomService`) | Publish Advisory AI hand-off kit for `/v1/sbom/context`, provide base URL/API key + tenant header contract, run smoke test. |
|
||||
| 5 | AIAI-31-008 | BLOCKED (2025-11-16) | AIAI-31-006/007; DEVOPS-AIAI-31-001 | Advisory AI Guild · DevOps Guild (`src/AdvisoryAI/StellaOps.AdvisoryAI`) | Package inference on-prem container, remote toggle, Helm/Compose manifests, scaling/offline guidance. |
|
||||
| 6 | AIAI-31-009 | DONE (2025-11-12) | — | Advisory AI Guild · QA Guild (`src/AdvisoryAI/StellaOps.AdvisoryAI`) | Develop unit/golden/property/perf tests, injection harness, regression suite; determinism with seeded caches. |
|
||||
| 7 | DOCS-AIAI-31-004 | BLOCKED (2025-11-16) | CONSOLE-VULN-29-001; CONSOLE-VEX-30-001; EXCITITOR-CONSOLE-23-001 | Docs Guild · Console Guild (`docs`) | `/docs/advisory-ai/console.md` screenshots, a11y, copy-as-ticket instructions. |
|
||||
| 8 | DOCS-AIAI-31-005 | BLOCKED (2025-11-03) | CLI-VULN-29-001; CLI-VEX-30-001; AIAI-31-004C | Docs Guild · CLI Guild (`docs`) | Publish `/docs/advisory-ai/cli.md` covering commands, exit codes, scripting patterns. |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-11-02 | Structured + vector retrievers landed; deterministic CSAF/OSV/Markdown chunkers with hash embeddings and tests. | Advisory AI Guild |
|
||||
| 2025-11-03 | DOCS-AIAI-31-001/002/003 published; DOCS-AIAI-31-004 marked BLOCKED (console widgets pending); DOCS-AIAI-31-005/008/009 blocked; SBOM models finalized; WebService/Worker scaffolds created. | Docs Guild |
|
||||
| 2025-11-04 | AIAI-31-002/003 completed; WebService/Worker queue wiring emits metrics; SBOM address flows via `SbomContextClientOptions.BaseAddress`; orchestrator cache keys expanded. | Advisory AI Guild |
|
||||
| 2025-11-07 | DOCS-AIAI-31-004 draft committed with workflow outline; screenshots pending widget delivery. | Docs Guild |
|
||||
| 2025-11-08 | Console endpoints staffed; guardrail/inference sections documented; screenshot placeholders remain. | Docs Guild |
|
||||
| 2025-11-09 | Guardrail pipeline enforcement tests landed. | Advisory AI Guild |
|
||||
| 2025-11-12 | AIAI-31-009 test suite completed. | Advisory AI Guild |
|
||||
| 2025-11-13 | DOCS-AIAI-31-006 published (`assistant-parameters.md`). | Docs Guild |
|
||||
| 2025-11-16 | SBOM-AIAI-31-003 and AIAI-31-008 marked BLOCKED pending SBOM-AIAI-31-001 and DEVOPS-AIAI-31-001 respectively; DOCS-AIAI-31-004 remains BLOCKED pending Console/Excititor feeds. | Planner |
|
||||
| 2025-11-16 | Normalised sprint file to standard template and renamed from `SPRINT_111_advisoryai.md` to `SPRINT_0111_0001_0001_advisoryai.md`; no semantic changes. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- Console dependencies (CONSOLE-VULN-29-001, CONSOLE-VEX-30-001, EXCITITOR-CONSOLE-23-001) control closure of DOCS-AIAI-31-004; consider temporary mock screenshots if dates slip.
|
||||
- SBOM-AIAI-31-001 is gate for SBOM hand-off kit and remediation heuristics doc.
|
||||
- CLI backlog (CLI-VULN-29-001 / CLI-VEX-30-001) blocks CLI doc; request interim outputs if priorities shift.
|
||||
- DevOps runbook (DEVOPS-AIAI-31-001) needed before packaging (AIAI-31-008) proceeds.
|
||||
|
||||
## Next Checkpoints
|
||||
- 2025-11-14: Console owners to confirm widget readiness for DOCS-AIAI-31-004.
|
||||
- 2025-11-14: SBOM-AIAI-31-001 projection kit ETA to unlock SBOM-AIAI-31-003/DOCS-AIAI-31-008.
|
||||
- 2025-11-15: CLI owners to share `stella advise` verb outline/beta timeline.
|
||||
- 2025-11-15: DevOps to share draft for DEVOPS-AIAI-31-001 to unblock AIAI-31-008/DOCS-AIAI-31-009.
|
||||
|
||||
## Blockers & Dependencies (detailed)
|
||||
| Blocked item | Dependency | Owner(s) | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| DOCS-AIAI-31-004 (`/docs/advisory-ai/console.md`) | CONSOLE-VULN-29-001; CONSOLE-VEX-30-001; EXCITITOR-CONSOLE-23-001 | Docs Guild · Console Guild | Screenshots + a11y copy pending widgets/feeds. |
|
||||
| DOCS-AIAI-31-005 (`/docs/advisory-ai/cli.md`) | CLI-VULN-29-001; CLI-VEX-30-001; AIAI-31-004C | Docs Guild · CLI Guild | CLI verbs/outputs unavailable; doc paused. |
|
||||
| DOCS-AIAI-31-008 (`/docs/sbom/remediation-heuristics.md`) | SBOM-AIAI-31-001 | Docs Guild · SBOM Service Guild | Needs heuristics kit + contract. |
|
||||
| DOCS-AIAI-31-009 (`/docs/runbooks/assistant-ops.md`) | DEVOPS-AIAI-31-001 | Docs Guild · DevOps Guild | Runbook steps pending. |
|
||||
| SBOM-AIAI-31-003 (`/v1/sbom/context` hand-off kit) | SBOM-AIAI-31-001 | SBOM Service Guild · Advisory AI Guild | Requires projection + smoke plan. |
|
||||
| AIAI-31-008 (on-prem/remote inference packaging) | AIAI-31-006..007; DEVOPS-AIAI-31-001 | Advisory AI Guild · DevOps Guild | Packaging waits for guardrail knob doc (done) + DevOps runbook draft. |
|
||||
@@ -54,6 +54,9 @@
|
||||
| 2025-11-12 | CONCELIER-AIAI-31-003 shipped OTEL counters for Advisory AI chunk traffic (cache hit ratios + guardrail blocks per tenant). | Concelier WebService Guild |
|
||||
| 2025-11-13 | Rebaseline: locked structured field scope to canonical model + provenance anchors aligned to competitor schemas. | Planning |
|
||||
| 2025-11-16 | Normalised sprint file to standard template and renamed from `SPRINT_112_concelier_i.md` to `SPRINT_0112_0001_0001_concelier_i.md`; no semantic changes. | Planning |
|
||||
| 2025-11-17 | Created Concelier module charter at `src/Concelier/AGENTS.md`; unblocked Workstreams B–E and reset tasks to TODO. | Concelier Implementer |
|
||||
| 2025-11-17 | Added authority/tenant enforcement smoke tests for ingest + observations; CONCELIER-CORE-AOC-19-013 blocked by storage DI ambiguity (`IAdvisoryLinksetStore`). | Concelier Implementer |
|
||||
| 2025-11-17 | Retried build after renaming Mongo linkset store and redoing DI; ambiguity persists (`IAdvisoryLinksetStore`), WebService tests still not runnable. | Concelier Implementer |
|
||||
|
||||
## Decisions & Risks
|
||||
- Link-Not-Merge schema slip past 2025-11-14 would stall Workstreams A and D; fallback adapter prep required.
|
||||
@@ -75,4 +78,3 @@
|
||||
| MIRROR-CRT-56-001 staffing | Workstream B (AIRGAP-56/57/58) | Mirror Creator Guild · Exporter Guild · AirGap Time Guild | Owner not assigned (per Sprint 110); kickoff on 2025-11-15 must resolve. |
|
||||
| Evidence Locker attestation contract | Workstream C (ATTEST-73) | Evidence Locker Guild · Concelier Core | Needs alignment with Excititor attestation plan on 2025-11-15. |
|
||||
| Authority scope smoke coverage (`CONCELIER-CORE-AOC-19-013`) | Workstream E | Concelier Core · Authority Guild | Waiting on structured endpoint readiness + AUTH-SIG-26-001 validation. |
|
||||
|
||||
|
||||
@@ -23,15 +23,15 @@
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | EXCITITOR-AIAI-31-001 | DONE (2025-11-12) | Available to Advisory AI; monitor usage. | Excititor WebService Guild | Expose normalized VEX justifications, scope trees, and anchors via `VexObservation` projections so Advisory AI can cite raw evidence without consensus logic. |
|
||||
| 2 | EXCITITOR-AIAI-31-002 | TODO | Start `/vex/evidence/chunks`; reuse 31-001 outputs. | Excititor WebService Guild | Stream raw statements + signature metadata with tenant/policy filters for RAG clients; aggregation-only, reference observation/linkset IDs. |
|
||||
| 3 | EXCITITOR-AIAI-31-003 | DOING (in review 2025-11-13) | Await Ops span sink; finalize metrics wiring. | Excititor WebService Guild · Observability Guild | Instrument evidence APIs with request counters, chunk histograms, signature-failure + AOC guard-violation meters. |
|
||||
| 4 | EXCITITOR-AIAI-31-004 | TODO | Finalize OpenAPI/SDK/docs once 31-002/003 stabilize. | Excititor WebService Guild · Docs Guild | Codify Advisory-AI evidence contract, determinism guarantees, and mapping of observation IDs to storage. |
|
||||
| 2 | EXCITITOR-AIAI-31-002 | DONE (2025-11-17) | `/vex/evidence/chunks` delivered; monitor consumers. | Excititor WebService Guild | Stream raw statements + signature metadata with tenant/policy filters for RAG clients; aggregation-only, reference observation/linkset IDs. |
|
||||
| 3 | EXCITITOR-AIAI-31-003 | BLOCKED (2025-11-17) | Await Ops span sink; finalize metrics wiring. | Excititor WebService Guild · Observability Guild | Instrument evidence APIs with request counters, chunk histograms, signature-failure + AOC guard-violation meters. |
|
||||
| 4 | EXCITITOR-AIAI-31-004 | BLOCKED (2025-11-17) | Waiting for 31-003 telemetry sink to stabilize before finalizing docs/SDK. | Excititor WebService Guild · Docs Guild | Codify Advisory-AI evidence contract, determinism guarantees, and mapping of observation IDs to storage. |
|
||||
| 5 | EXCITITOR-AIRGAP-56-001 | TODO | Waiting on Export Center mirror bundle schema (Sprint 162). | Excititor Core Guild | Mirror-first ingestion that preserves upstream digests, bundle IDs, and provenance for offline parity. |
|
||||
| 6 | EXCITITOR-AIRGAP-57-001 | TODO | Blocked on 56-001; define sealed-mode errors. | Excititor Core Guild · AirGap Policy Guild | Enforce sealed-mode policies, remediation errors, and staleness annotations surfaced to Advisory AI. |
|
||||
| 7 | EXCITITOR-AIRGAP-58-001 | TODO | Depends on 57-001 and Evidence Locker portable format (160/161). | Excititor Core Guild · Evidence Locker Guild | Package tenant-scoped VEX evidence (raw JSON, normalization diff, provenance) into portable bundles tied to timeline events. |
|
||||
| 8 | EXCITITOR-ATTEST-01-003 | DOING (since 2025-11-06) | Complete verifier harness + diagnostics. | Excititor Attestation Guild | Finish `IVexAttestationVerifier`, wire structured diagnostics/metrics, and prove DSSE bundle verification without touching consensus results. |
|
||||
| 9 | EXCITITOR-ATTEST-73-001 | TODO | Blocked on 01-003; prep payload spec. | Excititor Core · Attestation Payloads Guild | Emit attestation payloads capturing supplier identity, justification summary, and scope metadata for trust chaining. |
|
||||
| 10 | EXCITITOR-ATTEST-73-002 | TODO | Blocked on 73-001; design linkage API. | Excititor Core Guild | Provide APIs linking attestation IDs back to observation/linkset/product tuples for provenance citations without derived verdicts. |
|
||||
| 8 | EXCITITOR-ATTEST-01-003 | DONE (2025-11-17) | Verifier harness + diagnostics complete; monitor integration. | Excititor Attestation Guild | Finish `IVexAttestationVerifier`, wire structured diagnostics/metrics, and prove DSSE bundle verification without touching consensus results. |
|
||||
| 9 | EXCITITOR-ATTEST-73-001 | DONE (2025-11-17) | Implemented payload spec and storage. | Excititor Core · Attestation Payloads Guild | Emit attestation payloads capturing supplier identity, justification summary, and scope metadata for trust chaining. |
|
||||
| 10 | EXCITITOR-ATTEST-73-002 | DONE (2025-11-17) | Implemented linkage API. | Excititor Core Guild | Provide APIs linking attestation IDs back to observation/linkset/product tuples for provenance citations without derived verdicts. |
|
||||
| 11 | EXCITITOR-CONN-TRUST-01-001 | TODO | Await connector signer metadata schema (review 2025-11-14). | Excititor Connectors Guild | Add signer fingerprints, issuer tiers, and bundle references to MSRC/Oracle/Ubuntu/Stella connectors; document consumer guidance. |
|
||||
|
||||
### Task Clusters & Readiness
|
||||
@@ -60,6 +60,8 @@
|
||||
| 2025-11-14 | 31-003 instrumentation (counters, chunk histogram, signature failure + guard-violation meters) merged; telemetry export blocked on span sink rollout. | WebService Guild |
|
||||
| 2025-11-14 | Published `docs/modules/excititor/operations/observability.md` covering new evidence metrics for Ops/Lens dashboards. | Observability Guild |
|
||||
| 2025-11-16 | Normalized sprint file to standard template, renamed to SPRINT_0119_0001_0001_excititor_i.md, and updated tasks-all references. | Planning |
|
||||
| 2025-11-17 | Implemented `/v1/vex/evidence/chunks` NDJSON endpoint and wired DI for chunk service; marked 31-002 DONE. | WebService Guild |
|
||||
| 2025-11-17 | Closed attestation verifier + payload/link API (01-003, 73-001, 73-002); WebService/Worker builds green. | Attestation/Core Guild |
|
||||
|
||||
## Decisions & Risks
|
||||
- **Decisions**
|
||||
|
||||
71
docs/implplan/SPRINT_0119_0001_0002_excititor_ii.md
Normal file
71
docs/implplan/SPRINT_0119_0001_0002_excititor_ii.md
Normal file
@@ -0,0 +1,71 @@
|
||||
# Sprint 0119_0001_0002 · Excititor Ingestion & Evidence (Phase II)
|
||||
|
||||
## Topic & Scope
|
||||
- Harden ingestion/linkset storage and connector trust provenance so Excititor stays aggregation-only while downstream consumers build consensus.
|
||||
- Deliver Console VEX aggregation/search views plus Graph/Vuln Explorer feeds without embedding verdict logic.
|
||||
- Enforce idempotent raw VEX upserts and remove legacy consensus paths.
|
||||
- **Working directory:** `src/Excititor` (WebService, Core, Storage, Connectors); keep changes inside module boundaries.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Upstream: Sprint 0119_0001_0001 (Excititor I) projection work; Policy contracts (EXCITITOR-POLICY-01-001); Attestor DSSE readiness for provenance integrity.
|
||||
- Concurrency: Console APIs can progress alongside the completed connector provenance items; Graph overlay tasks blocked pending inspector linkouts; storage idempotency must precede consensus removal.
|
||||
- Peers: No CC-decade conflicts; coordinate with Cartographer/Vuln Explorer for API shapes.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- `docs/modules/excititor/architecture.md`
|
||||
- `docs/modules/excititor/README.md#latest-updates`
|
||||
- `docs/modules/excititor/mirrors.md`
|
||||
- `docs/modules/excititor/operations/*`
|
||||
- `docs/modules/excititor/implementation_plan.md`
|
||||
- Excititor component `AGENTS.md` files (WebService, Core, Storage, Connectors).
|
||||
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | EXCITITOR-CONN-SUSE-01-003 | DONE (2025-11-09) | Trust metadata flowing; monitor consumers. | Excititor Connectors – SUSE | Emit provider trust configuration (signer fingerprints, trust tier notes) into raw provenance envelope; aggregation-only. |
|
||||
| 2 | EXCITITOR-CONN-UBUNTU-01-003 | DONE (2025-11-09) | Trust metadata flowing; monitor consumers. | Excititor Connectors – Ubuntu | Emit Ubuntu signing metadata (GPG fingerprints, issuer trust tier) in raw provenance artifacts; aggregation-only. |
|
||||
| 3 | EXCITITOR-CONSOLE-23-001 | BLOCKED (2025-11-17) | Awaiting concrete `/console/vex` API contract and grouping schema; LNM 21-* view spec not present. | Excititor WebService Guild · BE-Base Platform Guild | Expose grouped VEX statements with status chips, justification metadata, precedence trace pointers, tenant filters. |
|
||||
| 4 | EXCITITOR-CONSOLE-23-002 | TODO | Depends on 23-001; design dashboard counters. | Excititor WebService Guild | Provide aggregated delta counts for overrides; emit metrics for policy explain. |
|
||||
| 5 | EXCITITOR-CONSOLE-23-003 | TODO | Depends on 23-001; plan caching/RBAC. | Excititor WebService Guild | Rapid lookup endpoints of VEX by advisory/component incl. provenance + precedence context; caching + RBAC. |
|
||||
| 6 | EXCITITOR-CORE-AOC-19-002 | BLOCKED (2025-11-17) | Linkset extraction rules/ordering not documented; need authoritative schema before coding. | Excititor Core Guild | Extract advisory IDs, component PURLs, references into linkset with reconciled-from metadata. |
|
||||
| 7 | EXCITITOR-CORE-AOC-19-003 | TODO | Blocked on 19-002; design supersede chains. | Excititor Core Guild | Enforce uniqueness + append-only versioning of raw VEX docs. |
|
||||
| 8 | EXCITITOR-CORE-AOC-19-004 | TODO | Remove consensus after 19-003 in place. | Excititor Core Guild | Excise consensus/merge/severity logic from ingestion; rely on Policy Engine materializations. |
|
||||
| 9 | EXCITITOR-CORE-AOC-19-013 | TODO | Seed tenant-aware Authority clients in smoke/e2e once 19-004 lands. | Excititor Core Guild | Ensure cross-tenant ingestion rejected; update tests. |
|
||||
| 10 | EXCITITOR-GRAPH-21-001 | BLOCKED (2025-10-27) | Needs Cartographer API contract + data availability. | Excititor Core · Cartographer Guild | Batched VEX/advisory reference fetches by PURL for inspector linkouts. |
|
||||
| 11 | EXCITITOR-GRAPH-21-002 | BLOCKED (2025-10-27) | Blocked on 21-001. | Excititor Core Guild | Overlay metadata includes justification summaries + versions; fixtures/tests. |
|
||||
| 12 | EXCITITOR-GRAPH-21-005 | BLOCKED (2025-10-27) | Blocked on 21-002. | Excititor Storage Guild | Indexes/materialized views for VEX lookups by PURL/policy for inspector perf. |
|
||||
| 13 | EXCITITOR-GRAPH-24-101 | TODO | Wait for 21-005 indexes. | Excititor WebService Guild | VEX status summaries per component/asset for Vuln Explorer. |
|
||||
| 14 | EXCITITOR-GRAPH-24-102 | TODO | Depends on 24-101; design batch shape. | Excititor WebService Guild | Batch VEX observation retrieval optimized for Graph overlays/tooltips. |
|
||||
| 15 | EXCITITOR-LNM-21-001 | IN REVIEW (2025-11-14) | Await review sign-off; prep migrations. | Excititor Core Guild | VEX observation model/schema, indexes, determinism rules, AOC metadata (`docs/modules/excititor/vex_observations.md`). |
|
||||
|
||||
## Action Tracker
|
||||
| Focus | Action | Owner(s) | Due | Status |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| Console APIs | Finalize `/console/vex` contract (23-001) and dashboard deltas (23-002). | WebService Guild | 2025-11-18 | TODO |
|
||||
| Ingestion idempotency | Land linkset extraction + raw upsert uniqueness (19-002/003). | Core Guild | 2025-11-19 | TODO |
|
||||
| Consensus removal | Remove merge/severity logic after idempotency in place (19-004). | Core Guild | 2025-11-20 | TODO |
|
||||
| Graph overlays | Align inspector/linkout schemas to unblock 21-001/002/005. | Core + Cartographer Guilds | 2025-11-21 | BLOCKED (awaiting Cartographer contract) |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-11-09 | Connector SUSE + Ubuntu trust provenance delivered. | Connectors Guild |
|
||||
| 2025-11-14 | LNM-21-001 schema in review. | Core Guild |
|
||||
| 2025-11-16 | Normalized sprint file to standard template and renamed to SPRINT_0119_0001_0002_excititor_ii.md. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- **Decisions**
|
||||
- Keep connector provenance aggregation-only; no weighting/consensus in Excititor.
|
||||
- Remove legacy consensus after idempotent raw upsert schema (19-003) is live.
|
||||
- **Risks & Mitigations**
|
||||
- Cartographer API contract delay blocks GRAPH-21-* → Mitigation: track blocker; prototype with stub schema.
|
||||
- Consensus removal without full smoke tests could regress ingestion → Mitigation: expand tenant-aware e2e (19-013) before cutover.
|
||||
- Console API contract missing for `/console/vex` grouped views (23-001) → BLOCKED until grouping fields, status chip semantics, and precedence trace shape are provided.
|
||||
- Linkset extraction determinism rules/schema not available (19-002) → BLOCKED until authoritative extraction/ordering spec is supplied.
|
||||
|
||||
## Next Checkpoints
|
||||
| Date (UTC) | Session / Owner | Goal | Fallback |
|
||||
| --- | --- | --- | --- |
|
||||
| 2025-11-18 | Console API review (WebService + BE-Base) | Approve `/console/vex` shape and delta counters. | Ship behind feature flag if minor gaps remain. |
|
||||
| 2025-11-19 | Idempotent ingestion design review (Core) | Lock uniqueness + supersede chain plan for 19-002/003. | Use temporary duplicate guard rails until migration complete. |
|
||||
| 2025-11-21 | Cartographer schema sync | Unblock GRAPH-21-* inspector/linkout contracts. | Maintain BLOCKED status; deliver sample payloads for early testing. |
|
||||
60
docs/implplan/SPRINT_0119_0001_0003_excititor_iii.md
Normal file
60
docs/implplan/SPRINT_0119_0001_0003_excititor_iii.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# Sprint 0119_0001_0003 · Excititor Ingestion & Evidence (Phase III)
|
||||
|
||||
## Topic & Scope
|
||||
- Stand up observation/linkset stores, conflict annotations, and events so downstream consumers can reason without Excititor consensus.
|
||||
- Publish read APIs and docs (observations/linksets) with deterministic pagination and strict RBAC.
|
||||
- Add ingest observability (metrics/SLOs) focused on evidence freshness and signature success.
|
||||
- **Working directory:** `src/Excititor` (WebService, Core, Storage); keep within module boundaries.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Upstream: Phase II storage/idempotency groundwork; Policy contracts for aggregation-only behavior.
|
||||
- Concurrency: Observation/linkset API work can proceed once the stores are stood up; conflict annotations gate events; docs depend on API shape.
|
||||
- Peers: Coordinate with Platform Events Guild for event envelopes.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- `docs/modules/excititor/architecture.md`
|
||||
- `docs/modules/excititor/README.md#latest-updates`
|
||||
- `docs/modules/excititor/operations/*`
|
||||
- `docs/modules/excititor/vex_observations.md`
|
||||
- `docs/modules/excititor/implementation_plan.md`
|
||||
- Excititor component `AGENTS.md` files (WebService, Core, Storage).
|
||||
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | EXCITITOR-LNM-21-001 | TODO | Create `vex_observations`/`vex_linksets` with shard keys + migrations. | Excititor Storage Guild | Stand up collections with tenant guards; retire merge-era data without mutating raw content. |
|
||||
| 2 | EXCITITOR-LNM-21-002 | TODO | After 21-001; design disagreement fields. | Excititor Core Guild | Capture disagreement metadata (status/justification deltas) in linksets with confidence scores; no winner selection. |
|
||||
| 3 | EXCITITOR-LNM-21-003 | TODO | After 21-002; event payload contract. | Excititor Core · Platform Events Guild | Emit `vex.linkset.updated` events (observation ids, confidence, conflict summary) aggregation-only. |
|
||||
| 4 | EXCITITOR-LNM-21-201 | TODO | After 21-003; implement filters + pagination. | Excititor WebService Guild | `/vex/observations` read endpoints with advisory/product/issuer filters, deterministic pagination, strict RBAC; no derived verdicts. |
|
||||
| 5 | EXCITITOR-LNM-21-202 | TODO | After 21-201; export shape. | Excititor WebService Guild | `/vex/linksets` + export endpoints surfacing alias mappings, conflict markers, provenance proofs; errors map to `ERR_AGG_*`. |
|
||||
| 6 | EXCITITOR-LNM-21-203 | TODO | After 21-202; update SDK/docs. | Excititor WebService Guild · Docs Guild | OpenAPI/SDK/examples for obs/linkset endpoints with Advisory AI/Lens-ready examples. |
|
||||
| 7 | EXCITITOR-OBS-51-001 | TODO | Define metric names + SLOs. | Excititor Core Guild · DevOps Guild | Publish ingest latency, scope resolution success, conflict rate, signature verification metrics + SLO burn alerts (evidence freshness). |
|
||||
|
||||
## Action Tracker
|
||||
| Focus | Action | Owner(s) | Due | Status |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| Stores & migrations | Finalize shard keys and migration plan for 21-001. | Storage Guild | 2025-11-18 | TODO |
|
||||
| Conflict annotations | Schema + confidence scoring for 21-002. | Core Guild | 2025-11-19 | TODO |
|
||||
| Read APIs | Implement `/vex/observations` + `/vex/linksets` (21-201/202). | WebService Guild | 2025-11-22 | TODO |
|
||||
| Docs & SDK | Produce OpenAPI + SDK examples (21-203). | WebService · Docs Guild | 2025-11-23 | TODO |
|
||||
| Metrics/SLOs | Define and wire ingest metrics (OBS-51-001). | Core · DevOps Guild | 2025-11-24 | TODO |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-11-16 | Normalized sprint file to standard template and renamed to SPRINT_0119_0001_0003_excititor_iii.md; pending staffing. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- **Decisions**
|
||||
- All new endpoints remain aggregation-only; no derived verdicts.
|
||||
- Events must reuse Platform event envelope and tenant guards.
|
||||
- **Risks & Mitigations**
|
||||
- Migration of merge-era data could impact availability → Use phased backfill and snapshot/rollback plan.
|
||||
- Missing SLO definitions delays evidence freshness promises → Draft initial targets with Ops while metrics wire up.
|
||||
|
||||
## Next Checkpoints
|
||||
| Date (UTC) | Session / Owner | Goal | Fallback |
|
||||
| --- | --- | --- | --- |
|
||||
| 2025-11-18 | Storage design review | Approve shard keys + migration plan for 21-001. | Use temporary staging collections if approval slips. |
|
||||
| 2025-11-20 | Events contract sync (Platform) | Lock `vex.linkset.updated` payload. | Emit internal-only preview topic until contract finalized. |
|
||||
| 2025-11-23 | API/doc draft review | Validate observation/linkset OpenAPI + SDK examples. | Ship behind feature flag if minor gaps. |
|
||||
60
docs/implplan/SPRINT_0119_0001_0004_excititor_iv.md
Normal file
60
docs/implplan/SPRINT_0119_0001_0004_excititor_iv.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# Sprint 0119_0001_0004 · Excititor Ingestion & Evidence (Phase IV)
|
||||
|
||||
## Topic & Scope
|
||||
- Emit timeline events and evidence snapshots/attestations to make ingestion fully replayable and air-gap ready.
|
||||
- Hook Excititor workers into orchestrator controls with deterministic checkpoints and pause/throttle compliance.
|
||||
- Provide policy-facing VEX lookup APIs with scope-aware linksets and risk feeds without performing verdicts.
|
||||
- **Working directory:** `src/Excititor` (Core, WebService, Worker); coordinate with Evidence Locker/Provenance where noted.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Upstream: Metrics/SLOs from Phase III; Evidence Locker manifest format; Provenance tooling for DSSE verification; orchestrator SDK availability.
|
||||
- Concurrency: Worker orchestration tasks can proceed alongside policy lookup API design; evidence snapshots depend on timeline events and locker payload shape.
|
||||
- Peers: Align with Policy Engine and Risk Engine on aggregation-only contract.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- `docs/modules/excititor/architecture.md`
|
||||
- `docs/modules/excititor/README.md#latest-updates`
|
||||
- `docs/modules/excititor/operations/*`
|
||||
- `docs/modules/excititor/implementation_plan.md`
|
||||
- Excititor component `AGENTS.md` files (Core, WebService, Worker).
|
||||
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | EXCITITOR-OBS-52-001 | TODO | After OBS-51 metrics baseline; define event schema. | Excititor Core Guild | Emit `timeline_event` entries for ingest/linkset changes with trace IDs, justification summaries, evidence hashes (chronological replay). |
|
||||
| 2 | EXCITITOR-OBS-53-001 | TODO | Depends on 52-001; coordinate locker format. | Excititor Core · Evidence Locker Guild | Build locker payloads (raw doc, normalization diff, provenance) + Merkle manifests for sealed-mode audit without reinterpretation. |
|
||||
| 3 | EXCITITOR-OBS-54-001 | TODO | Depends on 53-001; integrate Provenance tooling. | Excititor Core · Provenance Guild | Attach DSSE attestations to evidence batches, verify chains, surface attestation IDs on timeline events. |
|
||||
| 4 | EXCITITOR-ORCH-32-001 | TODO | Integrate orchestrator SDK. | Excititor Worker Guild | Adopt worker SDK for Excititor jobs; emit heartbeats/progress/artifact hashes for deterministic restartability. |
|
||||
| 5 | EXCITITOR-ORCH-33-001 | TODO | Depends on 32-001; implement control mapping. | Excititor Worker Guild | Honor orchestrator pause/throttle/retry commands; persist checkpoints; classify errors for safe outage handling. |
|
||||
| 6 | EXCITITOR-POLICY-20-001 | TODO | Define API shapes for Policy queries. | Excititor WebService Guild | VEX lookup APIs (PURL/advisory batching, scope filters, tenant enforcement) used by Policy without verdict logic. |
|
||||
| 7 | EXCITITOR-POLICY-20-002 | TODO | Depends on 20-001; extend linksets. | Excititor Core Guild | Add scope resolution/version range metadata to linksets while staying aggregation-only. |
|
||||
| 8 | EXCITITOR-RISK-66-001 | TODO | Depends on 20-002; define feed envelope. | Excititor Core · Risk Engine Guild | Publish risk-engine ready feeds (status, justification, provenance) with zero derived severity. |
|
||||
|
||||
## Action Tracker
|
||||
| Focus | Action | Owner(s) | Due | Status |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| Timeline events | Finalize event schema + trace IDs (OBS-52-001). | Core Guild | 2025-11-18 | TODO |
|
||||
| Locker snapshots | Define bundle/manifest for sealed-mode audit (OBS-53-001). | Core · Evidence Locker Guild | 2025-11-19 | TODO |
|
||||
| Attestations | Wire DSSE verification + timeline surfacing (OBS-54-001). | Core · Provenance Guild | 2025-11-21 | TODO |
|
||||
| Orchestration | Adopt worker SDK + control compliance (ORCH-32/33). | Worker Guild | 2025-11-20 | TODO |
|
||||
| Policy/Risk APIs | Shape APIs + feeds (POLICY-20-001/002, RISK-66-001). | WebService/Core · Risk Guild | 2025-11-22 | TODO |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-11-16 | Normalized sprint file to standard template and renamed to SPRINT_0119_0001_0004_excititor_iv.md; awaiting task kickoff. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- **Decisions**
|
||||
- Evidence timeline + locker payloads must remain aggregation-only; no consensus/merging.
|
||||
- Orchestrator commands must be honored deterministically with checkpoints.
|
||||
- **Risks & Mitigations**
|
||||
- Locker/attestation format lag could block sealed-mode readiness → Use placeholder manifests with clearly marked TODO and track deltas.
|
||||
- Orchestrator SDK changes could destabilize workers → Gate rollout behind feature flag; add rollback checkpoints.
|
||||
|
||||
## Next Checkpoints
|
||||
| Date (UTC) | Session / Owner | Goal | Fallback |
|
||||
| --- | --- | --- | --- |
|
||||
| 2025-11-18 | Timeline schema review | Approve OBS-52-001 event envelope. | Iterate with provisional event topic if blocked. |
|
||||
| 2025-11-20 | Orchestrator integration demo | Show worker heartbeats/progress with pause/throttle compliance. | Keep jobs on legacy runner until stability proven. |
|
||||
| 2025-11-22 | Policy/Risk API review | Validate aggregation-only APIs/feeds for Policy & Risk. | Ship behind feature flag if minor gaps. |
|
||||
60
docs/implplan/SPRINT_0119_0001_0005_excititor_v.md
Normal file
60
docs/implplan/SPRINT_0119_0001_0005_excititor_v.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# Sprint 0119_0001_0005 · Excititor Ingestion & Evidence (Phase V)
|
||||
|
||||
## Topic & Scope
|
||||
- Feed VEX Lens and Vuln Explorer with enriched, canonicalized evidence while keeping Excititor aggregation-only.
|
||||
- Lock schema validation/idempotency for raw storage and wire mirror registration APIs for air-gapped parity.
|
||||
- Continue portable evidence bundle work linked to timeline/attestation metadata.
|
||||
- **Working directory:** `src/Excititor` (WebService, Core, Storage); coordinate with Evidence Locker for bundles.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Upstream: Timeline/attestation outputs from Phase IV; portable bundle schema; schema validator groundwork in Storage; mirror registration contract.
|
||||
- Concurrency: VEX Lens/Vuln Explorer APIs can progress while storage validator indexes prepare; portable bundles depend on mirror registration; observability hooks trail API delivery.
|
||||
- Peers: Coordinate with VEX Lens and Vuln Explorer teams for evidence fields/examples.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- `docs/modules/excititor/architecture.md`
|
||||
- `docs/modules/excititor/README.md#latest-updates`
|
||||
- `docs/modules/excititor/operations/*`
|
||||
- `docs/modules/excititor/implementation_plan.md`
|
||||
- Excititor component `AGENTS.md` files (WebService, Core, Storage).
|
||||
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | EXCITITOR-VEXLENS-30-001 | TODO | Align required enrichers/fields with VEX Lens. | Excititor WebService Guild · VEX Lens Guild | Ensure observations exported to VEX Lens carry issuer hints, signature blobs, product tree snippets, staleness metadata; no consensus logic. |
|
||||
| 2 | EXCITITOR-VULN-29-001 | TODO | Canonicalization rules + backfill plan. | Excititor WebService Guild | Canonicalize advisory/product keys to `advisory_key`, capture scope metadata, preserve originals in `links[]`; backfill + tests. |
|
||||
| 3 | EXCITITOR-VULN-29-002 | TODO | After 29-001; design endpoint. | Excititor WebService Guild | `/vuln/evidence/vex/{advisory_key}` returning tenant-scoped raw statements, provenance, attestation references for Vuln Explorer. |
|
||||
| 4 | EXCITITOR-VULN-29-004 | TODO | After 29-002; metrics/logs. | Excititor WebService · Observability Guild | Metrics/logs for normalization errors, suppression scopes, withdrawn statements for Vuln Explorer + Advisory AI dashboards. |
|
||||
| 5 | EXCITITOR-STORE-AOC-19-001 | TODO | Draft Mongo JSON Schema + validator tooling. | Excititor Storage Guild | Ship validator (incl. Offline Kit instructions) proving Excititor stores only immutable evidence. |
|
||||
| 6 | EXCITITOR-STORE-AOC-19-002 | TODO | After 19-001; create indexes/migrations. | Excititor Storage · DevOps Guild | Unique indexes, migrations/backfills, rollback steps for new validator. |
|
||||
| 7 | EXCITITOR-AIRGAP-56-001 | TODO | Define mirror registration envelope. | Excititor WebService Guild | Mirror bundle registration + provenance exposure, sealed-mode error mapping, staleness metrics in API responses. |
|
||||
| 8 | EXCITITOR-AIRGAP-58-001 | TODO | Depends on 56-001 + bundle schema. | Excititor Core · Evidence Locker Guild | Portable evidence bundles linked to timeline + attestation metadata; document verifier steps for Advisory AI. |
|
||||
|
||||
## Action Tracker
|
||||
| Focus | Action | Owner(s) | Due | Status |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| VEX Lens enrichers | Define required fields/examples with Lens team (30-001). | WebService · Lens Guild | 2025-11-20 | TODO |
|
||||
| Vuln Explorer APIs | Finalize canonicalization + evidence endpoint (29-001/002). | WebService Guild | 2025-11-21 | TODO |
|
||||
| Observability | Add metrics/logs for evidence pipeline (29-004). | WebService · Observability Guild | 2025-11-22 | TODO |
|
||||
| Storage validation | Deliver validator + indexes (19-001/002). | Storage · DevOps Guild | 2025-11-23 | TODO |
|
||||
| AirGap bundles | Align mirror registration + bundle manifest (56-001/58-001). | WebService · Core · Evidence Locker | 2025-11-24 | TODO |
|
||||
|
||||
## Execution Log

| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2025-11-16 | Normalized sprint file to standard template and renamed to SPRINT_0119_0001_0005_excititor_v.md; awaiting execution. | Planning |

## Decisions & Risks

- **Decisions**
  - Keep all exports/APIs aggregation-only; consensus remains outside Excititor.
  - Portable bundles must include timeline + attestation references without Excititor interpretation.
- **Risks & Mitigations**
  - Validator rollout could impact live ingestion → Staged rollout with dry-run validator and rollback steps.
  - Mirror bundle schema delays impact bundles → Use placeholder manifest with TODOs and track deltas until schema lands.

## Next Checkpoints

| Date (UTC) | Session / Owner | Goal | Fallback |
| --- | --- | --- | --- |
| 2025-11-20 | Lens/Vuln alignment | Confirm field list + examples for 30-001 / 29-001. | Ship mock responses while contracts finalize. |
| 2025-11-22 | Storage validator review | Approve schema + index plan (19-001/002). | Keep validator in dry-run if concerns arise. |
| 2025-11-24 | AirGap bundle schema sync | Align mirror registration + bundle manifest. | Escalate to Evidence Locker if schema slips; use placeholder. |
59
docs/implplan/SPRINT_0119_0001_0006_excititor_vi.md
Normal file
59
docs/implplan/SPRINT_0119_0001_0006_excititor_vi.md
Normal file
@@ -0,0 +1,59 @@
|
||||
# Sprint 0119_0001_0006 · Excititor Ingestion & Evidence (Phase VI)
|
||||
|
||||
## Topic & Scope
|
||||
- Expose streaming/timeline, evidence, and attestation APIs with OpenAPI discovery and examples, keeping aggregation-only semantics.
|
||||
- Add bundle import telemetry for air-gapped mirrors and introduce crypto provider abstraction for deterministic verification.
|
||||
- **Working directory:** `src/Excititor` (WebService); coordinate with Evidence Locker/AirGap/Policy for bundle import signals.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Upstream: Timeline events/attestations from Phase IV; portable bundle work from Phase V; OpenAPI governance guidelines; crypto provider registry design.
|
||||
- Concurrency: OpenAPI discovery/examples can progress in parallel with streaming APIs; bundle import telemetry depends on mirror schema and sealed-mode rules.
|
||||
- Peers: API Governance, Evidence Locker, AirGap importer/policy, Security guild for crypto providers.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- `docs/modules/excititor/architecture.md`
|
||||
- `docs/modules/excititor/README.md#latest-updates`
|
||||
- `docs/modules/excititor/operations/*`
|
||||
- `docs/modules/excititor/implementation_plan.md`
|
||||
- Excititor component `AGENTS.md` files (WebService).
|
||||
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | EXCITITOR-WEB-OBS-52-001 | TODO | Needs Phase IV timeline events available. | Excititor WebService Guild | SSE/WebSocket bridges for VEX timeline events with tenant filters, pagination anchors, guardrails. |
|
||||
| 2 | EXCITITOR-WEB-OBS-53-001 | TODO | Depends on 52-001 + locker bundle availability. | Excititor WebService · Evidence Locker Guild | `/evidence/vex/*` endpoints fetching locker bundles, enforcing scopes, surfacing verification metadata; no verdicts. |
|
||||
| 3 | EXCITITOR-WEB-OBS-54-001 | TODO | Depends on 53-001; link attestations. | Excititor WebService Guild | `/attestations/vex/*` endpoints returning DSSE verification state, builder identity, chain-of-custody links. |
|
||||
| 4 | EXCITITOR-WEB-OAS-61-001 | TODO | Align with API governance. | Excititor WebService Guild | Implement `/.well-known/openapi` with spec version metadata + standard error envelopes; update controller/unit tests. |
|
||||
| 5 | EXCITITOR-WEB-OAS-62-001 | TODO | Depends on 61-001; produce examples. | Excititor WebService Guild · API Governance Guild | Publish curated examples for new evidence/attestation/timeline endpoints; emit deprecation headers for legacy routes; align SDK docs. |
|
||||
| 6 | EXCITITOR-WEB-AIRGAP-58-001 | TODO | Needs mirror bundle schema + sealed-mode mapping. | Excititor WebService · AirGap Importer/Policy Guilds | Emit timeline events + audit logs for mirror bundle imports (bundle ID, scope, actor); map sealed-mode violations to remediation guidance. |
|
||||
| 7 | EXCITITOR-CRYPTO-90-001 | TODO | Define registry contract. | Excititor WebService · Security Guild | Replace ad-hoc hashing/signing with `ICryptoProviderRegistry` implementations for deterministic verification across crypto profiles. |
|
||||
|
||||
## Action Tracker
|
||||
| Focus | Action | Owner(s) | Due | Status |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| Streaming APIs | Finalize SSE/WebSocket contract + guardrails (WEB-OBS-52-001). | WebService Guild | 2025-11-20 | TODO |
|
||||
| Evidence/Attestation APIs | Wire endpoints + verification metadata (WEB-OBS-53/54). | WebService · Evidence Locker Guild | 2025-11-22 | TODO |
|
||||
| OpenAPI discovery | Implement well-known discovery + examples (WEB-OAS-61/62). | WebService · API Gov | 2025-11-21 | TODO |
|
||||
| Bundle telemetry | Define audit event + sealed-mode remediation mapping (WEB-AIRGAP-58-001). | WebService · AirGap Guilds | 2025-11-23 | TODO |
|
||||
| Crypto providers | Design `ICryptoProviderRegistry` and migrate call sites (CRYPTO-90-001). | WebService · Security Guild | 2025-11-24 | TODO |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-11-16 | Normalized sprint file to standard template and renamed to SPRINT_0119_0001_0006_excititor_vi.md; pending execution. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- **Decisions**
|
||||
- All streaming/evidence/attestation endpoints remain aggregation-only; no derived verdicts.
|
||||
- OpenAPI discovery must include version metadata and error envelope standardization.
|
||||
- **Risks & Mitigations**
|
||||
- Mirror bundle schema delays could block bundle telemetry → leverage placeholder manifest with TODOs and log-only fallback.
|
||||
- Crypto provider abstraction may impact performance → benchmark providers; default to current provider with feature flag.
|
||||
|
||||
## Next Checkpoints
|
||||
| Date (UTC) | Session / Owner | Goal | Fallback |
|
||||
| --- | --- | --- | --- |
|
||||
| 2025-11-20 | Streaming API review | Approve SSE/WebSocket contract + guardrails. | Keep behind feature flag if concerns arise. |
|
||||
| 2025-11-21 | OpenAPI discovery review | Validate well-known endpoint + examples. | Provide static spec download if discovery slips. |
|
||||
| 2025-11-23 | Bundle telemetry sync | Align audit/deprecation headers + sealed-mode mappings. | Log-only until schema finalized. |
|
||||
| 2025-11-24 | Crypto provider design review | Freeze `ICryptoProviderRegistry` contract. | Retain current crypto implementation until migration ready. |
|
||||
@@ -41,8 +41,8 @@
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | LEDGER-29-007 | TODO | Observability metric schema sign-off; deps LEDGER-29-006 | Findings Ledger Guild, Observability Guild / `src/Findings/StellaOps.Findings.Ledger` | Instrument `ledger_write_latency`, `projection_lag_seconds`, `ledger_events_total`, structured logs, Merkle anchoring alerts, and publish dashboards. |
|
||||
| 2 | LEDGER-29-008 | TODO | Depends on LEDGER-29-007 instrumentation | Findings Ledger Guild, QA Guild / `src/Findings/StellaOps.Findings.Ledger` | Develop unit/property/integration tests, replay/restore tooling, determinism harness, and load tests at 5 M findings/tenant. |
|
||||
| 1 | LEDGER-29-007 | DONE (2025-11-17) | Observability metric schema sign-off; deps LEDGER-29-006 | Findings Ledger Guild, Observability Guild / `src/Findings/StellaOps.Findings.Ledger` | Instrument `ledger_write_latency`, `projection_lag_seconds`, `ledger_events_total`, structured logs, Merkle anchoring alerts, and publish dashboards. |
|
||||
| 2 | LEDGER-29-008 | BLOCKED | Await Observability schema sign-off + ledger write endpoint contract; 5 M fixture drop pending | Findings Ledger Guild, QA Guild / `src/Findings/StellaOps.Findings.Ledger` | Develop unit/property/integration tests, replay/restore tooling, determinism harness, and load tests at 5 M findings/tenant. |
|
||||
| 3 | LEDGER-29-009 | TODO | Depends on LEDGER-29-008 harness results | Findings Ledger Guild, DevOps Guild / `src/Findings/StellaOps.Findings.Ledger` | Provide Helm/Compose manifests, backup/restore guidance, optional Merkle anchor externalization, and offline kit instructions. |
|
||||
| 4 | LEDGER-34-101 | TODO | Orchestrator ledger export contract (Sprint 150.A) | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Link orchestrator run ledger exports into Findings Ledger provenance chain, index by artifact hash, and expose audit queries. |
|
||||
| 5 | LEDGER-AIRGAP-56-001 | TODO | Mirror bundle schema freeze | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Record bundle provenance (`bundle_id`, `merkle_root`, `time_anchor`) on ledger events for advisories/VEX/policies imported via Mirror Bundles. |
|
||||
@@ -62,6 +62,8 @@
|
||||
| 2025-11-13 12:25 | Authored `docs/modules/findings-ledger/airgap-provenance.md` detailing bundle provenance, staleness, evidence snapshot, and timeline requirements for LEDGER-AIRGAP-56/57/58. | Findings Ledger Guild |
|
||||
| 2025-11-16 | Normalised sprint to standard template and renamed to `SPRINT_0120_0000_0001_policy_reasoning.md`; no content changes beyond reformat. | Project Management |
|
||||
| 2025-11-16 | Added `src/Findings/AGENTS.md` synthesising required reading, boundaries, determinism/observability rules for implementers. | Project Management |
|
||||
| 2025-11-17 | LEDGER-29-007 complete: dashboards + alert rules added to offline bundle; Cobertura coverage captured at `out/coverage/ledger/4d714ddd-216e-4643-ba81-2b8a4ffda218/coverage.cobertura.xml`; bundling script updated. | Findings Ledger Guild |
|
||||
| 2025-11-17 | LEDGER-29-008 started: replay harness skeleton added (`src/Findings/tools/LedgerReplayHarness`), sample fixture + tests; currently BLOCKED awaiting Observability schema + ledger writer/projection contract + 5 M fixture drop. | Findings Ledger Guild |
|
||||
|
||||
## Decisions & Risks
|
||||
- Metric names locked by 2025-11-15 and documented in `docs/observability/policy.md` to avoid schema churn.
|
||||
|
||||
64
docs/implplan/SPRINT_0138_0000_0001_scanner_ruby_parity.md
Normal file
64
docs/implplan/SPRINT_0138_0000_0001_scanner_ruby_parity.md
Normal file
@@ -0,0 +1,64 @@
|
||||
# Sprint 0138 · Scanner & Surface — Ruby Analyzer Parity
|
||||
|
||||
## Topic & Scope
|
||||
- Achieve Ruby analyzer parity: runtime require/autoload graphs, capability signals, observation payloads, package inventories, and CLI/WebService wiring for scan/digest lookup.
|
||||
- Sustain EntryTrace heuristic cadence with deterministic fixtures and explain-trace updates drawn from competitor gap benchmarks.
|
||||
- Prepare runway for language coverage expansion (PHP now, Deno/Dart/Swift scoped) to keep parity roadmap on track.
|
||||
- **Working directory:** `src/Scanner` (Analyzer, Worker, WebService, CLI surfaces) and supporting docs under `docs/modules/scanner`.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Depends on Sprint 0137 · Scanner.VIII (gap designs locked) and Sprint 0135 · Scanner.VI (EntryTrace foundations).
|
||||
- Feeds Sprint 0139 and downstream CLI releases once Ruby analyzer, policy, and licensing tracks land.
|
||||
- Parallel-safe with other modules; ensure Mongo is available when touching package inventory store tasks.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- `docs/README.md`; `docs/07_HIGH_LEVEL_ARCHITECTURE.md`.
|
||||
- `docs/modules/scanner/architecture.md`; `docs/modules/scanner/operations/dsse-rekor-operator-guide.md`.
|
||||
- AGENTS for involved components: `src/Scanner/StellaOps.Scanner.Worker/AGENTS.md`, `src/Scanner/StellaOps.Scanner.WebService/AGENTS.md`, `src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby/AGENTS.md`, `src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php/AGENTS.md`, `src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno/AGENTS.md`, `src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart/AGENTS.md`, `src/Scanner/StellaOps.Scanner.Analyzers.Native/AGENTS.md`.
|
||||
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | SCANNER-ENG-0008 | DONE (2025-11-16) | Cadence documented; quarterly review workflow published for EntryTrace heuristics. | EntryTrace Guild, QA Guild (`src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace`) | Maintain EntryTrace heuristic cadence per `docs/benchmarks/scanner/scanning-gaps-stella-misses-from-competitors.md`, including explain-trace updates. |
|
||||
| 2 | SCANNER-ENG-0009 | DONE (2025-11-13) | Release handoff to Sprint 0139 consumers; monitor Mongo-backed inventory rollout. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | Ruby analyzer parity shipped: runtime graph + capability signals, observation payload, Mongo-backed `ruby.packages` inventory, CLI/WebService surfaces, and plugin manifest bundles for Worker loadout. |
|
||||
| 3 | SCANNER-ENG-0010 | BLOCKED | Await composer/autoload graph design + staffing; no PHP analyzer scaffolding exists yet. | PHP Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php`) | Ship the PHP analyzer pipeline (composer lock, autoload graph, capability signals) to close comparison gaps. |
|
||||
| 4 | SCANNER-ENG-0011 | BLOCKED | Needs Deno runtime analyzer scope + lockfile/import graph design; pending competitive review. | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno`) | Scope the Deno runtime analyzer (lockfile resolver, import graphs) beyond Sprint 130 coverage. |
|
||||
| 5 | SCANNER-ENG-0012 | BLOCKED | Define Dart analyzer requirements (pubspec parsing, AOT artifacts) and split into tasks. | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart`) | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. |
|
||||
| 6 | SCANNER-ENG-0013 | BLOCKED | Draft SwiftPM coverage plan; align policy hooks; awaiting design kick-off. | Swift Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Native`) | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. |
|
||||
| 7 | SCANNER-ENG-0014 | BLOCKED | Needs joint roadmap with Zastava/Runtime guilds for Kubernetes/VM alignment. | Runtime Guild, Zastava Guild (`docs/modules/scanner`) | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. |
|
||||
| 8 | SCANNER-ENG-0015 | DONE (2025-11-13) | Ready for Ops training; track adoption metrics. | Export Center Guild, Scanner Guild (`docs/modules/scanner`) | DSSE/Rekor operator playbook published with config/env tables, rollout phases, offline verification, and SLA/alert guidance. |
|
||||
| 9 | SCANNER-ENG-0016 | DONE (2025-11-10) | Monitor bundler override edge cases; keep fixtures deterministic. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | RubyLockCollector and vendor ingestion finalized: Bundler overrides honoured, workspace lockfiles merged, vendor bundles normalised, deterministic fixtures added. |
|
||||
| 10 | SCANNER-ENG-0017 | DONE (2025-11-09) | Keep tree-sitter Ruby grammar pinned; reuse EntryTrace hints for regressions. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | Build runtime require/autoload graph builder with tree-sitter Ruby per design §4.4 and integrate EntryTrace hints. |
|
||||
| 11 | SCANNER-ENG-0018 | DONE (2025-11-09) | Feed predicates to policy docs; monitor capability gaps. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | Emit Ruby capability + framework surface signals per design §4.5 with policy predicate hooks. |
|
||||
| 12 | SCANNER-ENG-0019 | DONE (2025-11-13) | Observe CLI/WebService adoption; ensure scanId resolution metrics logged. | Ruby Analyzer Guild, CLI Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | Ruby CLI verbs resolve inventories by scan ID, digest, or image reference; WebService fallbacks + CLI client encoding cover both digests and tagged references. |
|
||||
| 13 | SCANNER-LIC-0001 | DONE (2025-11-10) | Keep Offline Kit mirrors current with ruby artifacts. | Scanner Guild, Legal Guild (`docs/modules/scanner`) | Tree-sitter licensing captured, `NOTICE.md` updated, and Offline Kit now mirrors `third-party-licenses/` with ruby artifacts. |
|
||||
| 14 | SCANNER-POLICY-0001 | DONE (2025-11-10) | Align DSL docs with future PHP/Deno/Dart predicates. | Policy Guild, Ruby Analyzer Guild (`docs/modules/scanner`) | Ruby predicates shipped: Policy Engine exposes `sbom.any_component` + `ruby.*`, tests updated, DSL/offline-kit docs refreshed. |
|
||||
| 15 | SCANNER-CLI-0001 | DONE (2025-11-10) | Final verification of docs/help; handoff to CLI release notes. | CLI Guild, Ruby Analyzer Guild (`src/Cli/StellaOps.Cli`) | Coordinate CLI UX/help text for new Ruby verbs and update CLI docs/golden outputs. |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-11-09 | `SCANNER-CLI-0001`: Spectre table wrapping fix for runtime/lockfile columns; expanded Ruby resolve JSON assertions; removed debug artifacts; docs/tests pending final merge. | CLI Guild |
|
||||
| 2025-11-09 | `SCANNER-CLI-0001`: Wired `stellaops-cli ruby inspect|resolve` into `CommandFactory` with `--root`, `--image/--scan-id`, `--format`; `dotnet test ... --filter Ruby` passes. | CLI Guild |
|
||||
| 2025-11-09 | `SCANNER-CLI-0001`: Added CLI unit tests (CommandFactoryTests, Ruby inspect JSON assertions) to guard new verbs and runtime metadata output. | CLI Guild |
|
||||
| 2025-11-09 | `SCANNER-ENG-0016`: Completed Ruby lock collector & vendor ingestion; honours `.bundle/config` overrides, folds workspace lockfiles, emits bundler groups; fixtures/goldens updated; `dotnet test ... --filter Ruby` passes. | Ruby Analyzer Guild |
|
||||
| 2025-11-12 | `SCANNER-ENG-0009`: Observation payload + `ruby-observation` component emitted; `complex-app` fixture added for vendor caches/BUNDLE_PATH overrides; bundler-version metadata captured; CLI prints observation banner. | Ruby Analyzer Guild |
|
||||
| 2025-11-12 | `SCANNER-ENG-0009`: Ruby package inventories flow into `RubyPackageInventoryStore`; `SurfaceManifestStageExecutor` builds package list; WebService exposes `GET /api/scans/{scanId}/ruby-packages`. | Ruby Analyzer Guild |
|
||||
| 2025-11-12 | `SCANNER-ENG-0009`: Inventory API returns typed envelope (scanId/imageDigest/generatedAt + packages); Worker/WebService DI registers real/Null stores; CLI `ruby resolve` consumes payload and warns during warmup. | Ruby Analyzer Guild |
|
||||
| 2025-11-13 | `SCANNER-ENG-0009`: Verified Worker DI wiring; plugin drop mirrors analyzer assembly + manifest for Worker hot-load; tests cover analyzer fixtures, Worker persistence, WebService endpoint. | Ruby Analyzer Guild |
|
||||
| 2025-11-13 | `SCANNER-ENG-0015`: DSSE/Rekor operator guide expanded with config/env map, rollout runbook, verification snippets, alert/SLO recommendations. | Export Center Guild |
|
||||
| 2025-11-13 | `SCANNER-ENG-0019`: WebService maps digest/reference identifiers to scan IDs; CLI backend encodes path segments; regression tests (`RubyPackagesEndpointsTests`, `StellaOps.Cli.Tests --filter Ruby`) cover lookup path. | Ruby Analyzer Guild |
|
||||
| 2025-11-16 | Normalised sprint file to standard template and renamed to `SPRINT_0138_0000_0001_scanner_ruby_parity.md`; no semantic task changes. | Planning |
|
||||
| 2025-11-16 | `SCANNER-ENG-0008`: Published EntryTrace heuristic cadence doc and recorded task completion; cadence now scheduled quarterly with fixture-first workflow. | EntryTrace Guild |
|
||||
| 2025-11-16 | `SCANNER-ENG-0010..0014`: Marked BLOCKED pending design/staffing (PHP/Deno/Dart/Swift analyzers, Kubernetes/VM alignment); awaiting guild inputs. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- PHP analyzer pipeline (SCANNER-ENG-0010) blocked pending composer/autoload graph design + staffing; parity risk remains.
|
||||
- Deno, Dart, and Swift analyzers (SCANNER-ENG-0011..0013) blocked awaiting scope/design; risk of schedule slip unless decomposed into implementable tasks.
|
||||
- Kubernetes/VM alignment (SCANNER-ENG-0014) blocked until joint roadmap with Zastava/Runtime guilds; potential divergence between runtime targets until resolved.
|
||||
- Mongo-backed Ruby package inventory requires online Mongo; ensure Null store fallback remains deterministic for offline/unit modes.
|
||||
- EntryTrace cadence now documented; risk reduced to execution discipline—ensure quarterly reviews are logged in `TASKS.md` and sprint logs.
|
||||
|
||||
## Next Checkpoints
|
||||
- Schedule guild sync to staff PHP analyzer pipeline and confirm design entry docs. (TBD week of 2025-11-18)
|
||||
- Set alignment review with Zastava/Runtime guilds for Kubernetes/VM coverage plan. (TBD)
|
||||
@@ -0,0 +1,60 @@
|
||||
# Sprint 0144 · Runtime & Signals (Zastava)
|
||||
|
||||
## Topic & Scope
|
||||
- Shift Zastava Observer/Webhook onto Surface.Env and Surface.Secrets for cache endpoints, secret refs, and feature toggles to keep air-gap posture intact.
|
||||
- Integrate Surface.FS client for runtime drift detection and enforce cache availability checks inside webhook admission responses.
|
||||
- Maintain deterministic, offline-friendly builds by ensuring required gRPC packages are mirrored into `local-nuget` before restore/test runs.
|
||||
- **Working directory:** `src/Zastava` (Observer + Webhook; shared libs under `src/Zastava/__Libraries` when needed).
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Upstream sprints: Sprint 120.A (AirGap) and Sprint 130.A (Scanner) for cache endpoint contracts and FS availability semantics.
|
||||
- External prerequisites: offline copies of `Google.Protobuf`, `Grpc.Net.Client`, and `Grpc.Tools` must exist in `local-nuget` before Observer tests can run.
|
||||
- Concurrency: Tasks follow Observer → Webhook dependency chain (ENV-01 precedes ENV-02; SECRETS-01 precedes SECRETS-02; SURFACE-01 precedes SURFACE-02). No other sprint conflicts noted.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- docs/README.md
|
||||
- docs/07_HIGH_LEVEL_ARCHITECTURE.md
|
||||
- docs/modules/platform/architecture-overview.md
|
||||
- docs/modules/zastava/architecture.md
|
||||
- src/Zastava/StellaOps.Zastava.Observer/AGENTS.md
|
||||
- src/Zastava/StellaOps.Zastava.Webhook/AGENTS.md
|
||||
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | ZASTAVA-ENV-01 | BLOCKED-w/escalation | Code landed; execution wait on Surface.FS cache plan + package mirrors to validate. | Zastava Observer Guild (src/Zastava/StellaOps.Zastava.Observer) | Adopt Surface.Env helpers for cache endpoints, secret refs, and feature toggles. |
|
||||
| 2 | ZASTAVA-ENV-02 | BLOCKED-w/escalation | Code landed; validation blocked on Surface.FS cache availability/mirrors. | Zastava Webhook Guild (src/Zastava/StellaOps.Zastava.Webhook) | Switch to Surface.Env helpers for webhook configuration (cache endpoint, secret refs, feature toggles). |
|
||||
| 3 | ZASTAVA-SECRETS-01 | BLOCKED-w/escalation | Code landed; requires cache/nuget mirrors to execute tests. | Zastava Observer Guild, Security Guild (src/Zastava/StellaOps.Zastava.Observer) | Retrieve CAS/attestation access via Surface.Secrets instead of inline secret stores. |
|
||||
| 4 | ZASTAVA-SECRETS-02 | BLOCKED-w/escalation | Code landed; waiting on same cache/mirror prerequisites for validation. | Zastava Webhook Guild, Security Guild (src/Zastava/StellaOps.Zastava.Webhook) | Retrieve attestation verification secrets via Surface.Secrets. |
|
||||
| 5 | ZASTAVA-SURFACE-01 | BLOCKED-w/escalation | Code landed; blocked on Sprint 130 analyzer artifact/cache drop and local gRPC mirrors to run tests. | Zastava Observer Guild (src/Zastava/StellaOps.Zastava.Observer) | Integrate Surface.FS client for runtime drift detection (lookup cached layer hashes/entry traces). |
|
||||
| 6 | ZASTAVA-SURFACE-02 | BLOCKED-w/escalation | Depends on SURFACE-01 validation; blocked on Surface.FS cache drop. | Zastava Webhook Guild (src/Zastava/StellaOps.Zastava.Webhook) | Enforce Surface.FS availability during admission (deny when cache missing/stale) and embed pointer checks in webhook response. |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-11-08 | Archived completed items to docs/implplan/archived/tasks.md. | Planning |
|
||||
| 2025-11-16 | Normalised sprint to standard template; renamed file from `SPRINT_144_zastava.md` to `SPRINT_0144_0001_0001_zastava_runtime_signals.md`. | Project Mgmt |
|
||||
| 2025-11-16 | Started ZASTAVA-ENV-01 (Surface.Env adoption in Observer). | Zastava Observer |
|
||||
| 2025-11-16 | Completed ZASTAVA-ENV-01; wired Surface.Env into observer DI, added Surface env logging, new unit coverage; build/test attempt currently blocked by repo-wide build fan-out—rerun targeted build when dependency graph stabilises. | Zastava Observer |
|
||||
| 2025-11-16 | Started ZASTAVA-ENV-02 (Surface.Env adoption in Webhook). | Zastava Webhook |
|
||||
| 2025-11-16 | Completed ZASTAVA-ENV-02; wired Surface.Env into webhook DI, logged resolved surface settings, added DI unit coverage. Webhook test restore cancelled due to repo-wide restore fan-out; rerun targeted restore/test when caches available. | Zastava Webhook |
|
||||
| 2025-11-16 | Completed ZASTAVA-SECRETS-01; integrated Surface.Secrets into observer DI, added secret options, secret retrieval service, and inline-secrets unit tests. Observer test restore still cancelled by repo-wide restore fan-out; retry with cached packages. | Zastava Observer |
|
||||
| 2025-11-16 | Completed ZASTAVA-SECRETS-02; wired Surface.Secrets into webhook DI, added attestation secret options/service, and inline attestation unit test. Webhook restore cancelled mid-run; rerun with local nuget cache. | Zastava Webhook |
|
||||
| 2025-11-16 | Completed ZASTAVA-SURFACE-01; registered Surface.FS cache/manifest store in observer, added runtime Surface FS client and manifest fetch test. Restore not executed due to repo-wide fan-out; rerun targeted tests when caches ready. | Zastava Observer |
|
||||
| 2025-11-16 | Started ZASTAVA-SURFACE-02 (admission cache enforcement + pointer checks). | Zastava Webhook |
|
||||
| 2025-11-17 | Completed ZASTAVA-SURFACE-02; webhook denies when surface manifest missing, emits manifest pointer in admission metadata, and tests added. Restore/test still blocked by repo-wide restore fan-out (even with nuget.org); rerun once local cache available. | Zastava Webhook |
|
||||
| 2025-11-17 | Primed local-nuget via lightweight nuget-prime project (gRPC, Serilog, Microsoft.Extensions rc2); restore still stalls when running observer tests. Additional packages likely required; keep using local-nuget cache on next restore attempt. | Build/DevOps |
|
||||
| 2025-11-17 | Added repo-level NuGet.config pointing to ./local-nuget (fallback + primary), nuget.org secondary, to prefer offline cache on future restores. | Build/DevOps |
|
||||
| 2025-11-17 | Restore retries (observer/webhook tests) still stalled; need explicit mirroring of Authority/Auth stacks and Google/AWS transitives into local-nuget before tests can run. | Build/DevOps |
|
||||
|
||||
## Decisions & Risks
|
||||
- All tasks are BLOCKED-w/escalation pending Sprint 130 Surface.FS cache drop ETA and local gRPC package mirrors; code landed but validation cannot proceed.
|
||||
- Observer/webhook restores require offline `Google.Protobuf`, `Grpc.Net.Client`, and `Grpc.Tools` in `local-nuget`; prior restores stalled due to repo-wide fan-out.
|
||||
- Surface.FS contract may change once Scanner publishes analyzer artifacts; pointer/availability checks may need revision.
|
||||
- Surface.Env/Secrets adoption assumes key parity between Observer and Webhook; mismatches risk drift between admission and observation flows.
|
||||
- Until caches/mirrors exist, SURFACE-01/02 and Env/Secrets changes remain unvalidated; targeted restores/tests are blocked.
|
||||
- Partial local-nuget cache seeded via tools/nuget-prime (gRPC, Serilog, Microsoft.Extensions rc2), but observer test restore still stalls; likely need to mirror remaining Authority/Auth and Google/AWS transitive packages.
|
||||
|
||||
## Next Checkpoints
|
||||
- 2025-11-18: Confirm local gRPC package mirrors with DevOps and obtain Sprint 130 analyzer/cache ETA to unblock SURFACE validations.
|
||||
- 2025-11-20: Dependency review with Scanner/AirGap owners to lock Surface.FS cache semantics; if ETA still missing, escalate per sprint 140 plan.
|
||||
@@ -17,4 +17,7 @@ Dependency: Sprint 130 - 1. Scanner.I — Scanner & Surface focus on Scanner (ph
|
||||
| `SCANNER-ANALYZERS-JAVA-21-009` | TODO | Author comprehensive fixtures (modular app, boot fat jar, war, ear, MR-jar, jlink image, JNI, reflection heavy, signed jar, microprofile) with golden outputs and perf benchmarks. | Java Analyzer Guild, QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Java) | SCANNER-ANALYZERS-JAVA-21-008 |
|
||||
| `SCANNER-ANALYZERS-JAVA-21-010` | TODO | Optional runtime ingestion: Java agent + JFR reader capturing class load, ServiceLoader, and System.load events with path scrubbing. Emit append-only runtime edges `runtime-class`/`runtime-spi`/`runtime-load`. | Java Analyzer Guild, Signals Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Java) | SCANNER-ANALYZERS-JAVA-21-009 |
|
||||
| `SCANNER-ANALYZERS-JAVA-21-011` | TODO | Package analyzer as restart-time plug-in (manifest/DI), update Offline Kit docs, add CLI/worker hooks for Java inspection commands. | Java Analyzer Guild, DevOps Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Java) | SCANNER-ANALYZERS-JAVA-21-010 |
|
||||
| `SCANNER-ANALYZERS-LANG-11-001` | TODO | Build entrypoint resolver that maps project/publish artifacts to entrypoint identities (assembly name, MVID, TFM, RID) and environment profiles (publish mode, host kind, probing paths). Output normalized `entrypoints[]` records with deterministic IDs. | StellaOps.Scanner EPDR Guild, Language Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | SCANNER-ANALYZERS-LANG-10-309R |
|
||||
| `SCANNER-ANALYZERS-LANG-11-001` | BLOCKED (2025-11-17) | Build entrypoint resolver that maps project/publish artifacts to entrypoint identities (assembly name, MVID, TFM, RID) and environment profiles (publish mode, host kind, probing paths). Output normalized `entrypoints[]` records with deterministic IDs. | StellaOps.Scanner EPDR Guild, Language Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | SCANNER-ANALYZERS-LANG-10-309R |
|
||||
|
||||
## Decisions & Risks
|
||||
- SCANNER-ANALYZERS-LANG-11-001 blocked (2025-11-17): local `dotnet test` hangs/returns empty output; requires clean runner/CI hang diagnostics to complete entrypoint resolver implementation and golden regeneration.
|
||||
|
||||
@@ -8,17 +8,17 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
|
||||
| Wave | Guild owners | Shared prerequisites | Status | Notes |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| 140.A Graph | Graph Indexer Guild · Observability Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner (phase I tracked under `docs/implplan/SPRINT_130_scanner_surface.md`) | TODO | Hold until Scanner surface work emits the analyzer artifacts required for clustering jobs. |
|
||||
| 140.A Graph | Graph Indexer Guild · Observability Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner (phase I tracked under `docs/implplan/SPRINT_130_scanner_surface.md`) | BLOCKED | Analyzer artifacts ETA from Sprint 130 is overdue (missed 2025-11-13); clustering/backfill waits on ETA or mock payload plan. |
|
||||
| 140.B SbomService | SBOM Service Guild · Cartographer Guild · Observability Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | TODO | Projection schema remains blocked on Concelier outputs; keep AirGap parity requirements in scope. |
|
||||
| 140.C Signals | Signals Guild · Authority Guild (for scopes) · Runtime Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | DOING | API skeleton and callgraph ingestion are active; runtime facts endpoint still depends on the same shared prerequisites. |
|
||||
| 140.D Zastava | Zastava Observer/Webhook Guilds · Security Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | TODO | Surface.FS integration waits on Scanner surface caches; prep sealed-mode env helpers meanwhile. |
|
||||
| 140.C Signals | Signals Guild · Authority Guild (for scopes) · Runtime Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | BLOCKED | CAS checklist + provenance appendix overdue; callgraph retrieval live but artifacts not trusted until CAS/signing lands. |
|
||||
| 140.D Zastava | Zastava Observer/Webhook Guilds · Security Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | BLOCKED | Surface.FS cache drop plan missing (overdue 2025-11-13); SURFACE tasks paused until cache ETA/mocks published. |
|
||||
|
||||
# Status snapshot (2025-11-13)
|
||||
# Status snapshot (2025-11-18)
|
||||
|
||||
- **140.A Graph** – GRAPH-INDEX-28-007/008/009/010 remain TODO while Scanner surface artifacts and SBOM projection schemas are outstanding; clustering/backfill/fixture scaffolds are staged but cannot progress until analyzer payloads arrive.
|
||||
- **140.A Graph** – GRAPH-INDEX-28-007/008/009/010 are BLOCKED while Sprint 130 analyzer artifacts remain overdue; clustering/backfill/fixture scaffolds stay staged pending ETA or mock payloads.
|
||||
- **140.B SbomService** – Advisory AI, console, and orchestrator tracks stay TODO; SBOM-SERVICE-21-001..004 remain BLOCKED waiting for Concelier Link-Not-Merge (`CONCELIER-GRAPH-21-001`) plus Cartographer schema (`CARTO-GRAPH-21-002`), and AirGap parity must be re-validated once schemas land. Teams are refining projection docs so we can flip to DOING as soon as payloads land.
|
||||
- **140.C Signals** – SIGNALS-24-001 shipped on 2025-11-09; SIGNALS-24-002 is DOING with callgraph retrieval live but CAS promotion + signed manifest tooling still pending; SIGNALS-24-003 is DOING after JSON/NDJSON ingestion merged, yet provenance/context enrichment and runtime feed reconciliation remain in-flight. Scoring/cache work (SIGNALS-24-004/005) stays BLOCKED until runtime uploads publish consistently and scope propagation validation (post `AUTH-SIG-26-001`) completes.
|
||||
- **140.D Zastava** – ZASTAVA-ENV/SECRETS/SURFACE tracks remain TODO because Surface.FS cache outputs from Scanner are still unavailable; guilds continue prepping Surface.Env helper adoption and sealed-mode scaffolding.
|
||||
- **140.C Signals** – SIGNALS-24-001 shipped on 2025-11-09; SIGNALS-24-002 is RED/BLOCKED with CAS promotion + signed manifest tooling pending; SIGNALS-24-003 is DOING but awaits provenance appendix and runtime feed reconciliation. Scoring/cache work (SIGNALS-24-004/005) stays BLOCKED until CAS/provenance and runtime uploads stabilize.
|
||||
- **140.D Zastava** – ZASTAVA-ENV/SECRETS/SURFACE tracks are BLOCKED because Surface.FS cache outputs from Scanner are still unavailable; guilds continue prepping Surface.Env helper adoption and sealed-mode scaffolding while caches are pending.
|
||||
|
||||
## Wave task tracker (refreshed 2025-11-13)
|
||||
|
||||
@@ -26,10 +26,10 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
|
||||
| Task ID | State | Notes |
|
||||
| --- | --- | --- |
|
||||
| GRAPH-INDEX-28-007 | TODO | Clustering/centrality jobs queued behind Scanner surface analyzer artifacts; design work complete but implementation held. |
|
||||
| GRAPH-INDEX-28-008 | TODO | Incremental update/backfill pipeline depends on 28-007 artifacts; retry/backoff plumbing sketched but blocked. |
|
||||
| GRAPH-INDEX-28-009 | TODO | Test/fixture/chaos coverage waits on earlier jobs to exist so determinism checks have data. |
|
||||
| GRAPH-INDEX-28-010 | TODO | Packaging/offline bundles paused until upstream graph jobs are available to embed. |
|
||||
| GRAPH-INDEX-28-007 | BLOCKED-w/escalation | Clustering/centrality jobs queued behind overdue Sprint 130 analyzer artifacts; design work complete but implementation held. |
|
||||
| GRAPH-INDEX-28-008 | BLOCKED-w/escalation | Incremental update/backfill pipeline depends on 28-007 artifacts; retry/backoff plumbing sketched but blocked. |
|
||||
| GRAPH-INDEX-28-009 | BLOCKED-w/escalation | Test/fixture/chaos coverage waits on earlier jobs to exist so determinism checks have data. |
|
||||
| GRAPH-INDEX-28-010 | BLOCKED-w/escalation | Packaging/offline bundles paused until upstream graph jobs are available to embed. |
|
||||
|
||||
### 140.B SbomService
|
||||
|
||||
@@ -204,14 +204,14 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
| 140.A Graph | `docs/implplan/SPRINT_141_graph.md` (Graph clustering/backfill) and downstream Graph UI overlays | Graph insights, policy overlays, and runtime clustering views cannot progress without GRAPH-INDEX-28-007+ landing. |
|
||||
| 140.B SbomService | `docs/implplan/SPRINT_142_sbomservice.md`, Advisory AI (Sprint 111), Policy/Vuln Explorer feeds | SBOM projections/events stay unavailable, blocking Advisory AI remediation heuristics, policy joins, and Vuln Explorer candidate generation. |
|
||||
| 140.C Signals | `docs/implplan/SPRINT_143_signals.md` plus Runtime/Reachability dashboards | Reachability scoring, cache/event layers, and runtime facts outputs cannot start until SIGNALS-24-001/002 merge and Scanner runtime data flows. |
|
||||
| 140.D Zastava | `docs/implplan/SPRINT_144_zastava.md`, Runtime admission enforcement | Surface-integrated drift/admission hooks remain stalled; sealed-mode env helpers cannot ship without Surface.FS metadata. |
|
||||
| 140.D Zastava | `docs/implplan/SPRINT_0144_0001_0001_zastava_runtime_signals.md`, Runtime admission enforcement | Surface-integrated drift/admission hooks remain stalled; sealed-mode env helpers cannot ship without Surface.FS metadata. |
|
||||
|
||||
# Risk log
|
||||
|
||||
| Risk | Impact | Mitigation / owner |
|
||||
| --- | --- | --- |
|
||||
| Concelier Link-Not-Merge schema slips | SBOM-SERVICE-21-001..004 + Advisory AI SBOM endpoints stay blocked | Concelier + Cartographer guilds to publish CARTO-GRAPH-21-002 ETA during next coordination call; SBOM guild to prep schema doc meanwhile. |
|
||||
| Scanner surface artifact delay | GRAPH-INDEX-28-007+ and ZASTAVA-SURFACE-* cannot even start | Scanner guild to deliver analyzer artifact roadmap; Graph/Zastava teams to prepare mocks/tests in advance. |
|
||||
| Scanner surface artifact delay | GRAPH-INDEX-28-007+ and ZASTAVA-SURFACE-* cannot even start | Scanner guild to deliver analyzer artifact roadmap; Graph/Zastava teams to prepare mocks/tests in advance; escalation sent 2025-11-17. |
|
||||
| Signals host/callgraph merge misses 2025-11-09 | SIGNALS-24-003/004/005 remain blocked, pushing reachability scoring past sprint goals | Signals + Authority guilds to prioritize AUTH-SIG-26-001 review and merge SIGNALS-24-001/002 before 2025-11-10 standup. |
|
||||
| Authority build regression (`PackApprovalFreshAuthWindow`) | Signals test suite cannot run in CI, delaying validation of new endpoints | Coordinate with Authority guild to restore missing constant in `StellaOps.Auth.ServerIntegration`; rerun Signals tests once fixed. |
|
||||
| CAS promotion slips past 2025-11-14 | SIGNALS-24-002 cannot close; reachability scoring has no trusted graph artifacts | Signals + Platform Storage to co-own CAS rollout checklist, escalate blockers during 2025-11-13 runtime sync. |
|
||||
@@ -221,6 +221,7 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
|
||||
| Date | Notes |
|
||||
| --- | --- |
|
||||
| 2025-11-17 | Marked Graph/Zastava waves BLOCKED (missing Sprint 130 analyzer ETA); escalated to Scanner leadership per contingency. |
|
||||
| 2025-11-13 | Snapshot, wave tracker, meeting prep, and action items refreshed ahead of Nov 13 checkpoints; awaiting outcomes before flipping statuses. |
|
||||
| 2025-11-11 | Runtime + Signals ran NDJSON ingestion soak test; Authority flagged remaining provenance fields for schema freeze ahead of 2025-11-13 sync. |
|
||||
| 2025-11-09 | Sprint 140 snapshot refreshed; awaiting Scanner surface artifact ETA, Concelier/CARTO schema delivery, and Signals host merge before any wave can advance to DOING. |
|
||||
|
||||
151
docs/implplan/blocked-all.md
Normal file
151
docs/implplan/blocked-all.md
Normal file
@@ -0,0 +1,151 @@
|
||||
# Blocked / dependency-linked tasks (as of 2025-11-17)
|
||||
|
||||
## Decisions to unblock (ordered by blast-radius reduction)
|
||||
1) **Ratify Link-Not-Merge schema** (Concelier + Cartographer) — unblocks Concelier GRAPH-21-001/002, CONCELIER-AIRGAP/CONSOLE/ATTEST, SBOM-SERVICE-21-001..004, SBOM-AIAI-31-002/003, Excititor AIAI chunk/attestation, Graph 140.A, Signals ingest overlays. Options: (A) Freeze current schema with examples and fixtures this week; (B) Publish interim “mock schema” + feature flag while full review completes; (C) Slip one sprint and re-baseline all dependents.
|
||||
2) **Publish Sprint 130 scanner surface artifacts + cache drop ETA** — unblocks GRAPH-INDEX-28-007..010 (Sprint 141), ZASTAVA-SURFACE-01/02 (Sprint 0144), runtime signals 140.D, build/test for Zastava Env/Secrets. Options: (A) Deliver real analyzer caches + hashes; (B) Ship deterministic mock bundle within 24h plus firm delivery date; (C) Declare slip and set new start dates in downstream sprints.
|
||||
3) **Staff MIRROR-CRT-56-001 assembler** — prerequisite for MIRROR-CRT-56/57/58, Exporter OBS-51/54, CLI-AIRGAP-56, PROV-OBS-53, ExportCenter timeline. Options: (A) Assign primary + backup engineer today and start thin bundle; (B) Re-scope to “minimal thin bundle” to unblock EvidenceLocker/ExportCenter first; (C) Escalate staffing if no owner by EOD.
|
||||
4) **Expose SBOM-AIAI-31-001 contract** — required for SBOM-AIAI-31-003, DOCS-AIAI-31-008/009, AIAI-31-008 packaging. Options: (A) Ship production with auth header contract; (B) Provide sandbox/mock endpoint + recorded responses with “beta” label; (C) Slip and re-forecast dependent docs/devops tasks.
|
||||
5) **Ops span sink deployment for Excititor telemetry (31-003)** — gates observability export. Options: (A) Deploy span sink on 2025-11-18; (B) Approve temporary counters/logs-only path until sink is live.
|
||||
6) **Complete CAS checklist + signed manifest rollout (Signals)** — unblocks SIGNALS-24-002 → 24-004/005. Options: (A) Accept current manifest after spot-check; (B) Time-box remediation with risk waiver; (C) Keep RED/BLOCKED and re-plan delivery.
|
||||
7) **Orchestrator ledger export contract** — pre-req for LEDGER-34-101, EvidenceLocker/ExportCenter (160.A/B/C), TimelineIndexer. Options: (A) Ship minimal ledger payload (job_id, capsule_digest, tenant) now; (B) Wait for full capsule envelope from Orchestrator/Notifications and slip dependents; (C) Provide mock export + fixtures for Ledger tests meantime.
|
||||
8) **AdvisoryAI evidence bundle schema freeze (Nov 14 sync slip)** — needed by EvidenceLocker ingest and ExportCenter profiles. Options: (A) Freeze DSSE manifest + payload notes immediately; (B) Provide sample bundle + checksum for contract testing; (C) Move related tasks to BLOCKED-w/escalation with new date.
|
||||
9) **Policy risk export availability** — blocks NOTIFY-RISK-66/67/68. Options: (A) Release minimal read-only profile feed now; (B) Add history metadata with ≤4 day slip; (C) Freeze schema and allow Notifications to mock results.
|
||||
10) **Telemetry SLO webhook schema (TELEMETRY-OBS-50)** — blocks NOTIFY-OBS-51/55. Options: (A) Freeze current draft and hand to Notifications; (B) Provide stub contract + fixtures and allow coding against mocks; (C) Slip and re-baseline notifier tasks.
|
||||
11) **Language analyzer design kickoffs (PHP/Deno/Dart/Swift) & Java 21-008 dependency** — blocks SCANNER-ENG-0010..0014 and SCANNER-ANALYZERS-JAVA-21-008. Options: (A) Run design triage per language this week and staff leads; (B) De-scope to one language per sprint, mark others slipped; (C) Provide interim capability matrix and mock outputs for dependency unlocks.
|
||||
12) **Surface.FS cache/mirror availability** — needed to validate ZASTAVA ENV/SECRETS/SURFACE tasks and unblock SURFACE-01/02 execution. Options: (A) Stand up temporary local cache/mirror in CI; (B) Accept “code complete, unvalidated” with dated follow-up window; (C) Slip validation to align with scanner cache drop.
|
||||
13) **Timeline schema review OBS-52-001** — blocks excititor timeline overlays. Options: (A) Approve current envelope; (B) Add required fields (e.g., provenance buckets) with ≤2 day slip; (C) Provide mock topic for early pipeline tests.
|
||||
14) **SCHED-WORKER-20-301 delivery** — prerequisite for SCHED-WEB-20-002 sim trigger endpoint. Options: (A) Prioritize worker fix to unblock web; (B) Let web mock worker response for integration tests; (C) Re-scope to deliver read-only preview first.
|
||||
15) **PacksRegistry tenancy scaffolding (150.B)** — needed before PacksRegistry work starts. Options: (A) Land orchestrator tenancy scaffolding now; (B) Allow PacksRegistry to target single-tenant mode temporarily; (C) Slip PacksRegistry wave and note in sprint.
|
||||
16) **Authority pack RBAC approvals/log-stream APIs (AUTH-PACKS-43-001)** — blocking Sprint 153 start. Options: (A) Approve current RBAC model; (B) Provide interim token-scoped access; (C) Slip sprint with new date and escalation.
|
||||
17) **Export Center bootstrap (EXPORT-SVC-35-001)** — blocked on upstream Orchestrator/Scheduler telemetry readiness. Options: (A) Provide synthetic telemetry feeds for bootstrap; (B) Start migrations/config in isolation; (C) Slip with dated dependency.
|
||||
18) **Notifications OAS / SDK parity (OAS schema → SDK generator)** — SDK generator blocked on schema. Options: (A) Freeze rules schema; (B) Provide placeholder schema with versioned breaking-change flag; (C) Re-baseline SDK work.
|
||||
|
||||
## SPRINT_0110_0001_0001_ingestion_evidence.md
|
||||
|
||||
- **AIAI-31-008** — Status: BLOCKED (2025-11-16); Depends on: AIAI-31-006/007; DEVOPS-AIAI-31-001; Owners: Advisory AI Guild · DevOps Guild; Notes: Package inference on-prem container, remote toggle, Helm/Compose manifests, scaling/offline guidance.
|
||||
- **SBOM-AIAI-31-003** — Status: BLOCKED (2025-11-16); Depends on: SBOM-AIAI-31-001; CLI-VULN-29-001; CLI-VEX-30-001; Owners: SBOM Service Guild · Advisory AI Guild; Notes: Advisory AI hand-off kit for `/v1/sbom/context`; smoke test with tenants.
|
||||
- **DOCS-AIAI-31-005/006/008/009** — Status: BLOCKED; Depends on: CLI-VULN-29-001; CLI-VEX-30-001; POLICY-ENGINE-31-001; DEVOPS-AIAI-31-001; Owners: Docs Guild; Notes: CLI/policy/ops docs paused pending upstream artefacts.
|
||||
- **CONCELIER-AIRGAP-56-001..58-001** — Status: BLOCKED; Depends on: Link-Not-Merge schema; Evidence Locker contract; Owners: Concelier Core · AirGap Guilds; Notes: Mirror/offline provenance chain.
|
||||
- **CONCELIER-CONSOLE-23-001..003** — Status: BLOCKED; Depends on: Link-Not-Merge schema; Owners: Concelier Console Guild; Notes: Console advisory aggregation/search helpers.
|
||||
- **CONCELIER-ATTEST-73-001/002** — Status: BLOCKED; Depends on: CONCELIER-AIAI-31-002; Evidence Locker contract; Owners: Concelier Core · Evidence Locker Guild; Notes: Attestation inputs + transparency metadata.
|
||||
- **FEEDCONN-ICSCISA-02-012 / KISA-02-008** — Status: BLOCKED; Depends on: Feed owner remediation plan; Owners: Concelier Feed Owners; Notes: Overdue provenance refreshes.
|
||||
- **EXCITITOR-AIAI-31-002** — Status: BLOCKED; Depends on: Link-Not-Merge schema; Evidence Locker contract; Owners: Excititor Web/Core Guilds; Notes: Chunk API for Advisory AI feeds.
|
||||
- **EXCITITOR-AIAI-31-003** — Status: BLOCKED; Depends on: EXCITITOR-AIAI-31-002; Owners: Excititor Observability Guild; Notes: Telemetry gated on chunk API.
|
||||
- **EXCITITOR-AIAI-31-004** — Status: BLOCKED; Depends on: EXCITITOR-AIAI-31-002; Owners: Docs Guild · Excititor Guild; Notes: Chunk API docs.
|
||||
- **EXCITITOR-ATTEST-01-003 / 73-001 / 73-002** — Status: BLOCKED; Depends on: EXCITITOR-AIAI-31-002; Evidence Locker contract; Owners: Excititor Guild · Evidence Locker Guild; Notes: Attestation scope + payloads.
|
||||
- **EXCITITOR-AIRGAP-56/57/58 · CONN-TRUST-01-001** — Status: BLOCKED; Depends on: Link-Not-Merge schema; attestation plan; Owners: Excititor Guild · AirGap Guilds; Notes: Air-gap ingest + connector trust tasks.
|
||||
- **MIRROR-CRT-56-001** — Status: BLOCKED; Depends on: Staffing decision overdue; Owners: Mirror Creator Guild; Notes: Kickoff slipped past 2025-11-15.
|
||||
- **MIRROR-CRT-56-002** — Status: BLOCKED; Depends on: MIRROR-CRT-56-001; PROV-OBS-53-001; Owners: Mirror Creator · Security Guilds; Notes: Needs assembler owner first.
|
||||
- **MIRROR-CRT-57-001/002** — Status: BLOCKED; Depends on: MIRROR-CRT-56-001; AIRGAP-TIME-57-001; Owners: Mirror Creator Guild · AirGap Time Guild; Notes: Waiting on staffing.
|
||||
- **MIRROR-CRT-58-001/002** — Status: BLOCKED; Depends on: MIRROR-CRT-56-001; EXPORT-OBS-54-001; CLI-AIRGAP-56-001; Owners: Mirror Creator · CLI · Exporter Guilds; Notes: Requires assembler staffing + upstream contracts.
|
||||
- **EXPORT-OBS-51-001 / 54-001 · AIRGAP-TIME-57-001 · CLI-AIRGAP-56-001 · PROV-OBS-53-001** — Status: BLOCKED; Depends on: MIRROR-CRT-56-001 ownership; Owners: Exporter Guild · AirGap Time · CLI Guild; Notes: Blocked until assembler staffed.
|
||||
|
||||
## SPRINT_0111_0001_0001_advisoryai.md
|
||||
|
||||
- **DOCS-AIAI-31-008** — Status: BLOCKED (2025-11-03); Depends on: SBOM-AIAI-31-001; Owners: Docs Guild · SBOM Service Guild (`docs`); Notes: Publish `/docs/sbom/remediation-heuristics.md` (feasibility scoring, blast radius).
|
||||
- **DOCS-AIAI-31-009** — Status: BLOCKED (2025-11-03); Depends on: DEVOPS-AIAI-31-001; Owners: Docs Guild · DevOps Guild (`docs`); Notes: Create `/docs/runbooks/assistant-ops.md` for warmup, cache priming, outages, scaling.
|
||||
- **SBOM-AIAI-31-003** — Status: BLOCKED (2025-11-16); Depends on: SBOM-AIAI-31-001; Owners: SBOM Service Guild · Advisory AI Guild (`src/SbomService/StellaOps.SbomService`); Notes: Publish Advisory AI hand-off kit for `/v1/sbom/context`, provide base URL/API key + tenant header contract, run smoke test.
|
||||
- **AIAI-31-008** — Status: BLOCKED (2025-11-16); Depends on: AIAI-31-006/007; DEVOPS-AIAI-31-001; Owners: Advisory AI Guild · DevOps Guild (`src/AdvisoryAI/StellaOps.AdvisoryAI`); Notes: Package inference on-prem container, remote toggle, Helm/Compose manifests, scaling/offline guidance.
|
||||
- **DOCS-AIAI-31-004** — Status: BLOCKED (2025-11-16); Depends on: CONSOLE-VULN-29-001; CONSOLE-VEX-30-001; EXCITITOR-CONSOLE-23-001; Owners: Docs Guild · Console Guild (`docs`); Notes: `/docs/advisory-ai/console.md` screenshots, a11y, copy-as-ticket instructions.
|
||||
- **DOCS-AIAI-31-005** — Status: BLOCKED (2025-11-03); Depends on: CLI-VULN-29-001; CLI-VEX-30-001; AIAI-31-004C; Owners: Docs Guild · CLI Guild (`docs`); Notes: Publish `/docs/advisory-ai/cli.md` covering commands, exit codes, scripting patterns.
|
||||
|
||||
## SPRINT_0112_0001_0001_concelier_i.md
|
||||
|
||||
- **CONCELIER-CONSOLE-23-001** — Status: TODO; Depends on: Blocked by Link-Not-Merge schema; Owners: Concelier WebService Guild · BE-Base Platform Guild; Notes: `/console/advisories` groups linksets with severity/status chips and provenance `{documentId, observationPath}`.
|
||||
|
||||
## SPRINT_0113_0001_0002_concelier_ii.md
|
||||
|
||||
- **CONCELIER-GRAPH-21-001** — Status: BLOCKED (2025-10-27); Depends on: Waiting for Link-Not-Merge schema finalization; Owners: Concelier Core Guild · Cartographer Guild (`src/Concelier/__Libraries/StellaOps.Concelier.Core`); Notes: Extend SBOM normalization so relationships/scopes are stored as raw observation metadata with provenance pointers for graph joins.
|
||||
- **CONCELIER-GRAPH-21-002** — Status: BLOCKED (2025-10-27); Depends on: Depends on 21-001; Owners: Concelier Core Guild · Scheduler Guild (`src/Concelier/__Libraries/StellaOps.Concelier.Core`); Notes: Publish `sbom.observation.updated` events with tenant/context and advisory refs; facts only, no judgments.
|
||||
|
||||
## SPRINT_0119_0001_0001_excititor_i.md
|
||||
|
||||
- **EXCITITOR-AIRGAP-57-001** — Status: TODO; Depends on: Blocked on 56-001; define sealed-mode errors.; Owners: Excititor Core Guild · AirGap Policy Guild; Notes: Enforce sealed-mode policies, remediation errors, and staleness annotations surfaced to Advisory AI.
|
||||
- **EXCITITOR-ATTEST-73-001** — Status: DONE (2025-11-17); Depends on: Unblocked by 01-003; implement payload records.; Owners: Excititor Core · Attestation Payloads Guild; Notes: Emit attestation payloads capturing supplier identity, justification summary, and scope metadata for trust chaining.
|
||||
- **Connector provenance schema review** — Status: TODO; Depends on: CONN-TRUST-01-001 schema readiness; Owners: Connectors + Security Guilds; Notes: Approve signer fingerprint + issuer tier schema for CONN-TRUST-01-001; if schema not ready, keep task blocked and request interim metadata list from connectors.
|
||||
- **Attestation verifier rehearsal (Excititor Attestation Guild)** — Status: Demo `IVexAttestationVerifier` harness + diagnostics to unblock 73-* tasks.; Depends on: If issues persist, log BLOCKED status in attestation plan and re-forecast completion.; Owners: ; Notes:
|
||||
- **Observability span sink deploy (Ops/Signals Guild)** — Status: Enable telemetry pipeline needed for 31-003.; Depends on: If deploy slips, implement temporary counters/logs and keep action tracker flagged as blocked.; Owners: ; Notes:
|
||||
|
||||
## SPRINT_0119_0001_0002_excititor_ii.md
|
||||
|
||||
- **EXCITITOR-CORE-AOC-19-003** — Status: TODO; Depends on: Blocked on 19-002; design supersede chains.; Owners: Excititor Core Guild; Notes: Enforce uniqueness + append-only versioning of raw VEX docs.
|
||||
- **EXCITITOR-GRAPH-21-001** — Status: BLOCKED (2025-10-27); Depends on: Needs Cartographer API contract + data availability.; Owners: Excititor Core · Cartographer Guild; Notes: Batched VEX/advisory reference fetches by PURL for inspector linkouts.
|
||||
- **EXCITITOR-GRAPH-21-002** — Status: BLOCKED (2025-10-27); Depends on: Blocked on 21-001.; Owners: Excititor Core Guild; Notes: Overlay metadata includes justification summaries + versions; fixtures/tests.
|
||||
- **EXCITITOR-GRAPH-21-005** — Status: BLOCKED (2025-10-27); Depends on: Blocked on 21-002.; Owners: Excititor Storage Guild; Notes: Indexes/materialized views for VEX lookups by PURL/policy for inspector perf.
|
||||
- **Cartographer schema sync** — Status: Unblock GRAPH-21-* inspector/linkout contracts.; Depends on: Maintain BLOCKED status; deliver sample payloads for early testing.; Owners: ; Notes:
|
||||
|
||||
## SPRINT_0119_0001_0004_excititor_iv.md
|
||||
|
||||
- **Timeline schema review** — Status: Approve OBS-52-001 event envelope.; Depends on: Iterate with provisional event topic if blocked.; Owners: ; Notes:
|
||||
|
||||
## SPRINT_0120_0000_0001_policy_reasoning.md
|
||||
|
||||
- **LEDGER-34-101** — Status: BLOCKED; Depends on: Orchestrator ledger export contract (Sprint 150.A) pending; Owners: Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger`; Notes: Link orchestrator run ledger exports into Findings Ledger provenance chain, index by artifact hash, and expose audit queries.
|
||||
- **LEDGER-AIRGAP-56-001** — Status: BLOCKED; Depends on: Mirror bundle schema freeze; Owners: Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger`; Notes: Record bundle provenance (`bundle_id`, `merkle_root`, `time_anchor`) on ledger events for advisories/VEX/policies imported via Mirror Bundles.
|
||||
- **LEDGER-AIRGAP-56-002** — Status: BLOCKED; Depends on: Waits on LEDGER-AIRGAP-56-001 schema freeze; Owners: Findings Ledger Guild, AirGap Time Guild / `src/Findings/StellaOps.Findings.Ledger`; Notes: Surface staleness metrics for findings and block risk-critical exports when stale beyond thresholds; provide remediation messaging.
|
||||
- **LEDGER-AIRGAP-57-001** — Status: BLOCKED; Depends on: Waits on LEDGER-AIRGAP-56-002; Owners: Findings Ledger Guild, Evidence Locker Guild / `src/Findings/StellaOps.Findings.Ledger`; Notes: Link findings evidence snapshots to portable evidence bundles and ensure cross-enclave verification works.
|
||||
- **LEDGER-AIRGAP-58-001** — Status: BLOCKED; Depends on: Waits on LEDGER-AIRGAP-57-001; Owners: Findings Ledger Guild, AirGap Controller Guild / `src/Findings/StellaOps.Findings.Ledger`; Notes: Emit timeline events for bundle import impacts (new findings, remediation changes) with sealed-mode context.
|
||||
- **LEDGER-ATTEST-73-001** — Status: BLOCKED; Depends on: Attestation pointer schema alignment with NOTIFY-ATTEST-74-001; Owners: Findings Ledger Guild, Attestor Service Guild / `src/Findings/StellaOps.Findings.Ledger`; Notes: Persist pointers from findings to verification reports and attestation envelopes for explainability.
|
||||
|
||||
## SPRINT_0138_0000_0001_scanner_ruby_parity.md
|
||||
|
||||
- **SCANNER-ENG-0010** — Status: BLOCKED; Depends on: Await composer/autoload graph design + staffing; no PHP analyzer scaffolding exists yet.; Owners: PHP Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php`); Notes: Ship the PHP analyzer pipeline (composer lock, autoload graph, capability signals) to close comparison gaps.
|
||||
- **SCANNER-ENG-0011** — Status: BLOCKED; Depends on: Needs Deno runtime analyzer scope + lockfile/import graph design; pending competitive review.; Owners: Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno`); Notes: Scope the Deno runtime analyzer (lockfile resolver, import graphs) beyond Sprint 130 coverage.
|
||||
- **SCANNER-ENG-0012** — Status: BLOCKED; Depends on: Define Dart analyzer requirements (pubspec parsing, AOT artifacts) and split into tasks.; Owners: Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart`); Notes: Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks.
|
||||
- **SCANNER-ENG-0013** — Status: BLOCKED; Depends on: Draft SwiftPM coverage plan; align policy hooks; awaiting design kick-off.; Owners: Swift Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Native`); Notes: Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks.
|
||||
- **SCANNER-ENG-0014** — Status: BLOCKED; Depends on: Needs joint roadmap with Zastava/Runtime guilds for Kubernetes/VM alignment.; Owners: Runtime Guild, Zastava Guild (`docs/modules/scanner`); Notes: Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap.
|
||||
|
||||
## SPRINT_0144_0001_0001_zastava_runtime_signals.md
|
||||
|
||||
- **ZASTAVA-ENV-01** — Status: BLOCKED-w/escalation; Depends on: Code landed; execution wait on Surface.FS cache plan + package mirrors to validate.; Owners: Zastava Observer Guild (src/Zastava/StellaOps.Zastava.Observer); Notes: Adopt Surface.Env helpers for cache endpoints, secret refs, and feature toggles.
|
||||
- **ZASTAVA-ENV-02** — Status: BLOCKED-w/escalation; Depends on: Code landed; validation blocked on Surface.FS cache availability/mirrors.; Owners: Zastava Webhook Guild (src/Zastava/StellaOps.Zastava.Webhook); Notes: Switch to Surface.Env helpers for webhook configuration (cache endpoint, secret refs, feature toggles).
|
||||
- **ZASTAVA-SECRETS-01** — Status: BLOCKED-w/escalation; Depends on: Code landed; requires cache/nuget mirrors to execute tests.; Owners: Zastava Observer Guild, Security Guild (src/Zastava/StellaOps.Zastava.Observer); Notes: Retrieve CAS/attestation access via Surface.Secrets instead of inline secret stores.
|
||||
- **ZASTAVA-SECRETS-02** — Status: BLOCKED-w/escalation; Depends on: Code landed; waiting on same cache/mirror prerequisites for validation.; Owners: Zastava Webhook Guild, Security Guild (src/Zastava/StellaOps.Zastava.Webhook); Notes: Retrieve attestation verification secrets via Surface.Secrets.
|
||||
- **ZASTAVA-SURFACE-01** — Status: BLOCKED-w/escalation; Depends on: Code landed; blocked on Sprint 130 analyzer artifact/cache drop and local gRPC mirrors to run tests.; Owners: Zastava Observer Guild (src/Zastava/StellaOps.Zastava.Observer); Notes: Integrate Surface.FS client for runtime drift detection (lookup cached layer hashes/entry traces).
|
||||
- **ZASTAVA-SURFACE-02** — Status: BLOCKED-w/escalation; Depends on: Depends on SURFACE-01 validation; blocked on Surface.FS cache drop.; Owners: Zastava Webhook Guild (src/Zastava/StellaOps.Zastava.Webhook); Notes: Enforce Surface.FS availability during admission (deny when cache missing/stale) and embed pointer checks in webhook response.
|
||||
|
||||
## SPRINT_123_policy_reasoning.md
|
||||
|
||||
- **POLICY-AIRGAP-57-001** — Status: TODO; Depends on: Enforce sealed-mode guardrails in evaluation (no outbound fetch), surface `AIRGAP_EGRESS_BLOCKED` errors with remediation (Deps: POLICY-AIRGAP-56-002); Owners: Policy Guild, AirGap Policy Guild / src/Policy/StellaOps.Policy.Engine; Notes:
|
||||
|
||||
## SPRINT_124_policy_reasoning.md
|
||||
|
||||
- **POLICY-ENGINE-20-002** — Status: BLOCKED (2025-10-26); Depends on: Build deterministic evaluator honoring lexical/priority order, first-match semantics, and safe value types (no wall-clock/network access); Owners: Policy Guild / src/Policy/StellaOps.Policy.Engine; Notes:
|
||||
|
||||
## SPRINT_125_mirror.md
|
||||
|
||||
- **Mirror Creator Guild · Exporter Guild** — Status: 2025-11-15 kickoff; Depends on: Without an owner the assembler cannot start and all downstream tasks remain blocked.; Owners: ; Notes:
|
||||
|
||||
## SPRINT_140_runtime_signals.md
|
||||
|
||||
- **Graph Indexer Guild · Observability Guild** — Status: BLOCKED; Depends on: Sprint 120.A – AirGap; Sprint 130.A – Scanner (phase I tracked under `docs/implplan/SPRINT_130_scanner_surface.md`); Owners: Graph Indexer Guild · Observability Guild; Notes: Analyzer artifact ETA from Sprint 130 is overdue (sync 2025-11-13); GRAPH-INDEX-28-007+ cannot start without it.
|
||||
- **Zastava Observer/Webhook Guilds · Security Guild** — Status: BLOCKED; Depends on: Sprint 120.A – AirGap; Sprint 130.A – Scanner; Owners: Zastava Observer/Webhook Guilds · Security Guild; Notes: Surface.FS cache drop plan still missing (overdue from 2025-11-13 sync); SURFACE tasks cannot start.
|
||||
- **OVERDUE** — Status: OVERDUE; Depends on: ; Owners: Scanner Guild · Graph Indexer Guild · Zastava Guilds; Notes: Analyzer artifact publication schedule not published after 2025-11-13 sync; Graph/Zastava blocked awaiting ETA or mock payloads.
|
||||
- **GRAPH-INDEX-28-007** — Status: BLOCKED; Depends on: Sprint 130 analyzer artifacts ETA overdue (missed 2025-11-13 sync); proceed once cache manifests land or mocks are provided.; Owners: Graph Indexer Guild · Observability Guild; Notes: Clustering/centrality jobs staged for execution.
|
||||
- **GRAPH-INDEX-28-008** — Status: BLOCKED; Depends on: Depends on 28-007 artifacts; blocked until analyzer payloads available.; Owners: Graph Indexer Guild; Notes: Retry/backoff plumbing sketched but blocked.
|
||||
- **GRAPH-INDEX-28-009** — Status: BLOCKED; Depends on: Upstream graph job data unavailable while 28-007 is blocked.; Owners: Graph Indexer Guild; Notes: Test/fixture/chaos coverage for graph jobs.
|
||||
- **GRAPH-INDEX-28-010** — Status: BLOCKED; Depends on: Requires outputs from blocked graph jobs to bundle offline artifacts.; Owners: Graph Indexer Guild; Notes: Packaging/offline bundles for graph jobs.
|
||||
- **SBOM-SERVICE-21-001** — Status: BLOCKED; Depends on: Concelier Link-Not-Merge (`CONCELIER-GRAPH-21-001`) not delivered.; Owners: SBOM Service Guild · Concelier Core · Cartographer Guild; Notes: Normalized SBOM projection schema.
|
||||
- **SBOM-SERVICE-21-002** — Status: BLOCKED; Depends on: Waits on 21-001 contract + event outputs.; Owners: SBOM Service Guild; Notes: SBOM change events.
|
||||
- **SBOM-SERVICE-21-003** — Status: BLOCKED; Depends on: Depends on 21-002 event payloads.; Owners: SBOM Service Guild; Notes: Entry point/service node management.
|
||||
- **SBOM-SERVICE-21-004** — Status: BLOCKED; Depends on: Follows projection + event pipelines.; Owners: SBOM Service Guild; Notes: Observability wiring for SBOM service.
|
||||
- **SIGNALS-24-004** — Status: BLOCKED (2025-10-27); Depends on: Wait for 24-002/003 completion and Authority scope validation.; Owners: Signals Guild; Notes: Reachability scoring.
|
||||
- **SIGNALS-24-005** — Status: BLOCKED (2025-10-27); Depends on: Depends on scoring outputs (24-004).; Owners: Signals Guild; Notes: Cache + `signals.fact.updated` events.
|
||||
- **ZASTAVA-SURFACE-01** — Status: BLOCKED; Depends on: Requires Scanner layer metadata + cache drop ETA (overdue).; Owners: Zastava Guilds · Scanner Guild; Notes: Surface.FS client integration with tests.
|
||||
- **ZASTAVA-SURFACE-02** — Status: BLOCKED; Depends on: Depends on SURFACE-01; blocked while cache plan is missing.; Owners: Zastava Guilds; Notes: Admission enforcement using Surface.FS caches.
|
||||
- **2025-11-13 (overdue)** — Status: TODO; Depends on: Scanner to publish Sprint 130 surface roadmap; Graph/Zastava blocked until then.; Owners: ; Notes:
|
||||
- **2025-11-14 (overdue)** — Status: BLOCKED; Depends on: Requires `CONCELIER-GRAPH-21-001` + `CARTO-GRAPH-21-002` agreement; AirGap review scheduled after sign-off.; Owners: ; Notes:
|
||||
- **Marked Graph/Zastava waves BLOCKED; escalation sent to Scanner leadership per contingency.** — Status: Await ETA or mock payload commitment; if none by 2025-11-18, log new target date and adjust downstream start dates; move impacted tasks to BLOCKED-with-escalation in downstream sprints.; Depends on: Graph Guild · Zastava Guilds · Scanner Guild; Owners: ; Notes:
|
||||
- **Overdue** — Status: Overdue (due 2025-11-16); Depends on: ; Owners: Scanner Guild · Graph Indexer Guild; Notes: Publish analyzer artifact ETA or mark GRAPH-INDEX-28-007 as BLOCKED with a mock data plan.
|
||||
- **Overdue** — Status: Overdue (due 2025-11-16); Depends on: ; Owners: Concelier Core · Cartographer Guild · SBOM Service Guild · AirGap Guild; Notes: Record whether the Link-Not-Merge schema was ratified; if not, set SBOM-SERVICE-21-001..004 to BLOCKED with a new ETA.
|
||||
|
||||
## SPRINT_160_export_evidence.md
|
||||
|
||||
- **Evidence Locker Guild · Security Guild · Docs Guild** — Status: BLOCKED (2025-11-12); Depends on: Sprint 110.A – AdvisoryAI; Sprint 120.A – AirGap; Sprint 130.A – Scanner; Sprint 150.A – Orchestrator; Owners: Evidence Locker Guild · Security Guild · Docs Guild; Notes: Waiting for orchestrator capsule data and AdvisoryAI evidence bundles to stabilize before wiring ingestion APIs.
|
||||
- **Exporter Service Guild · Mirror Creator Guild · DevOps Guild** — Status: Sprint 110.A – AdvisoryAI; Sprint 120.A – AirGap; Sprint 130.A – Scanner; Sprint 150.A – Orchestrator; Depends on: BLOCKED (2025-11-12); Owners: Profiles can begin once EvidenceLocker contracts are published; keep DSSE/attestation specs ready.; Notes:
|
||||
- **Timeline Indexer Guild · Evidence Locker Guild · Security Guild** — Status: Sprint 110.A – AdvisoryAI; Sprint 120.A – AirGap; Sprint 130.A – Scanner; Sprint 150.A – Orchestrator; Depends on: BLOCKED (2025-11-12); Owners: Postgres/RLS scaffolding drafted; hold for event schemas from orchestrator/notifications.; Notes:
|
||||
- **AdvisoryAI stand-up (AdvisoryAI Guild)** — Status: Freeze evidence bundle schema + payload notes so EvidenceLocker can finalize DSSE manifests (blocked).; Depends on: If schema slips, log BLOCKED status in Sprint 110 tracker and re-evaluate at 2025-11-18 review.; Owners: ; Notes:
|
||||
- **Orchestrator + Notifications schema handoff (Orchestrator Service + Notifications Guilds)** — Status: Publish capsule envelopes & notification contracts required by EvidenceLocker ingest, ExportCenter notifications, TimelineIndexer ordering (blocked).; Depends on: If envelopes not ready, escalate to Wave 150/140 leads and leave blockers noted here; defer DOING flips.; Owners: ; Notes:
|
||||
- **Sovereign crypto readiness review (Security Guild + Evidence/Export teams)** — Status: Validate `ICryptoProviderRegistry` wiring plan for `EVID-CRYPTO-90-001` & `EXPORT-CRYPTO-90-001`; green-light sovereign modes (blocked).; Depends on: If gating issues remain, file action items in Security board and hold related sprint tasks in TODO.; Owners: ; Notes:
|
||||
- **DevPortal Offline CLI dry run (DevPortal Offline + AirGap Controller Guilds)** — Status: Demo `stella devportal verify bundle.tgz` using sample manifest to prove readiness once EvidenceLocker spec lands (blocked awaiting schema).; Depends on: If CLI not ready, update DVOFF-64-002 description with new ETA and note risk in Sprint 162 doc.; Owners: ; Notes:
|
||||
- **160.A, 160.B, 160.C** — Status: High; Depends on: Escalate to Wave 150/140 leads, record BLOCKED status in both sprint docs, and schedule daily schema stand-ups until envelopes land.; Owners: ; Notes:
|
||||
@@ -33,8 +33,8 @@
|
||||
| 24-004 | BLOCKED | 2025-10-27 | SPRINT_140_runtime_signals | Signals Guild | src/Signals/StellaOps.Signals | Authority scopes + 24-003 | Authority scopes + 24-003 | SGSI0101 |
|
||||
| 24-005 | BLOCKED | 2025-10-27 | SPRINT_140_runtime_signals | Signals Guild | src/Signals/StellaOps.Signals | 24-004 scoring outputs | 24-004 scoring outputs | SGSI0101 |
|
||||
| 29-007 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · Observability Guild | src/Findings/StellaOps.Findings.Ledger | LEDGER-29-006 | LEDGER-29-006 | PLLG0104 |
|
||||
| 29-008 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · QA Guild | src/Findings/StellaOps.Findings.Ledger | 29-007 | LEDGER-29-007 | PLLG0104 |
|
||||
| 29-009 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · DevOps Guild | src/Findings/StellaOps.Findings.Ledger | 29-008 | LEDGER-29-008 | PLLG0104 |
|
||||
| 29-008 | DONE | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · QA Guild | src/Findings/StellaOps.Findings.Ledger | 29-007 | LEDGER-29-007 | PLLG0104 |
|
||||
| 29-009 | BLOCKED | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · DevOps Guild | src/Findings/StellaOps.Findings.Ledger | 29-008 | LEDGER-29-008 | PLLG0104 |
|
||||
| 30-001 | TODO | | SPRINT_129_policy_reasoning | VEX Lens Guild | src/VexLens/StellaOps.VexLens | — | — | PLVL0102 |
|
||||
| 30-002 | TODO | | SPRINT_129_policy_reasoning | VEX Lens Guild | src/VexLens/StellaOps.VexLens | VEXLENS-30-001 | VEXLENS-30-001 | PLVL0102 |
|
||||
| 30-003 | TODO | | SPRINT_129_policy_reasoning | VEX Lens Guild · Issuer Directory Guild | src/VexLens/StellaOps.VexLens | VEXLENS-30-002 | VEXLENS-30-002 | PLVL0102 |
|
||||
@@ -1144,9 +1144,9 @@
|
||||
| KMS-73-001 | DONE (2025-11-03) | 2025-11-03 | SPRINT_100_identity_signing | KMS Guild (src/__Libraries/StellaOps.Cryptography.Kms) | src/__Libraries/StellaOps.Cryptography.Kms | AWS/GCP KMS drivers landed with digest-first signing, metadata caching, config samples, and docs/tests green. | AWS/GCP KMS drivers landed with digest-first signing, metadata caching, config samples, and docs/tests green. | KMSI0102 |
|
||||
| KMS-73-002 | DONE (2025-11-03) | 2025-11-03 | SPRINT_100_identity_signing | KMS Guild (src/__Libraries/StellaOps.Cryptography.Kms) | src/__Libraries/StellaOps.Cryptography.Kms | PKCS#11 + FIDO2 drivers shipped (deterministic digesting, authenticator factories, DI extensions) with docs + xUnit fakes covering sign/verify/export flows. | FIDO2 | KMSI0102 |
|
||||
| LATTICE-401-023 | TODO | | SPRINT_401_reachability_evidence_chain | Scanner Guild · Policy Guild | `docs/reachability/lattice.md`, `docs/modules/scanner/architecture.md`, `src/Scanner/StellaOps.Scanner.WebService` | Update reachability/lattice docs + examples. | GRSC0101 & RBRE0101 | LEDG0101 |
|
||||
| LEDGER-29-007 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild (`src/Findings/StellaOps.Findings.Ledger`) | src/Findings/StellaOps.Findings.Ledger | Instrument metrics | LEDGER-29-006 | PLLG0101 |
|
||||
| LEDGER-29-008 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + QA Guild | src/Findings/StellaOps.Findings.Ledger | Develop unit/property/integration tests, replay/restore tooling, determinism harness, and load tests at 5M findings/tenant | LEDGER-29-007 | PLLG0101 |
|
||||
| LEDGER-29-009 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + DevOps Guild | src/Findings/StellaOps.Findings.Ledger | Provide deployment manifests | LEDGER-29-008 | PLLG0101 |
|
||||
| LEDGER-29-007 | DONE | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild (`src/Findings/StellaOps.Findings.Ledger`) | src/Findings/StellaOps.Findings.Ledger | Instrument metrics | LEDGER-29-006 | PLLG0101 |
|
||||
| LEDGER-29-008 | BLOCKED | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + QA Guild | src/Findings/StellaOps.Findings.Ledger | Develop unit/property/integration tests, replay/restore tooling, determinism harness, and load tests at 5M findings/tenant | LEDGER-29-007 | PLLG0101 |
|
||||
| LEDGER-29-009 | BLOCKED | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + DevOps Guild | src/Findings/StellaOps.Findings.Ledger | Provide deployment manifests | LEDGER-29-008 | PLLG0101 |
|
||||
| LEDGER-34-101 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild | src/Findings/StellaOps.Findings.Ledger | Link orchestrator run ledger exports into Findings Ledger provenance chain, index by artifact hash, and expose audit queries | LEDGER-29-009 | PLLG0101 |
|
||||
| LEDGER-AIRGAP-56 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + AirGap Guilds | | AirGap ledger schema. | PLLG0102 | PLLG0102 |
|
||||
| LEDGER-AIRGAP-56-001 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild | src/Findings/StellaOps.Findings.Ledger | Record bundle provenance (`bundle_id`, `merkle_root`, `time_anchor`) on ledger events for advisories/VEX/policies imported via Mirror Bundles | LEDGER-AIRGAP-56 | PLLG0102 |
|
||||
@@ -2253,8 +2253,8 @@
|
||||
| 24-004 | BLOCKED | 2025-10-27 | SPRINT_140_runtime_signals | Signals Guild | src/Signals/StellaOps.Signals | Authority scopes + 24-003 | Authority scopes + 24-003 | SGSI0101 |
|
||||
| 24-005 | BLOCKED | 2025-10-27 | SPRINT_140_runtime_signals | Signals Guild | src/Signals/StellaOps.Signals | 24-004 scoring outputs | 24-004 scoring outputs | SGSI0101 |
|
||||
| 29-007 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · Observability Guild | src/Findings/StellaOps.Findings.Ledger | LEDGER-29-006 | LEDGER-29-006 | PLLG0104 |
|
||||
| 29-008 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · QA Guild | src/Findings/StellaOps.Findings.Ledger | 29-007 | LEDGER-29-007 | PLLG0104 |
|
||||
| 29-009 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · DevOps Guild | src/Findings/StellaOps.Findings.Ledger | 29-008 | LEDGER-29-008 | PLLG0104 |
|
||||
| 29-008 | DONE | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · QA Guild | src/Findings/StellaOps.Findings.Ledger | 29-007 | LEDGER-29-007 | PLLG0104 |
|
||||
| 29-009 | BLOCKED | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild · DevOps Guild | src/Findings/StellaOps.Findings.Ledger | 29-008 | LEDGER-29-008 | PLLG0104 |
|
||||
| 30-001 | TODO | | SPRINT_129_policy_reasoning | VEX Lens Guild | src/VexLens/StellaOps.VexLens | — | — | PLVL0102 |
|
||||
| 30-002 | TODO | | SPRINT_129_policy_reasoning | VEX Lens Guild | src/VexLens/StellaOps.VexLens | VEXLENS-30-001 | VEXLENS-30-001 | PLVL0102 |
|
||||
| 30-003 | TODO | | SPRINT_129_policy_reasoning | VEX Lens Guild · Issuer Directory Guild | src/VexLens/StellaOps.VexLens | VEXLENS-30-002 | VEXLENS-30-002 | PLVL0102 |
|
||||
@@ -3365,9 +3365,9 @@
|
||||
| KMS-73-001 | DONE (2025-11-03) | 2025-11-03 | SPRINT_100_identity_signing | KMS Guild (src/__Libraries/StellaOps.Cryptography.Kms) | src/__Libraries/StellaOps.Cryptography.Kms | AWS/GCP KMS drivers landed with digest-first signing, metadata caching, config samples, and docs/tests green. | AWS/GCP KMS drivers landed with digest-first signing, metadata caching, config samples, and docs/tests green. | KMSI0102 |
|
||||
| KMS-73-002 | DONE (2025-11-03) | 2025-11-03 | SPRINT_100_identity_signing | KMS Guild (src/__Libraries/StellaOps.Cryptography.Kms) | src/__Libraries/StellaOps.Cryptography.Kms | PKCS#11 + FIDO2 drivers shipped (deterministic digesting, authenticator factories, DI extensions) with docs + xUnit fakes covering sign/verify/export flows. | FIDO2 | KMSI0102 |
|
||||
| LATTICE-401-023 | TODO | | SPRINT_401_reachability_evidence_chain | Scanner Guild · Policy Guild | `docs/reachability/lattice.md`, `docs/modules/scanner/architecture.md`, `src/Scanner/StellaOps.Scanner.WebService` | Update reachability/lattice docs + examples. | GRSC0101 & RBRE0101 | LEDG0101 |
|
||||
| LEDGER-29-007 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild (`src/Findings/StellaOps.Findings.Ledger`) | src/Findings/StellaOps.Findings.Ledger | Instrument metrics | LEDGER-29-006 | PLLG0101 |
|
||||
| LEDGER-29-008 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + QA Guild | src/Findings/StellaOps.Findings.Ledger | Develop unit/property/integration tests, replay/restore tooling, determinism harness, and load tests at 5M findings/tenant | LEDGER-29-007 | PLLG0101 |
|
||||
| LEDGER-29-009 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + DevOps Guild | src/Findings/StellaOps.Findings.Ledger | Provide deployment manifests | LEDGER-29-008 | PLLG0101 |
|
||||
| LEDGER-29-007 | DONE | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild (`src/Findings/StellaOps.Findings.Ledger`) | src/Findings/StellaOps.Findings.Ledger | Instrument metrics | LEDGER-29-006 | PLLG0101 |
|
||||
| LEDGER-29-008 | BLOCKED | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + QA Guild | src/Findings/StellaOps.Findings.Ledger | Develop unit/property/integration tests, replay/restore tooling, determinism harness, and load tests at 5M findings/tenant | LEDGER-29-007 | PLLG0101 |
|
||||
| LEDGER-29-009 | BLOCKED | 2025-11-17 | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + DevOps Guild | src/Findings/StellaOps.Findings.Ledger | Provide deployment manifests | LEDGER-29-008 | PLLG0101 |
|
||||
| LEDGER-34-101 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild | src/Findings/StellaOps.Findings.Ledger | Link orchestrator run ledger exports into Findings Ledger provenance chain, index by artifact hash, and expose audit queries | LEDGER-29-009 | PLLG0101 |
|
||||
| LEDGER-AIRGAP-56 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger + AirGap Guilds | | AirGap ledger schema. | PLLG0102 | PLLG0102 |
|
||||
| LEDGER-AIRGAP-56-001 | TODO | | SPRINT_0120_0000_0001_policy_reasoning | Findings Ledger Guild | src/Findings/StellaOps.Findings.Ledger | Record bundle provenance (`bundle_id`, `merkle_root`, `time_anchor`) on ledger events for advisories/VEX/policies imported via Mirror Bundles | LEDGER-AIRGAP-56 | PLLG0102 |
|
||||
|
||||
@@ -519,3 +519,27 @@ The Attestor response prints verification status, Rekor UUID (when available), a
|
||||
---
|
||||
|
||||
*Last updated: 2025-11-05 (Sprint 101).*
|
||||
|
||||
## 3 · `stella scan entrytrace --stream-ndjson`
|
||||
|
||||
### 3.1 Synopsis
|
||||
```bash
|
||||
stella scan entrytrace \
|
||||
--scan-id <scanId> \
|
||||
[--stream-ndjson] \
|
||||
[--include-ndjson] \
|
||||
[--verbose]
|
||||
```
|
||||
|
||||
### 3.2 Description
|
||||
Streams the EntryTrace NDJSON produced by a completed scan. When `--stream-ndjson` is set the CLI sends `Accept: application/x-ndjson` and writes the raw lines to stdout in order, suitable for piping into AOC/ETL tools. Without the flag, the command returns the JSON envelope (`scanId`, `imageDigest`, graph, NDJSON array) and optionally prints NDJSON when `--include-ndjson` is set.
|
||||
|
||||
### 3.3 Examples
|
||||
- Stream raw NDJSON for further processing:
|
||||
```bash
|
||||
stella scan entrytrace --scan-id scan-123 --stream-ndjson > entrytrace.ndjson
|
||||
```
|
||||
- Retrieve JSON envelope (default behaviour):
|
||||
```bash
|
||||
stella scan entrytrace --scan-id scan-123
|
||||
```
|
||||
|
||||
125
docs/modules/concelier/link-not-merge-schema.md
Normal file
125
docs/modules/concelier/link-not-merge-schema.md
Normal file
@@ -0,0 +1,125 @@
|
||||
# Link-Not-Merge (LNM) Observation & Linkset Schema
|
||||
|
||||
_Draft for approval — authored 2025-11-16 to unblock CONCELIER-LNM tracks._
|
||||
|
||||
## Goals
|
||||
- Immutable storage of raw advisory observations per source/tenant.
|
||||
- Deterministic linksets built from observations without merging or mutating originals.
|
||||
- Stable across online/offline deployments; replayable from raw inputs.
|
||||
|
||||
## Observation document (Mongo JSON Schema excerpt)
|
||||
```json
|
||||
{
|
||||
"bsonType": "object",
|
||||
"required": ["_id","tenantId","source","advisoryId","affected","provenance","ingestedAt"],
|
||||
"properties": {
|
||||
"_id": {"bsonType": "objectId"},
|
||||
"tenantId": {"bsonType": "string"},
|
||||
"source": {"bsonType": "string", "description": "Adapter id, e.g., ghsa, nvd, cert-bund"},
|
||||
"advisoryId": {"bsonType": "string"},
|
||||
"title": {"bsonType": "string"},
|
||||
"summary": {"bsonType": "string"},
|
||||
"severities": {
|
||||
"bsonType": "array",
|
||||
"items": {"bsonType": "object", "required": ["system","score"],
|
||||
"properties": {"system":{"bsonType":"string"},"score":{"bsonType":"double"},"vector":{"bsonType":"string"}}}
|
||||
},
|
||||
"affected": {
|
||||
"bsonType": "array",
|
||||
"items": {"bsonType":"object","required":["purl"],
|
||||
"properties": {
|
||||
"purl": {"bsonType":"string"},
|
||||
"package": {"bsonType":"string"},
|
||||
"versions": {"bsonType":"array","items":{"bsonType":"string"}},
|
||||
"ranges": {"bsonType":"array","items":{"bsonType":"object",
|
||||
"required":["type","events"],
|
||||
"properties": {"type":{"bsonType":"string"},"events":{"bsonType":"array","items":{"bsonType":"object"}}}}},
|
||||
"ecosystem": {"bsonType":"string"},
|
||||
"cpe": {"bsonType":"array","items":{"bsonType":"string"}},
|
||||
"cpes": {"bsonType":"array","items":{"bsonType":"string"}}
|
||||
}
|
||||
}
|
||||
},
|
||||
"references": {"bsonType": "array", "items": {"bsonType":"string"}},
|
||||
"weaknesses": {"bsonType":"array","items":{"bsonType":"string"}},
|
||||
"published": {"bsonType": "date"},
|
||||
"modified": {"bsonType": "date"},
|
||||
"provenance": {
|
||||
"bsonType": "object",
|
||||
"required": ["sourceArtifactSha","fetchedAt"],
|
||||
"properties": {
|
||||
"sourceArtifactSha": {"bsonType":"string"},
|
||||
"fetchedAt": {"bsonType":"date"},
|
||||
"ingestJobId": {"bsonType":"string"},
|
||||
"signature": {"bsonType":"object"}
|
||||
}
|
||||
},
|
||||
"ingestedAt": {"bsonType": "date"}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Observation invariants
|
||||
- **Immutable:** no in-place updates; new revision → new document with `supersedesId` optional pointer.
|
||||
- **Deterministic keying:** `_id` derived from `hash(tenantId|source|advisoryId|provenance.sourceArtifactSha)` to keep inserts idempotent in replay.
|
||||
- **Normalization guardrails:** version ranges must be stored as raw-from-source; no inferred merges.
|
||||
|
||||
## Linkset document
|
||||
```json
|
||||
{
|
||||
"bsonType":"object",
|
||||
"required":["_id","tenantId","advisoryId","source","observations","createdAt"],
|
||||
"properties":{
|
||||
"_id":{"bsonType":"objectId"},
|
||||
"tenantId":{"bsonType":"string"},
|
||||
"advisoryId":{"bsonType":"string"},
|
||||
"source":{"bsonType":"string"},
|
||||
"observations":{"bsonType":"array","items":{"bsonType":"objectId"}},
|
||||
"normalized": {
|
||||
"bsonType":"object",
|
||||
"properties":{
|
||||
"purls":{"bsonType":"array","items":{"bsonType":"string"}},
|
||||
"versions":{"bsonType":"array","items":{"bsonType":"string"}},
|
||||
"ranges": {"bsonType":"array","items":{"bsonType":"object"}},
|
||||
"severities": {"bsonType":"array","items":{"bsonType":"object"}}
|
||||
}
|
||||
},
|
||||
"createdAt":{"bsonType":"date"},
|
||||
"builtByJobId":{"bsonType":"string"},
|
||||
"provenance": {"bsonType":"object","properties":{
|
||||
"observationHashes":{"bsonType":"array","items":{"bsonType":"string"}},
|
||||
"toolVersion" : {"bsonType":"string"},
|
||||
"policyHash" : {"bsonType":"string"}
|
||||
}}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Linkset invariants
|
||||
- Built from a set of observation IDs; never overwrites observations.
|
||||
- Carries the hash list of source observations for audit/replay.
|
||||
- Deterministic sort: observations sorted by `source, advisoryId, fetchedAt` before hashing.
|
||||
|
||||
## Indexes (Mongo)
|
||||
- Observations: `{ tenantId:1, source:1, advisoryId:1, provenance.fetchedAt:-1 }` (compound for ingest); `{ provenance.sourceArtifactSha:1 }` unique to avoid duplicate writes.
|
||||
- Linksets: `{ tenantId:1, advisoryId:1, source:1 }` unique; `{ observations:1 }` sparse for reverse lookups.
|
||||
|
||||
## Collections
|
||||
- `advisory_observations` — raw per-source docs (immutable).
|
||||
- `advisory_linksets` — derived normalized aggregates with observation pointers and hashes.
|
||||
|
||||
## Determinism & replay
|
||||
- Replay rebuild: order observations by `fetchedAt`, recompute the linkset hash list, and ensure the resulting linkset JSON is byte-identical.
|
||||
- All timestamps UTC ISO-8601; no server-local time.
|
||||
- String normalization: lowercase `source`, trim/normalize PURLs, stable sort arrays.
|
||||
|
||||
## Sample documents
|
||||
See `docs/samples/lnm/observation-ghsa.json` and `docs/samples/lnm/linkset-ghsa.json` (added with this draft) for concrete payloads.
|
||||
|
||||
## Approval path
|
||||
1) Architecture + Concelier Core review this document.
|
||||
2) If accepted, freeze JSON Schema and roll into `src/Concelier/__Libraries/StellaOps.Concelier.Storage.Mongo` migrations.
|
||||
3) Update consumers (policy/CLI/export) to read from linksets only; deprecate Merge endpoints.
|
||||
|
||||
---
|
||||
Tracking: CONCELIER-LNM-21-001/002/101; Sprint 110 blockers (Concelier/Excititor waves).
|
||||
66
docs/modules/excititor/operations/evidence-api.md
Normal file
66
docs/modules/excititor/operations/evidence-api.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# Excititor Advisory-AI evidence APIs (projection + chunks)
|
||||
|
||||
> Covers the read-only evidence surfaces shipped in Sprints 119–120: `/v1/vex/observations/{vulnerabilityId}/{productKey}` and `/v1/vex/evidence/chunks`.
|
||||
|
||||
## Scope and determinism
|
||||
|
||||
- **Aggregation-only**: no consensus, severity merging, or reachability. Responses carry raw statements plus provenance/signature metadata.
|
||||
- **Stable ordering**: both endpoints sort by `lastSeen` DESC; pagination uses a deterministic `limit`.
|
||||
- **Limits**: observation projection default `limit=200`, max `500`; chunk stream default `limit=500`, max `2000`.
|
||||
- **Tenancy**: reads respect `X-Stella-Tenant` when provided; otherwise fall back to `DefaultTenant` configuration.
|
||||
- **Auth**: bearer token with `vex.read` scope required.
|
||||
|
||||
## `/v1/vex/observations/{vulnerabilityId}/{productKey}`
|
||||
|
||||
- **Response**: JSON object with `vulnerabilityId`, `productKey`, `generatedAt`, `totalCount`, `truncated`, `statements[]`.
|
||||
- **Statement fields**: `observationId`, `providerId`, `status`, `justification`, `detail`, `firstSeen`, `lastSeen`, `scope{key,name,version,purl,cpe,componentIdentifiers[]}`, `anchors[]`, `document{digest,format,revision,sourceUri}`, `signature{type,keyId,issuer,verifiedAt}`.
|
||||
- **Filters**:
|
||||
- `providerId` (multi-valued, comma-separated)
|
||||
- `status` (values in `VexClaimStatus`)
|
||||
- `since` (ISO-8601, UTC)
|
||||
  - `limit` (integer; must fall within the documented default/max bounds)
|
||||
- **Mapping back to storage**:
|
||||
- `observationId` = `{providerId}:{document.digest}`
|
||||
- `document.digest` locates the raw record in `vex_raw`.
|
||||
- `anchors` contain JSON pointers/paragraph locators from source metadata.
|
||||
|
||||
Headers:
|
||||
- `Excititor-Results-Truncated: true|false`
|
||||
- `Excititor-Results-Total: <int>`
|
||||
|
||||
## `/v1/vex/evidence/chunks`
|
||||
|
||||
- **Query params**: `vulnerabilityId` (required), `productKey` (required), optional `providerId`, `status`, `since`, `limit`.
|
||||
- **Response**: **NDJSON** stream; each line is a `VexEvidenceChunkResponse`.
|
||||
- **Chunk fields**: `observationId`, `linksetId`, `vulnerabilityId`, `productKey`, `providerId`, `status`, `justification`, `detail`, `scopeScore` (from confidence or signals), `firstSeen`, `lastSeen`, `scope{...}`, `document{digest,format,sourceUri,revision}`, `signature{type,subject,issuer,keyId,verifiedAt,transparencyRef}`, `metadata` (flattened additionalMetadata).
|
||||
- **Headers**: same truncation/total headers as projection API.
|
||||
- **Streaming guidance (SDK/clients)**:
|
||||
- Use HTTP client that supports response streaming; read line-by-line and JSON-deserialize per line.
|
||||
  - Treat the stream as an unbounded list of up to `limit` lines; do not assume array brackets.
|
||||
- Back-off or paginate by adjusting `since` or narrowing providers/statuses.
|
||||
|
||||
## `/v1/vex/attestations/{attestationId}`
|
||||
|
||||
- **Purpose**: Lookup attestation provenance (supplier ↔ observation/linkset ↔ product/vulnerability) without touching consensus.
|
||||
- **Response**: `VexAttestationPayload` with fields:
|
||||
- `attestationId`, `supplierId`, `observationId`, `linksetId`, `vulnerabilityId`, `productKey`, `justificationSummary`, `issuedAt`, `metadata{}`.
|
||||
- **Semantics**:
|
||||
- `attestationId` matches the export/attestation ID used when signing (Resolve/Worker flows).
|
||||
- `observationId`/`linksetId` map back to evidence identifiers; clients can stitch provenance for citations.
|
||||
- **Auth**: `vex.read` scope; tenant header optional (payloads are tenant-agnostic).
|
||||
|
||||
## Error model
|
||||
|
||||
- Standard API envelope with `ValidationProblem` for missing required params.
|
||||
- `scope` failures return `403` with problem details.
|
||||
- Tenancy parse failures return `400`.
|
||||
|
||||
## Backwards compatibility
|
||||
|
||||
- No legacy routes are deprecated by these endpoints; they are additive and remain aggregation-only.
|
||||
|
||||
## References
|
||||
|
||||
- Implementation: `src/Excititor/StellaOps.Excititor.WebService/Program.cs` (`/v1/vex/observations/**`, `/v1/vex/evidence/chunks`).
|
||||
- Telemetry: `src/Excititor/StellaOps.Excititor.WebService/Telemetry/EvidenceTelemetry.cs` (`excititor.vex.observation.*`, `excititor.vex.chunks.*`).
|
||||
- Data model: `src/Excititor/StellaOps.Excititor.WebService/Contracts/VexObservationContracts.cs`, `Contracts/VexEvidenceChunkContracts.cs`.
|
||||
40
docs/modules/scanner/operations/entrytrace-cadence.md
Normal file
40
docs/modules/scanner/operations/entrytrace-cadence.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# EntryTrace Heuristic Review Cadence
|
||||
|
||||
EntryTrace heuristics must stay aligned with competitor techniques and new runtime behaviours. This cadence makes updates predictable and deterministic.
|
||||
|
||||
## Objectives
|
||||
- Refresh shell/launcher heuristics quarterly using the latest gap analysis in `docs/benchmarks/scanner/scanning-gaps-stella-misses-from-competitors.md`.
|
||||
- Re-run explain-trace fixtures to confirm deterministic outputs and document any newly unsupported constructs.
|
||||
- Ensure operator-facing explainability stays in sync with emitted diagnostics and metrics.
|
||||
|
||||
## Cadence
|
||||
- **Frequency:** Quarterly (Jan, Apr, Jul, Oct) or sooner when critical regressions are discovered.
|
||||
- **Owners:** EntryTrace Guild with QA Guild pairing.
|
||||
- **Inputs:** Gap benchmark doc, new runtime samples from support channels, and anonymised customer repros (when permitted).
|
||||
- **Outputs:**
|
||||
- Updated heuristics/diagnostics in `StellaOps.Scanner.EntryTrace` with deterministic fixtures.
|
||||
- Changelog entry in `src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace/TASKS.md`.
|
||||
- Sprint log updates under the active `SPRINT_0138_0000_0001_scanner_ruby_parity.md` when cadence items land.
|
||||
|
||||
## Workflow
|
||||
1) **Collect & triage signals**
|
||||
- Parse new gaps from the benchmark doc; map each to an EntryTrace detector area (shell parser, interpreter tracer, PATH resolver).
|
||||
- Classify as _coverage gap_, _precision issue_, or _observability gap_.
|
||||
2) **Fixture-first update**
|
||||
- Add/extend fixtures in `StellaOps.Scanner.EntryTrace.Tests/Fixtures` before modifying code.
|
||||
- Use deterministic serializers to keep fixture outputs byte-stable.
|
||||
3) **Implement & validate**
|
||||
- Update analyzers/diagnostics; run `dotnet test src/Scanner/__Tests/StellaOps.Scanner.EntryTrace.Tests/StellaOps.Scanner.EntryTrace.Tests.csproj --nologo --verbosity minimal`.
|
||||
- Confirm metrics counters (`entrytrace_*`) and explain-trace text stay consistent.
|
||||
4) **Record explainability**
|
||||
- Update explain-trace catalog (diagnostic enum descriptions) when new reasons are introduced.
|
||||
- Add operator notes to sprint log if remediation guidance changes.
|
||||
5) **Publish**
|
||||
- Attach a brief summary to the sprint Execution Log and to `TASKS.md` with date + scope.
|
||||
|
||||
## Fail-safe & rollback
|
||||
- Keep previous fixture baselines; if a heuristic widens too far, revert to prior fixture sets to restore determinism.
|
||||
- Prefer additive diagnostics over behavioural regressions; when behaviour must change, document it in the sprint log and `TASKS.md`.
|
||||
|
||||
## Ownership transitions
|
||||
- If the cadence cannot run on schedule, mark the relevant sprint task `BLOCKED` with the reason and hand off to the Project Manager to re-staff before the next window.
|
||||
20
docs/samples/lnm/linkset-ghsa.json
Normal file
20
docs/samples/lnm/linkset-ghsa.json
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"_id": "0000000000000000000000aa",
|
||||
"tenantId": "demo-tenant",
|
||||
"source": "ghsa",
|
||||
"advisoryId": "GHSA-xxxx-yyyy",
|
||||
"observations": [ "000000000000000000000001" ],
|
||||
"normalized": {
|
||||
"purls": [ "pkg:npm/example" ],
|
||||
"versions": [ "1.2.3" ],
|
||||
"ranges": [ { "type": "semver", "events": [ { "introduced": "0" }, { "fixed": "1.2.4" } ] } ],
|
||||
"severities": [ { "system": "cvssv3.1", "score": 7.5, "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N" } ]
|
||||
},
|
||||
"createdAt": "2025-10-06T12:05:00Z",
|
||||
"builtByJobId": "linkset-builder-456",
|
||||
"provenance": {
|
||||
"observationHashes": [ "sha256:abc123" ],
|
||||
"toolVersion": "lnm-1.0.0",
|
||||
"policyHash": "sha256:def456"
|
||||
}
|
||||
}
|
||||
24
docs/samples/lnm/observation-ghsa.json
Normal file
24
docs/samples/lnm/observation-ghsa.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"_id": "000000000000000000000001",
|
||||
"tenantId": "demo-tenant",
|
||||
"source": "ghsa",
|
||||
"advisoryId": "GHSA-xxxx-yyyy",
|
||||
"title": "Example GHSA vuln",
|
||||
"summary": "Example summary",
|
||||
"severities": [ { "system": "cvssv3.1", "score": 7.5, "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N" } ],
|
||||
"affected": [ {
|
||||
"purl": "pkg:npm/example@1.2.3",
|
||||
"versions": [ "1.2.3" ],
|
||||
"ranges": [ { "type": "semver", "events": [ { "introduced": "0" }, { "fixed": "1.2.4" } ] } ]
|
||||
} ],
|
||||
"references": [ "https://github.com/example/advisory" ],
|
||||
"weaknesses": [ "CWE-79" ],
|
||||
"published": "2025-10-01T00:00:00Z",
|
||||
"modified": "2025-10-05T00:00:00Z",
|
||||
"provenance": {
|
||||
"sourceArtifactSha": "sha256:abc123",
|
||||
"fetchedAt": "2025-10-06T12:00:00Z",
|
||||
"ingestJobId": "ingest-123"
|
||||
},
|
||||
"ingestedAt": "2025-10-06T12:01:00Z"
|
||||
}
|
||||
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"schemaVersion": "notify.template@1",
|
||||
"templateId": "tmpl-attest-expiry-warning-slack-en-us",
|
||||
"tenantId": "bootstrap",
|
||||
"channelType": "slack",
|
||||
"key": "tmpl-attest-expiry-warning",
|
||||
"locale": "en-us",
|
||||
"renderMode": "markdown",
|
||||
"format": "slack",
|
||||
"description": "Slack reminder for attestations approaching their expiration window.",
|
||||
"body": ":warning: Attestation for `{{payload.subject.digest}}` expires {{expires_in payload.attestation.expiresAt event.ts}}\nRepo: `{{payload.subject.repository}}`{{#if payload.subject.tag}} ({{payload.subject.tag}}){{/if}}\nSigner: `{{fingerprint payload.signer.kid}}` ({{payload.signer.algorithm}})\nIssued: {{payload.attestation.issuedAt}} · Expires: {{payload.attestation.expiresAt}}\nRenewal steps: {{link \"Docs\" payload.links.docs}} · Console: {{link \"Open\" payload.links.console}}\n",
|
||||
"metadata": {
|
||||
"author": "notifications-bootstrap",
|
||||
"version": "2025-11-16"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"schemaVersion": "notify.template@1",
|
||||
"templateId": "tmpl-api-deprecation-email-en-us",
|
||||
"tenantId": "bootstrap",
|
||||
"channelType": "email",
|
||||
"key": "tmpl-api-deprecation",
|
||||
"locale": "en-us",
|
||||
"renderMode": "html",
|
||||
"format": "email",
|
||||
"description": "Email notification for retiring Notifier API versions.",
|
||||
"body": "<h2>Notifier API deprecation notice</h2>\n<p>The Notifier API v1 endpoints are scheduled for sunset on <strong>{{metadata.sunset}}</strong>.</p>\n<ul>\n <li>Paths affected: {{metadata.paths}}</li>\n <li>Scope: notify.*</li>\n <li>Replacement: {{metadata.replacement}}</li>\n</ul>\n<p>Action: {{metadata.action}}</p>\n<p>Details: <a href=\"{{metadata.docs}}\">Deprecation bulletin</a></p>\n",
|
||||
"metadata": {
|
||||
"author": "notifications-bootstrap",
|
||||
"version": "2025-11-17"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"schemaVersion": "notify.template@1",
|
||||
"templateId": "tmpl-api-deprecation-slack-en-us",
|
||||
"tenantId": "bootstrap",
|
||||
"channelType": "slack",
|
||||
"key": "tmpl-api-deprecation",
|
||||
"locale": "en-us",
|
||||
"renderMode": "markdown",
|
||||
"format": "slack",
|
||||
"description": "Slack notice for retiring Notifier API versions.",
|
||||
"body": ":warning: Notifier API v1 is being deprecated.\nSunset: {{metadata.sunset}}\nPaths affected: {{metadata.paths}}\nDocs: {{link \"Deprecation details\" metadata.docs}}\nAction: {{metadata.action}}\n",
|
||||
"metadata": {
|
||||
"author": "notifications-bootstrap",
|
||||
"version": "2025-11-17"
|
||||
}
|
||||
}
|
||||
39
offline/telemetry/dashboards/ledger/alerts.yml
Normal file
39
offline/telemetry/dashboards/ledger/alerts.yml
Normal file
@@ -0,0 +1,39 @@
|
||||
groups:
|
||||
- name: ledger-observability
|
||||
interval: 30s
|
||||
rules:
|
||||
- alert: LedgerWriteLatencyHighP95
|
||||
expr: histogram_quantile(0.95, sum(rate(ledger_write_latency_seconds_bucket[5m])) by (le, tenant)) > 0.12
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Ledger write latency p95 high (tenant {{ $labels.tenant }})"
|
||||
description: "ledger_write_latency_seconds p95 > 120ms for >10m. Check DB/queue."
|
||||
|
||||
- alert: ProjectionLagHigh
|
||||
expr: max_over_time(ledger_projection_lag_seconds[10m]) > 30
|
||||
for: 10m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Ledger projection lag high"
|
||||
description: "projection lag over 30s; projections falling behind ingest."
|
||||
|
||||
- alert: MerkleAnchorFailures
|
||||
expr: sum(rate(ledger_merkle_anchor_failures_total[15m])) by (tenant, reason) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Merkle anchor failures (tenant {{ $labels.tenant }})"
|
||||
description: "Anchoring failures detected (reason={{ $labels.reason }}). Investigate signing/storage."
|
||||
|
||||
- alert: AttachmentFailures
|
||||
expr: sum(rate(ledger_attachments_encryption_failures_total[10m])) by (tenant, stage) > 0
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Attachment pipeline failures (tenant {{ $labels.tenant }}, stage {{ $labels.stage }})"
|
||||
description: "Attachment encryption/sign/upload reported failures in the last 10m."
|
||||
@@ -0,0 +1,91 @@
|
||||
{
|
||||
"id": null,
|
||||
"title": "StellaOps Findings Ledger",
|
||||
"timezone": "utc",
|
||||
"schemaVersion": 39,
|
||||
"version": 1,
|
||||
"refresh": "30s",
|
||||
"tags": ["ledger", "findings", "stellaops"],
|
||||
"panels": [
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Ledger Write Latency (P50/P95)",
|
||||
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 },
|
||||
"targets": [
|
||||
{ "expr": "histogram_quantile(0.5, sum(rate(ledger_write_latency_seconds_bucket{tenant=\"$tenant\"}[5m])) by (le))", "legendFormat": "p50" },
|
||||
{ "expr": "histogram_quantile(0.95, sum(rate(ledger_write_latency_seconds_bucket{tenant=\"$tenant\"}[5m])) by (le))", "legendFormat": "p95" }
|
||||
],
|
||||
"fieldConfig": { "defaults": { "unit": "s" } }
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Write Throughput",
|
||||
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 },
|
||||
"targets": [
|
||||
{ "expr": "sum(rate(ledger_events_total{tenant=\"$tenant\"}[5m])) by (event_type)", "legendFormat": "{{event_type}}" }
|
||||
],
|
||||
"fieldConfig": { "defaults": { "unit": "ops" } }
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Projection Lag",
|
||||
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 8 },
|
||||
"targets": [
|
||||
{ "expr": "max(ledger_projection_lag_seconds{tenant=\"$tenant\"})", "legendFormat": "lag" }
|
||||
],
|
||||
"fieldConfig": { "defaults": { "unit": "s" } }
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Merkle Anchor Duration",
|
||||
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 8 },
|
||||
"targets": [
|
||||
{ "expr": "histogram_quantile(0.95, sum(rate(ledger_merkle_anchor_duration_seconds_bucket{tenant=\"$tenant\"}[5m])) by (le))", "legendFormat": "p95" }
|
||||
],
|
||||
"fieldConfig": { "defaults": { "unit": "s" } }
|
||||
},
|
||||
{
|
||||
"type": "stat",
|
||||
"title": "Merkle Anchor Failures (5m)",
|
||||
"gridPos": { "h": 4, "w": 6, "x": 0, "y": 16 },
|
||||
"targets": [
|
||||
{ "expr": "sum(rate(ledger_merkle_anchor_failures_total{tenant=\"$tenant\"}[5m]))", "legendFormat": "fail/s" }
|
||||
],
|
||||
"options": { "reduceOptions": { "calcs": ["lastNotNull"] } }
|
||||
},
|
||||
{
|
||||
"type": "stat",
|
||||
"title": "Attachment Failures (5m)",
|
||||
"gridPos": { "h": 4, "w": 6, "x": 6, "y": 16 },
|
||||
"targets": [
|
||||
{ "expr": "sum(rate(ledger_attachments_encryption_failures_total{tenant=\"$tenant\"}[5m])) by (stage)", "legendFormat": "{{stage}}" }
|
||||
],
|
||||
"options": { "reduceOptions": { "calcs": ["lastNotNull"] } }
|
||||
},
|
||||
{
|
||||
"type": "stat",
|
||||
"title": "Ledger Backlog",
|
||||
"gridPos": { "h": 4, "w": 6, "x": 12, "y": 16 },
|
||||
"targets": [
|
||||
{ "expr": "sum(ledger_ingest_backlog_events{tenant=\"$tenant\"})", "legendFormat": "events" }
|
||||
]
|
||||
}
|
||||
],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"name": "tenant",
|
||||
"type": "query",
|
||||
"label": "Tenant",
|
||||
"datasource": null,
|
||||
"query": "label_values(ledger_events_total, tenant)",
|
||||
"refresh": 1,
|
||||
"multi": false,
|
||||
"includeAll": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"annotations": { "list": [] },
|
||||
"time": { "from": "now-6h", "to": "now" },
|
||||
"timepicker": { "refresh_intervals": ["30s", "1m", "5m", "15m", "1h"] }
|
||||
}
|
||||
125
ops/mongo/taskrunner/20251106-task-runner-baseline.mongosh
Normal file
125
ops/mongo/taskrunner/20251106-task-runner-baseline.mongosh
Normal file
@@ -0,0 +1,125 @@
|
||||
// Task Runner baseline collections and indexes
|
||||
// Mirrors docs/modules/taskrunner/migrations/pack-run-collections.md (last updated 2025-11-06)
|
||||
|
||||
// Idempotently ensure a collection exists with the given $jsonSchema validator.
// - Missing collection: create it with the validator attached.
// - Existing collection: re-apply the validator via collMod so schema drift is corrected
//   on re-runs of this baseline script (safe to execute repeatedly).
// validationLevel "moderate" relaxes enforcement for documents that already violate the
// schema (per MongoDB schema-validation docs) so legacy rows do not block writes.
function ensureCollection(name, validator) {
  const existing = db.getCollectionNames();
  if (!existing.includes(name)) {
    db.createCollection(name, { validator, validationLevel: "moderate" });
  } else if (validator) {
    // collMod only runs when a validator was supplied; a bare ensure leaves the
    // existing collection untouched.
    db.runCommand({ collMod: name, validator, validationLevel: "moderate" });
  }
}
|
||||
|
||||
const runValidator = {
|
||||
$jsonSchema: {
|
||||
bsonType: "object",
|
||||
required: ["planHash", "plan", "failurePolicy", "requestedAt", "createdAt", "updatedAt", "steps"],
|
||||
properties: {
|
||||
_id: { bsonType: "string" },
|
||||
planHash: { bsonType: "string" },
|
||||
plan: { bsonType: "object" },
|
||||
failurePolicy: { bsonType: "object" },
|
||||
requestedAt: { bsonType: "date" },
|
||||
createdAt: { bsonType: "date" },
|
||||
updatedAt: { bsonType: "date" },
|
||||
steps: {
|
||||
bsonType: "array",
|
||||
items: {
|
||||
bsonType: "object",
|
||||
required: ["stepId", "status", "attempts"],
|
||||
properties: {
|
||||
stepId: { bsonType: "string" },
|
||||
status: { bsonType: "string" },
|
||||
attempts: { bsonType: "int" },
|
||||
kind: { bsonType: "string" },
|
||||
enabled: { bsonType: "bool" },
|
||||
continueOnError: { bsonType: "bool" },
|
||||
maxParallel: { bsonType: ["int", "null"] },
|
||||
approvalId: { bsonType: ["string", "null"] },
|
||||
gateMessage: { bsonType: ["string", "null"] },
|
||||
lastTransitionAt: { bsonType: ["date", "null"] },
|
||||
nextAttemptAt: { bsonType: ["date", "null"] },
|
||||
statusReason: { bsonType: ["string", "null"] }
|
||||
}
|
||||
}
|
||||
},
|
||||
tenantId: { bsonType: ["string", "null"] }
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const logValidator = {
|
||||
$jsonSchema: {
|
||||
bsonType: "object",
|
||||
required: ["runId", "sequence", "timestamp", "level", "eventType", "message"],
|
||||
properties: {
|
||||
runId: { bsonType: "string" },
|
||||
sequence: { bsonType: "long" },
|
||||
timestamp: { bsonType: "date" },
|
||||
level: { bsonType: "string" },
|
||||
eventType: { bsonType: "string" },
|
||||
message: { bsonType: "string" },
|
||||
stepId: { bsonType: ["string", "null"] },
|
||||
metadata: { bsonType: ["object", "null"] }
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const artifactsValidator = {
|
||||
$jsonSchema: {
|
||||
bsonType: "object",
|
||||
required: ["runId", "name", "type", "status", "capturedAt"],
|
||||
properties: {
|
||||
runId: { bsonType: "string" },
|
||||
name: { bsonType: "string" },
|
||||
type: { bsonType: "string" },
|
||||
status: { bsonType: "string" },
|
||||
capturedAt: { bsonType: "date" },
|
||||
sourcePath: { bsonType: ["string", "null"] },
|
||||
storedPath: { bsonType: ["string", "null"] },
|
||||
notes: { bsonType: ["string", "null"] },
|
||||
expression: { bsonType: ["object", "null"] }
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const approvalsValidator = {
|
||||
$jsonSchema: {
|
||||
bsonType: "object",
|
||||
required: ["runId", "approvalId", "requestedAt", "status"],
|
||||
properties: {
|
||||
runId: { bsonType: "string" },
|
||||
approvalId: { bsonType: "string" },
|
||||
requiredGrants: { bsonType: "array", items: { bsonType: "string" } },
|
||||
stepIds: { bsonType: "array", items: { bsonType: "string" } },
|
||||
messages: { bsonType: "array", items: { bsonType: "string" } },
|
||||
reasonTemplate: { bsonType: ["string", "null"] },
|
||||
requestedAt: { bsonType: "date" },
|
||||
status: { bsonType: "string" },
|
||||
actorId: { bsonType: ["string", "null"] },
|
||||
completedAt: { bsonType: ["date", "null"] },
|
||||
summary: { bsonType: ["string", "null"] }
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ensureCollection("pack_runs", runValidator);
|
||||
ensureCollection("pack_run_logs", logValidator);
|
||||
ensureCollection("pack_artifacts", artifactsValidator);
|
||||
ensureCollection("pack_run_approvals", approvalsValidator);
|
||||
|
||||
// Indexes for pack_runs
|
||||
db.pack_runs.createIndex({ updatedAt: -1 }, { name: "pack_runs_updatedAt_desc" });
|
||||
db.pack_runs.createIndex({ tenantId: 1, updatedAt: -1 }, { name: "pack_runs_tenant_updatedAt_desc", sparse: true });
|
||||
|
||||
// Indexes for pack_run_logs
|
||||
db.pack_run_logs.createIndex({ runId: 1, sequence: 1 }, { unique: true, name: "pack_run_logs_run_sequence" });
|
||||
db.pack_run_logs.createIndex({ runId: 1, timestamp: 1 }, { name: "pack_run_logs_run_timestamp" });
|
||||
|
||||
// Indexes for pack_artifacts
|
||||
db.pack_artifacts.createIndex({ runId: 1, name: 1 }, { unique: true, name: "pack_artifacts_run_name" });
|
||||
db.pack_artifacts.createIndex({ runId: 1 }, { name: "pack_artifacts_run" });
|
||||
|
||||
// Indexes for pack_run_approvals
|
||||
db.pack_run_approvals.createIndex({ runId: 1, approvalId: 1 }, { unique: true, name: "pack_run_approvals_run_approval" });
|
||||
db.pack_run_approvals.createIndex({ runId: 1, status: 1 }, { name: "pack_run_approvals_run_status" });
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
BIN
out/tools/pack.binlog
Normal file
BIN
out/tools/pack.binlog
Normal file
Binary file not shown.
24
samples/provenance/build-statement-sample.json
Normal file
24
samples/provenance/build-statement-sample.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
  "buildDefinition": {
    "buildType": "https://slsa.dev/provenance/v1",
    "externalParameters": {
      "workflow": "orchestrator/job",
      "policyHash": "sha256:deadbeef"
    },
    "resolvedDependencies": {
      "sbomDigest": "sha256:aaaabbbb",
      "vexDigest": "sha256:ccccdddd"
    }
  },
  "buildMetadata": {
    "buildInvocationId": "job-12345",
    "buildStartedOn": "2025-11-16T12:00:00Z",
    "buildFinishedOn": "2025-11-16T12:00:10Z",
    "reproducible": true,
    "completeness": {
      "parameters": true,
      "environment": true,
      "materials": true
    }
  }
}
||||
50
src/Concelier/AGENTS.md
Normal file
50
src/Concelier/AGENTS.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# Concelier · AGENTS Charter (Sprint 0112–0113)
|
||||
|
||||
## Module Scope & Working Directory
|
||||
- Working directory: `src/Concelier/**` (WebService, __Libraries, Storage.Mongo, analyzers, tests, seed-data). Do not edit other modules unless explicitly referenced by this sprint.
|
||||
- Mission: Link-Not-Merge (LNM) ingestion of advisory observations, correlation into linksets, evidence/export APIs, and deterministic telemetry.
|
||||
|
||||
## Roles
|
||||
- **Backend engineer (ASP.NET Core / Mongo):** connectors, ingestion guards, linkset builder, WebService APIs, storage migrations.
|
||||
- **Observability/Platform engineer:** OTEL metrics/logs, health/readiness, distributed locks, scheduler safety.
|
||||
- **QA automation:** Mongo2Go + WebApplicationFactory tests for handlers/jobs; determinism and guardrail regression harnesses.
|
||||
- **Docs/Schema steward:** keep LNM schemas, API references, and inline provenance docs aligned with behavior.
|
||||
|
||||
## Required Reading (must be treated as read before setting DOING)
|
||||
- `docs/README.md`
|
||||
- `docs/07_HIGH_LEVEL_ARCHITECTURE.md`
|
||||
- `docs/modules/platform/architecture-overview.md`
|
||||
- `docs/modules/concelier/architecture.md`
|
||||
- `docs/modules/concelier/link-not-merge-schema.md`
|
||||
- `docs/provenance/inline-dsse.md` (for provenance anchors/DSSE notes)
|
||||
- Any sprint-specific ADRs/notes linked from `docs/implplan/SPRINT_0112_0001_0001_concelier_i.md` or `SPRINT_0113_0001_0002_concelier_ii.md`.
|
||||
|
||||
## Working Agreements
|
||||
- **Aggregation-Only Contract (AOC):** no derived semantics in ingestion; enforce via `AOCWriteGuard` and analyzers. Raw observations are append-only; linksets carry correlations/conflicts only.
|
||||
- **Determinism:** use canonical JSON writer; sort collections (fieldType, observationPath, sourceId) for cache keys; UTC ISO-8601 timestamps; stable ordering in exports/events.
|
||||
- **Offline-first:** avoid new external calls outside allowlisted connectors; feature flags must default safe for air-gapped deployments (`concelier:features:*`).
|
||||
- **Tenant safety:** every API/job must enforce tenant headers/guards; no cross-tenant leaks.
|
||||
- **Schema gates:** LNM schema changes require docs + tests; update `link-not-merge-schema.md` and samples together.
|
||||
- **Cross-module edits:** none without sprint note; if needed, log in sprint Execution Log and Decisions & Risks.
|
||||
|
||||
## Coding & Observability Standards
|
||||
- Target **.NET 10**; prefer latest C# preview features already enabled in repo.
|
||||
- Mongo driver ≥ 3.x; canonical BSON/JSON mapping lives in Storage.Mongo.
|
||||
- Metrics: use `Meter` names under `StellaOps.Concelier.*`; tag `tenant`, `source`, `result` as applicable. Counters/histograms must be documented.
|
||||
- Logging: structured, no PII; include `tenant`, `source`, `job`, `correlationId` when available.
|
||||
- Scheduler/locks: one lock per connector/export job; no duplicate runs; honor `CancellationToken`.
|
||||
|
||||
## Testing Rules
|
||||
- Write/maintain tests alongside code:
|
||||
- Web/API: `StellaOps.Concelier.WebService.Tests` with WebApplicationFactory + Mongo2Go fixtures.
|
||||
- Core/Linkset/Guards: `StellaOps.Concelier.Core.Tests`.
|
||||
- Storage: `StellaOps.Concelier.Storage.Mongo.Tests` (use in-memory or Mongo2Go; determinism on ordering/hashes).
|
||||
- Observability/analyzers: tests in `__Analyzers` or respective test projects.
|
||||
- Tests must assert determinism (stable ordering/hashes), tenant guards, AOC invariants, and no derived fields in ingestion.
|
||||
- Prefer seeded fixtures under `seed-data/` for repeatability; avoid network in tests.
|
||||
|
||||
## Delivery Discipline
|
||||
- Update sprint tracker status (`TODO → DOING → DONE/BLOCKED`) when you start/finish/block work; mirror decisions in Execution Log and Decisions & Risks.
|
||||
- If a design decision is needed, mark the task `BLOCKED` in the sprint doc and record the decision ask—do not pause the codebase.
|
||||
- When changing contracts (APIs, schemas, telemetry, exports), update corresponding docs and link them from the sprint Decisions & Risks section.
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Collections.Immutable;
|
||||
using MongoDB.Bson;
|
||||
using System.Linq;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// Link-Not-Merge linkset: correlates advisory observations for a single
/// (tenant, source, advisoryId) tuple. Carries correlation data only — the raw
/// observations themselves remain append-only elsewhere.
/// </summary>
/// <param name="TenantId">Owning tenant identifier; linksets never span tenants.</param>
/// <param name="Source">Upstream source/vendor key (e.g. "ghsa" in the LNM samples).</param>
/// <param name="AdvisoryId">Upstream advisory identifier shared by the observations.</param>
/// <param name="ObservationIds">Identifiers of the observations folded into this linkset.</param>
/// <param name="Normalized">Optional normalized view (purls/versions/ranges/severities); null when nothing could be normalized.</param>
/// <param name="Provenance">Optional build provenance (observation hashes, tool version, policy hash).</param>
/// <param name="CreatedAt">Creation timestamp; used (desc) as the primary query-paging key.</param>
/// <param name="BuiltByJobId">Identifier of the builder job that produced the linkset, if any.</param>
public sealed record AdvisoryLinkset(
    string TenantId,
    string Source,
    string AdvisoryId,
    ImmutableArray<string> ObservationIds,
    AdvisoryLinksetNormalized? Normalized,
    AdvisoryLinksetProvenance? Provenance,
    DateTimeOffset CreatedAt,
    string? BuiltByJobId);
|
||||
|
||||
/// <summary>
/// Normalized projection of a linkset's evidence: package URLs, extracted versions,
/// version ranges, and severity entries. Any component may be null when absent.
/// Ranges/severities are kept as loose dictionaries and converted to BSON on demand.
/// </summary>
public sealed record AdvisoryLinksetNormalized(
    IReadOnlyList<string>? Purls,
    IReadOnlyList<string>? Versions,
    IReadOnlyList<Dictionary<string, object?>>? Ranges,
    IReadOnlyList<Dictionary<string, object?>>? Severities)
{
    /// <summary>Converts <see cref="Ranges"/> to BSON documents; null when Ranges is null.</summary>
    public List<BsonDocument>? RangesToBson()
        => Ranges is null ? null : Ranges.Select(BsonDocumentHelper.FromDictionary).ToList();

    /// <summary>Converts <see cref="Severities"/> to BSON documents; null when Severities is null.</summary>
    public List<BsonDocument>? SeveritiesToBson()
        => Severities is null ? null : Severities.Select(BsonDocumentHelper.FromDictionary).ToList();
}
|
||||
|
||||
/// <summary>
/// Provenance recorded when a linkset is built: hashes of the contributing
/// observations, the builder tool version, and the policy hash in effect.
/// All components are optional.
/// </summary>
public sealed record AdvisoryLinksetProvenance(
    IReadOnlyList<string>? ObservationHashes,
    string? ToolVersion,
    string? PolicyHash);
|
||||
|
||||
/// <summary>
/// Converts loose string-keyed dictionaries (normalized ranges/severities) into
/// <see cref="BsonDocument"/> instances for storage.
/// </summary>
internal static class BsonDocumentHelper
{
    /// <summary>
    /// Maps each entry of <paramref name="dictionary"/> to a BSON field; null values
    /// become <see cref="BsonNull.Value"/>, everything else goes through
    /// <see cref="BsonValue.Create"/>.
    /// </summary>
    /// <exception cref="ArgumentNullException">When <paramref name="dictionary"/> is null.</exception>
    public static BsonDocument FromDictionary(Dictionary<string, object?> dictionary)
    {
        ArgumentNullException.ThrowIfNull(dictionary);
        var doc = new BsonDocument();
        foreach (var kvp in dictionary)
        {
            // NOTE(review): BsonValue.Create may throw for values it cannot map directly
            // (e.g. a nested Dictionary/List — the sample linkset "ranges" carry nested
            // "events" arrays). Confirm nested range values survive this conversion or
            // add recursive handling.
            doc[kvp.Key] = kvp.Value is null ? BsonNull.Value : BsonValue.Create(kvp.Value);
        }

        return doc;
    }
}
|
||||
@@ -0,0 +1,82 @@
|
||||
using System.Collections.Generic;
|
||||
using System.Collections.Immutable;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using StellaOps.Concelier.Core.Observations;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// Rebuilds linksets for a tenant by grouping the tenant's advisory observations
/// by (source vendor, upstream advisory id) and upserting one linkset per group.
/// Deterministic: ordinal grouping/dedup, CreatedAt taken from the observations.
/// </summary>
internal sealed class AdvisoryLinksetBackfillService : IAdvisoryLinksetBackfillService
{
    private readonly IAdvisoryObservationLookup _observations; // read side: observations per tenant
    private readonly IAdvisoryLinksetSink _linksetSink;        // write side: linkset upserts
    private readonly TimeProvider _timeProvider;               // injected clock (testability)

    public AdvisoryLinksetBackfillService(
        IAdvisoryObservationLookup observations,
        IAdvisoryLinksetSink linksetSink,
        TimeProvider timeProvider)
    {
        _observations = observations ?? throw new ArgumentNullException(nameof(observations));
        _linksetSink = linksetSink ?? throw new ArgumentNullException(nameof(linksetSink));
        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
    }

    /// <summary>
    /// Backfills linksets for <paramref name="tenant"/>.
    /// </summary>
    /// <param name="tenant">Tenant whose observations are regrouped; must be non-blank.</param>
    /// <param name="cancellationToken">Checked before each group is processed.</param>
    /// <returns>The number of linksets upserted (0 when the tenant has no observations).</returns>
    /// <exception cref="ArgumentException">When <paramref name="tenant"/> is null/blank.</exception>
    public async Task<int> BackfillTenantAsync(string tenant, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenant);
        cancellationToken.ThrowIfCancellationRequested();

        var observations = await _observations.ListByTenantAsync(tenant, cancellationToken).ConfigureAwait(false);
        if (observations.Count == 0)
        {
            return 0;
        }

        // One linkset per (vendor, upstream advisory id); ordinal comparer keeps grouping deterministic.
        var groups = observations.GroupBy(
            o => (o.Source.Vendor, o.Upstream.UpstreamId),
            new VendorUpstreamComparer());
        var count = 0;
        // Fix: removed an unused `var now = _timeProvider.GetUtcNow();` local — CreatedAt
        // is derived from the observations themselves, not from the current time.

        foreach (var group in groups)
        {
            cancellationToken.ThrowIfCancellationRequested();

            var observationIds = group.Select(o => o.ObservationId).Distinct(StringComparer.Ordinal).ToImmutableArray();
            // The linkset inherits the newest contributing observation's timestamp.
            var createdAt = group.Max(o => o.CreatedAt);
            var normalized = AdvisoryLinksetNormalization.FromPurls(group.SelectMany(o => o.Linkset.Purls));

            var linkset = new AdvisoryLinkset(
                tenant,
                group.Key.Vendor,
                group.Key.UpstreamId,
                observationIds,
                normalized,
                null,   // provenance is not reconstructed during backfill
                createdAt,
                null);  // backfill runs carry no builder job id

            await _linksetSink.UpsertAsync(linkset, cancellationToken).ConfigureAwait(false);
            count++;
        }

        return count;
    }
}
|
||||
|
||||
/// <summary>
/// Ordinal (case-sensitive) equality for (vendor, upstream advisory id) grouping keys.
/// Null components are tolerated: equality treats them like any other value via
/// null-safe comparison, and hashing maps null to 0.
/// </summary>
internal sealed class VendorUpstreamComparer : IEqualityComparer<(string Vendor, string UpstreamId)>
{
    public bool Equals((string Vendor, string UpstreamId) x, (string Vendor, string UpstreamId) y)
    {
        return string.Equals(x.Vendor, y.Vendor, StringComparison.Ordinal)
            && string.Equals(x.UpstreamId, y.UpstreamId, StringComparison.Ordinal);
    }

    public int GetHashCode((string Vendor, string UpstreamId) obj)
    {
        var vendorHash = obj.Vendor is null ? 0 : StringComparer.Ordinal.GetHashCode(obj.Vendor);
        var upstreamHash = obj.UpstreamId is null ? 0 : StringComparer.Ordinal.GetHashCode(obj.UpstreamId);
        return HashCode.Combine(vendorHash, upstreamHash);
    }
}
|
||||
@@ -0,0 +1,5 @@
|
||||
using System;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// Keyset-pagination cursor for linkset queries. Position matches the query ordering
/// (CreatedAt descending, then AdvisoryId ordinal ascending) used by the query service.
/// </summary>
public sealed record AdvisoryLinksetCursor(DateTimeOffset CreatedAt, string AdvisoryId);
|
||||
@@ -0,0 +1,78 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Linq;
|
||||
using StellaOps.Concelier.RawModels;
|
||||
using StellaOps.Concelier.Models;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// Builds the normalized (purls + extracted versions) projection of a linkset from
/// raw package URLs. Output ordering is deterministic (ordinal sort, de-duplicated).
/// </summary>
internal static class AdvisoryLinksetNormalization
{
    // Per the purl spec, qualifiers start at '?' and the subpath at '#'; everything
    // after either character is not part of the name@version core.
    private static readonly char[] PurlTerminators = { '?', '#' };

    /// <summary>Normalizes the package URLs attached to a raw linkset; null when nothing normalizes.</summary>
    /// <exception cref="ArgumentNullException">When <paramref name="linkset"/> is null.</exception>
    public static AdvisoryLinksetNormalized? FromRawLinkset(RawLinkset linkset)
    {
        ArgumentNullException.ThrowIfNull(linkset);
        return Build(linkset.PackageUrls);
    }

    /// <summary>Normalizes an arbitrary purl sequence; null input or no usable purls yields null.</summary>
    public static AdvisoryLinksetNormalized? FromPurls(IEnumerable<string>? purls)
    {
        if (purls is null)
        {
            return null;
        }

        return Build(purls);
    }

    // Shared pipeline: trim/dedup/sort the purls, then extract versions; returns null
    // when both projections are empty so callers can store "no normalized data".
    private static AdvisoryLinksetNormalized? Build(IEnumerable<string> purlValues)
    {
        var normalizedPurls = NormalizePurls(purlValues);
        var versions = ExtractVersions(normalizedPurls);

        if (normalizedPurls.Count == 0 && versions.Count == 0)
        {
            return null;
        }

        return new AdvisoryLinksetNormalized(normalizedPurls, versions, null, null);
    }

    // Trims each purl (blank → dropped) and returns an ordinal-sorted, de-duplicated list.
    private static List<string> NormalizePurls(IEnumerable<string> purls)
    {
        var distinct = new SortedSet<string>(StringComparer.Ordinal);
        foreach (var purl in purls)
        {
            var normalized = Validation.TrimToNull(purl);
            if (normalized is null)
            {
                continue;
            }

            distinct.Add(normalized);
        }

        return distinct.ToList();
    }

    // Extracts the version component after the last '@' of each purl's core.
    // Fix: qualifiers ('?...') and subpath ('#...') are stripped before locating '@',
    // so "pkg:npm/example@1.2.3?arch=x64" yields "1.2.3" (previously "1.2.3?arch=x64"),
    // and an '@' inside a qualifier value (e.g. repository_url=git@host) can no longer
    // be mistaken for the version separator.
    private static List<string> ExtractVersions(IReadOnlyCollection<string> purls)
    {
        var versions = new SortedSet<string>(StringComparer.Ordinal);

        foreach (var purl in purls)
        {
            var core = purl;
            var terminator = core.IndexOfAny(PurlTerminators);
            if (terminator >= 0)
            {
                core = core[..terminator];
            }

            var atIndex = core.LastIndexOf('@');
            if (atIndex < 0 || atIndex >= core.Length - 1)
            {
                continue;
            }

            var version = core[(atIndex + 1)..];
            if (!string.IsNullOrWhiteSpace(version))
            {
                versions.Add(version);
            }
        }

        return versions.ToList();
    }
}
|
||||
@@ -0,0 +1,10 @@
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// Filter and paging options for linkset queries.
/// </summary>
/// <param name="Tenant">Required tenant scope; queries never span tenants.</param>
/// <param name="AdvisoryIds">Optional advisory-id filter; null means no restriction.</param>
/// <param name="Sources">Optional source filter; null means no restriction.</param>
/// <param name="Limit">Optional page size; the query service applies its own default (100) and cap (500).</param>
/// <param name="Cursor">Opaque base64 continuation cursor from a previous page, if any.</param>
public sealed record AdvisoryLinksetQueryOptions(
    string Tenant,
    IEnumerable<string>? AdvisoryIds = null,
    IEnumerable<string>? Sources = null,
    int? Limit = null,
    string? Cursor = null);
|
||||
@@ -0,0 +1,111 @@
|
||||
using System.Collections.Immutable;
|
||||
|
||||
using System;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// Read-side query surface for advisory linksets (tenant-scoped, cursor-paged).
/// </summary>
public interface IAdvisoryLinksetQueryService
{
    /// <summary>Executes the query described by <paramref name="options"/>, returning one page plus continuation info.</summary>
    Task<AdvisoryLinksetQueryResult> QueryAsync(AdvisoryLinksetQueryOptions options, CancellationToken cancellationToken);
}
|
||||
|
||||
/// <summary>One page of linkset query results, with an opaque continuation cursor when more pages exist.</summary>
public sealed record AdvisoryLinksetQueryResult(ImmutableArray<AdvisoryLinkset> Linksets, string? NextCursor, bool HasMore);

// NOTE(review): AdvisoryLinksetPage duplicates AdvisoryLinksetQueryResult's shape and is not
// referenced anywhere in the visible code — confirm it has a consumer or consider removing it.
public sealed record AdvisoryLinksetPage(ImmutableArray<AdvisoryLinkset> Linksets, string? NextCursor, bool HasMore);
|
||||
|
||||
/// <summary>
/// Default <see cref="IAdvisoryLinksetQueryService"/> implementation. Pages linksets from an
/// <see cref="IAdvisoryLinksetLookup"/> ordered by CreatedAt (descending) then AdvisoryId
/// (ordinal ascending), using an opaque base64 "utcTicks:advisoryId" keyset cursor.
/// </summary>
public sealed class AdvisoryLinksetQueryService : IAdvisoryLinksetQueryService
{
    private const int DefaultLimit = 100;
    private const int MaxLimit = 500;
    private readonly IAdvisoryLinksetLookup _store;

    public AdvisoryLinksetQueryService(IAdvisoryLinksetLookup store)
    {
        _store = store ?? throw new ArgumentNullException(nameof(store));
    }

    /// <summary>
    /// Executes the query described by <paramref name="options"/>. Fetches one row beyond the
    /// page limit so <c>HasMore</c> can be derived without a separate count query.
    /// </summary>
    /// <exception cref="ArgumentNullException"><paramref name="options"/> or its tenant is null.</exception>
    /// <exception cref="ArgumentException">The tenant is empty or whitespace.</exception>
    /// <exception cref="FormatException">The supplied cursor cannot be decoded.</exception>
    public async Task<AdvisoryLinksetQueryResult> QueryAsync(AdvisoryLinksetQueryOptions options, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(options);
        cancellationToken.ThrowIfCancellationRequested();

        // Fix: a whitespace (non-null) tenant previously raised ArgumentNullException.
        // ArgumentException is the correct type for an invalid non-null value and matches the
        // validation style used by the storage layer; null still raises ArgumentNullException.
        ArgumentException.ThrowIfNullOrWhiteSpace(options.Tenant);
        var tenant = options.Tenant.ToLowerInvariant();

        var limit = NormalizeLimit(options.Limit);
        var cursor = DecodeCursor(options.Cursor);

        // limit + 1: the extra row, if present, proves another page exists.
        var linksets = await _store
            .FindByTenantAsync(tenant, options.AdvisoryIds, options.Sources, cursor, limit + 1, cancellationToken)
            .ConfigureAwait(false);

        // Re-sort defensively; the lookup contract does not guarantee ordering.
        var ordered = linksets
            .OrderByDescending(ls => ls.CreatedAt)
            .ThenBy(ls => ls.AdvisoryId, StringComparer.Ordinal)
            .ToImmutableArray();

        var hasMore = ordered.Length > limit;
        var page = hasMore ? ordered.Take(limit).ToImmutableArray() : ordered;
        var nextCursor = hasMore ? EncodeCursor(page[^1]) : null;

        return new AdvisoryLinksetQueryResult(page, nextCursor, hasMore);
    }

    // Clamps the requested page size: null/non-positive -> DefaultLimit; anything above MaxLimit -> MaxLimit.
    private static int NormalizeLimit(int? requested)
    {
        if (!requested.HasValue || requested <= 0)
        {
            return DefaultLimit;
        }

        return requested.Value > MaxLimit ? MaxLimit : requested.Value;
    }

    // Decodes a base64 "utcTicks:advisoryId" cursor produced by EncodeCursor.
    // Returns null for an absent cursor; throws FormatException for any malformed input.
    private static AdvisoryLinksetCursor? DecodeCursor(string? cursor)
    {
        if (string.IsNullOrWhiteSpace(cursor))
        {
            return null;
        }

        try
        {
            var buffer = Convert.FromBase64String(cursor.Trim());
            var payload = System.Text.Encoding.UTF8.GetString(buffer);
            // First ':' splits ticks from advisoryId; advisoryIds containing ':' remain intact.
            var separator = payload.IndexOf(':');
            if (separator <= 0 || separator >= payload.Length - 1)
            {
                throw new FormatException("Cursor format invalid.");
            }

            var ticksText = payload[..separator];
            if (!long.TryParse(ticksText, out var ticks))
            {
                throw new FormatException("Cursor timestamp invalid.");
            }

            var advisoryId = payload[(separator + 1)..];
            if (string.IsNullOrWhiteSpace(advisoryId))
            {
                throw new FormatException("Cursor advisoryId missing.");
            }

            return new AdvisoryLinksetCursor(new DateTimeOffset(new DateTime(ticks, DateTimeKind.Utc)), advisoryId);
        }
        catch (FormatException)
        {
            // Our own FormatExceptions (and base64 decode failures) pass through unchanged.
            throw;
        }
        catch (Exception ex)
        {
            // Anything else (e.g. ticks out of DateTime range) surfaces as a malformed cursor.
            throw new FormatException("Cursor is malformed.", ex);
        }
    }

    // Encodes the keyset position of the last item on a page as base64("utcTicks:advisoryId").
    private static string? EncodeCursor(AdvisoryLinkset linkset)
    {
        var payload = $"{linkset.CreatedAt.UtcTicks}:{linkset.AdvisoryId}";
        return Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(payload));
    }
}
|
||||
@@ -0,0 +1,9 @@
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// Rebuilds linksets for an entire tenant from previously ingested data.
/// </summary>
public interface IAdvisoryLinksetBackfillService
{
    /// <summary>
    /// Runs the backfill for <paramref name="tenant"/>. Returns an item count —
    /// presumably the number of linksets written; confirm against the implementation.
    /// </summary>
    Task<int> BackfillTenantAsync(string tenant, CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,9 @@
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// Write side of linkset persistence; implementations insert or update a linkset.
/// </summary>
public interface IAdvisoryLinksetSink
{
    /// <summary>Inserts or updates <paramref name="linkset"/>.</summary>
    Task UpsertAsync(AdvisoryLinkset linkset, CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,20 @@
|
||||
using System.Collections.Generic;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// Combined read/write linkset storage contract.
/// </summary>
public interface IAdvisoryLinksetStore : IAdvisoryLinksetSink, IAdvisoryLinksetLookup
{
}

/// <summary>
/// Read side of linkset storage.
/// </summary>
public interface IAdvisoryLinksetLookup
{
    /// <summary>
    /// Returns up to <paramref name="limit"/> linksets for the tenant, optionally filtered by
    /// advisory ids and sources, resuming after <paramref name="cursor"/> when supplied.
    /// Result ordering is implementation-defined; the query service re-sorts results itself.
    /// </summary>
    Task<IReadOnlyList<AdvisoryLinkset>> FindByTenantAsync(
        string tenantId,
        IEnumerable<string>? advisoryIds,
        IEnumerable<string>? sources,
        AdvisoryLinksetCursor? cursor,
        int limit,
        CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,45 @@
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using Microsoft.Extensions.DependencyInjection.Extensions;
|
||||
using StellaOps.Concelier.Core.Observations;
|
||||
using StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
/// <summary>
/// DI wiring for the Concelier observation/linkset pipeline. Registers no-op sinks and an
/// empty lookup as fallbacks, so hosts without a storage backend can still resolve the
/// interfaces; because TryAddSingleton only registers when the service is absent, a prior
/// real storage registration takes precedence.
/// </summary>
public static class ObservationPipelineServiceCollectionExtensions
{
    public static IServiceCollection AddConcelierObservationPipeline(this IServiceCollection services)
    {
        ArgumentNullException.ThrowIfNull(services);

        services.TryAddSingleton<IAdvisoryObservationSink, NullObservationSink>();
        services.TryAddSingleton<IAdvisoryLinksetSink, NullLinksetSink>();
        services.TryAddSingleton<IAdvisoryLinksetLookup, NullLinksetLookup>();
        services.TryAddSingleton<IAdvisoryLinksetBackfillService, AdvisoryLinksetBackfillService>();

        return services;
    }

    // Fallback sink: silently discards observation upserts.
    private sealed class NullObservationSink : IAdvisoryObservationSink
    {
        public Task UpsertAsync(Models.Observations.AdvisoryObservation observation, CancellationToken cancellationToken)
            => Task.CompletedTask;
    }

    // Fallback sink: silently discards linkset upserts.
    private sealed class NullLinksetSink : IAdvisoryLinksetSink
    {
        public Task UpsertAsync(AdvisoryLinkset linkset, CancellationToken cancellationToken)
            => Task.CompletedTask;
    }

    // Fallback lookup: always returns an empty result set.
    private sealed class NullLinksetLookup : IAdvisoryLinksetLookup
    {
        public Task<IReadOnlyList<AdvisoryLinkset>> FindByTenantAsync(
            string tenantId,
            IEnumerable<string>? advisoryIds,
            IEnumerable<string>? sources,
            AdvisoryLinksetCursor? cursor,
            int limit,
            CancellationToken cancellationToken)
            => Task.FromResult<IReadOnlyList<AdvisoryLinkset>>(Array.Empty<AdvisoryLinkset>());
    }
}
|
||||
@@ -0,0 +1,10 @@
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using StellaOps.Concelier.Models.Observations;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Observations;
|
||||
|
||||
/// <summary>
/// Write side of observation persistence; implementations insert or update an observation.
/// </summary>
public interface IAdvisoryObservationSink
{
    /// <summary>Inserts or updates <paramref name="observation"/>.</summary>
    Task UpsertAsync(AdvisoryObservation observation, CancellationToken cancellationToken);
}
|
||||
@@ -10,6 +10,7 @@ using StellaOps.Aoc;
|
||||
using StellaOps.Ingestion.Telemetry;
|
||||
using StellaOps.Concelier.Core.Aoc;
|
||||
using StellaOps.Concelier.Core.Linksets;
|
||||
using StellaOps.Concelier.Core.Observations;
|
||||
using StellaOps.Concelier.RawModels;
|
||||
using StellaOps.Concelier.Models;
|
||||
|
||||
@@ -23,6 +24,9 @@ internal sealed class AdvisoryRawService : IAdvisoryRawService
|
||||
private readonly IAdvisoryRawWriteGuard _writeGuard;
|
||||
private readonly IAocGuard _aocGuard;
|
||||
private readonly IAdvisoryLinksetMapper _linksetMapper;
|
||||
private readonly IAdvisoryObservationFactory _observationFactory;
|
||||
private readonly IAdvisoryObservationSink _observationSink;
|
||||
private readonly IAdvisoryLinksetSink _linksetSink;
|
||||
private readonly TimeProvider _timeProvider;
|
||||
private readonly ILogger<AdvisoryRawService> _logger;
|
||||
|
||||
@@ -31,6 +35,9 @@ internal sealed class AdvisoryRawService : IAdvisoryRawService
|
||||
IAdvisoryRawWriteGuard writeGuard,
|
||||
IAocGuard aocGuard,
|
||||
IAdvisoryLinksetMapper linksetMapper,
|
||||
IAdvisoryObservationFactory observationFactory,
|
||||
IAdvisoryObservationSink observationSink,
|
||||
IAdvisoryLinksetSink linksetSink,
|
||||
TimeProvider timeProvider,
|
||||
ILogger<AdvisoryRawService> logger)
|
||||
{
|
||||
@@ -38,6 +45,9 @@ internal sealed class AdvisoryRawService : IAdvisoryRawService
|
||||
_writeGuard = writeGuard ?? throw new ArgumentNullException(nameof(writeGuard));
|
||||
_aocGuard = aocGuard ?? throw new ArgumentNullException(nameof(aocGuard));
|
||||
_linksetMapper = linksetMapper ?? throw new ArgumentNullException(nameof(linksetMapper));
|
||||
_observationFactory = observationFactory ?? throw new ArgumentNullException(nameof(observationFactory));
|
||||
_observationSink = observationSink ?? throw new ArgumentNullException(nameof(observationSink));
|
||||
_linksetSink = linksetSink ?? throw new ArgumentNullException(nameof(linksetSink));
|
||||
_timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
|
||||
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
|
||||
}
|
||||
@@ -102,6 +112,23 @@ internal sealed class AdvisoryRawService : IAdvisoryRawService
|
||||
var result = await _repository.UpsertAsync(enriched, cancellationToken).ConfigureAwait(false);
|
||||
IngestionTelemetry.RecordWriteAttempt(tenant, source, result.Inserted ? IngestionTelemetry.ResultOk : IngestionTelemetry.ResultNoop);
|
||||
|
||||
// Persist observation + linkset for Link-Not-Merge consumers (idempotent upserts).
|
||||
var observation = _observationFactory.Create(enriched, _timeProvider.GetUtcNow());
|
||||
await _observationSink.UpsertAsync(observation, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var normalizedLinkset = AdvisoryLinksetNormalization.FromRawLinkset(enriched.Linkset);
|
||||
var linkset = new AdvisoryLinkset(
|
||||
tenant,
|
||||
source,
|
||||
enriched.Upstream.UpstreamId,
|
||||
ImmutableArray.Create(observation.ObservationId),
|
||||
normalizedLinkset,
|
||||
null,
|
||||
_timeProvider.GetUtcNow(),
|
||||
null);
|
||||
|
||||
await _linksetSink.UpsertAsync(linkset, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
if (result.Inserted)
|
||||
{
|
||||
_logger.LogInformation(
|
||||
|
||||
@@ -0,0 +1,87 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
|
||||
namespace StellaOps.Concelier.Storage.Mongo.Linksets;
|
||||
|
||||
/// <summary>
/// Mongo persistence shape for an advisory linkset. One document per
/// (tenantId, source, advisoryId) — the key the store's upsert filter uses.
/// </summary>
[BsonIgnoreExtraElements]
public sealed class AdvisoryLinksetDocument
{
    [BsonId]
    public ObjectId Id { get; set; } = ObjectId.GenerateNewId();

    [BsonElement("tenantId")]
    public string TenantId { get; set; } = string.Empty;

    [BsonElement("source")]
    public string Source { get; set; } = string.Empty;

    [BsonElement("advisoryId")]
    public string AdvisoryId { get; set; } = string.Empty;

    // Ids of the observations contributing to this linkset.
    [BsonElement("observations")]
    public List<string> Observations { get; set; } = new();

    [BsonElement("normalized")]
    [BsonIgnoreIfNull]
    public AdvisoryLinksetNormalizedDocument? Normalized { get; set; } = null;

    // Default is construction time; the store overwrites this from the model's CreatedAt
    // on every map, so the default only matters for hand-built documents.
    [BsonElement("createdAt")]
    public DateTime CreatedAt { get; set; } = DateTime.UtcNow;

    [BsonElement("builtByJobId")]
    [BsonIgnoreIfNull]
    public string? BuiltByJobId { get; set; } = null;

    [BsonElement("provenance")]
    [BsonIgnoreIfNull]
    public AdvisoryLinksetProvenanceDocument? Provenance { get; set; } = null;
}

/// <summary>
/// Normalized coordinates (purls, versions, ranges, severities) attached to a linkset.
/// Ranges and severities are stored as opaque BSON subdocuments.
/// </summary>
[BsonIgnoreExtraElements]
public sealed class AdvisoryLinksetNormalizedDocument
{
    [BsonElement("purls")]
    [BsonIgnoreIfNull]
    public List<string>? Purls { get; set; } = new();

    [BsonElement("versions")]
    [BsonIgnoreIfNull]
    public List<string>? Versions { get; set; } = new();

    [BsonElement("ranges")]
    [BsonIgnoreIfNull]
    public List<BsonDocument>? Ranges { get; set; } = new();

    [BsonElement("severities")]
    [BsonIgnoreIfNull]
    public List<BsonDocument>? Severities { get; set; } = new();
}

/// <summary>
/// Provenance recorded for a linkset: source observation hashes and the tool/policy
/// versions that produced it.
/// </summary>
[BsonIgnoreExtraElements]
public sealed class AdvisoryLinksetProvenanceDocument
{
    [BsonElement("observationHashes")]
    [BsonIgnoreIfNull]
    public List<string>? ObservationHashes { get; set; } = new();

    [BsonElement("toolVersion")]
    [BsonIgnoreIfNull]
    public string? ToolVersion { get; set; } = null;

    [BsonElement("policyHash")]
    [BsonIgnoreIfNull]
    public string? PolicyHash { get; set; } = null;
}
|
||||
@@ -0,0 +1,22 @@
|
||||
using System;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using CoreLinksets = StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
namespace StellaOps.Concelier.Storage.Mongo.Linksets;
|
||||
|
||||
/// <summary>
/// Storage adapter that satisfies the core linkset sink contract by forwarding upserts
/// to the storage-layer linkset store.
/// </summary>
internal sealed class AdvisoryLinksetSink : CoreLinksets.IAdvisoryLinksetSink
{
    private readonly IAdvisoryLinksetStore _store;

    public AdvisoryLinksetSink(IAdvisoryLinksetStore store)
        => _store = store ?? throw new ArgumentNullException(nameof(store));

    /// <summary>Validates the argument, then delegates persistence to the store.</summary>
    public Task UpsertAsync(CoreLinksets.AdvisoryLinkset linkset, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(linkset);
        return _store.UpsertAsync(linkset, cancellationToken);
    }
}
|
||||
@@ -0,0 +1,170 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Collections.Immutable;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using MongoDB.Driver;
|
||||
using CoreLinksets = StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
namespace StellaOps.Concelier.Storage.Mongo.Linksets;
|
||||
|
||||
// Internal type kept in storage namespace to avoid name clash with core interface
|
||||
/// <summary>
/// MongoDB-backed <see cref="CoreLinksets.IAdvisoryLinksetStore"/>. Upserts one document per
/// (tenantId, source, advisoryId) and serves keyset-paginated tenant queries ordered by
/// createdAt (descending) then advisoryId (ascending).
/// </summary>
internal sealed class MongoAdvisoryLinksetStore : CoreLinksets.IAdvisoryLinksetStore, CoreLinksets.IAdvisoryLinksetLookup
{
    private readonly IMongoCollection<AdvisoryLinksetDocument> _collection;

    public MongoAdvisoryLinksetStore(IMongoCollection<AdvisoryLinksetDocument> collection)
    {
        _collection = collection ?? throw new ArgumentNullException(nameof(collection));
    }

    /// <summary>Replaces (or inserts) the document keyed by (tenantId, source, advisoryId).</summary>
    public async Task UpsertAsync(CoreLinksets.AdvisoryLinkset linkset, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(linkset);

        var document = MapToDocument(linkset);
        // Fix: FindByTenantAsync filters on a lowercased tenant, but the upsert previously
        // stored and matched the caller's casing — documents written with mixed case could
        // never be read back. Filter on the normalized value produced by MapToDocument.
        var filter = Builders<AdvisoryLinksetDocument>.Filter.And(
            Builders<AdvisoryLinksetDocument>.Filter.Eq(d => d.TenantId, document.TenantId),
            Builders<AdvisoryLinksetDocument>.Filter.Eq(d => d.Source, linkset.Source),
            Builders<AdvisoryLinksetDocument>.Filter.Eq(d => d.AdvisoryId, linkset.AdvisoryId));

        var options = new ReplaceOptions { IsUpsert = true };
        await _collection.ReplaceOneAsync(filter, document, options, cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Returns up to <paramref name="limit"/> documents for the tenant (matched lowercased),
    /// optionally filtered by advisory ids and sources, resuming strictly after
    /// <paramref name="cursor"/>. Sorted createdAt-descending then advisoryId-ascending —
    /// the same order the query-service cursor encodes.
    /// </summary>
    public async Task<IReadOnlyList<CoreLinksets.AdvisoryLinkset>> FindByTenantAsync(
        string tenantId,
        IEnumerable<string>? advisoryIds,
        IEnumerable<string>? sources,
        CoreLinksets.AdvisoryLinksetCursor? cursor,
        int limit,
        CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
        if (limit <= 0)
        {
            throw new ArgumentOutOfRangeException(nameof(limit));
        }

        var builder = Builders<AdvisoryLinksetDocument>.Filter;
        var filters = new List<FilterDefinition<AdvisoryLinksetDocument>>
        {
            builder.Eq(d => d.TenantId, tenantId.ToLowerInvariant())
        };

        if (advisoryIds is not null)
        {
            var ids = advisoryIds.Where(v => !string.IsNullOrWhiteSpace(v)).ToArray();
            if (ids.Length > 0)
            {
                filters.Add(builder.In(d => d.AdvisoryId, ids));
            }
        }

        if (sources is not null)
        {
            var srcs = sources.Where(v => !string.IsNullOrWhiteSpace(v)).ToArray();
            if (srcs.Length > 0)
            {
                filters.Add(builder.In(d => d.Source, srcs));
            }
        }

        var filter = builder.And(filters);

        var sort = Builders<AdvisoryLinksetDocument>.Sort.Descending(d => d.CreatedAt).Ascending(d => d.AdvisoryId);
        var findFilter = filter;

        if (cursor is not null)
        {
            // Keyset pagination: strictly older, or same instant with a larger advisoryId.
            var cursorFilter = builder.Or(
                builder.Lt(d => d.CreatedAt, cursor.CreatedAt.UtcDateTime),
                builder.And(
                    builder.Eq(d => d.CreatedAt, cursor.CreatedAt.UtcDateTime),
                    builder.Gt(d => d.AdvisoryId, cursor.AdvisoryId)));

            findFilter = builder.And(findFilter, cursorFilter);
        }

        var documents = await _collection.Find(findFilter)
            .Sort(sort)
            .Limit(limit)
            .ToListAsync(cancellationToken)
            .ConfigureAwait(false);

        return documents.Select(FromDocument).ToArray();
    }

    // Maps the core model to the Mongo document shape. Tenant casing is normalized on write
    // so reads (which lowercase) always match.
    private static AdvisoryLinksetDocument MapToDocument(CoreLinksets.AdvisoryLinkset linkset)
    {
        var doc = new AdvisoryLinksetDocument
        {
            TenantId = linkset.TenantId.ToLowerInvariant(),
            Source = linkset.Source,
            AdvisoryId = linkset.AdvisoryId,
            Observations = new List<string>(linkset.ObservationIds),
            CreatedAt = linkset.CreatedAt.UtcDateTime,
            BuiltByJobId = linkset.BuiltByJobId,
            Provenance = linkset.Provenance is null ? null : new AdvisoryLinksetProvenanceDocument
            {
                ObservationHashes = linkset.Provenance.ObservationHashes is null
                    ? null
                    : new List<string>(linkset.Provenance.ObservationHashes),
                ToolVersion = linkset.Provenance.ToolVersion,
                PolicyHash = linkset.Provenance.PolicyHash,
            },
            Normalized = linkset.Normalized is null ? null : new AdvisoryLinksetNormalizedDocument
            {
                Purls = linkset.Normalized.Purls is null ? null : new List<string>(linkset.Normalized.Purls),
                Versions = linkset.Normalized.Versions is null ? null : new List<string>(linkset.Normalized.Versions),
                Ranges = linkset.Normalized.RangesToBson(),
                Severities = linkset.Normalized.SeveritiesToBson(),
            }
        };

        return doc;
    }

    // Rehydrates the core model from a Mongo document.
    // Fix: the core model types were referenced unqualified, but this file only imports the
    // CoreLinksets alias (no plain using of the core namespace), so the names could not
    // resolve — qualify them with the alias.
    private static CoreLinksets.AdvisoryLinkset FromDocument(AdvisoryLinksetDocument doc)
    {
        return new CoreLinksets.AdvisoryLinkset(
            doc.TenantId,
            doc.Source,
            doc.AdvisoryId,
            doc.Observations.ToImmutableArray(),
            doc.Normalized is null ? null : new CoreLinksets.AdvisoryLinksetNormalized(
                doc.Normalized.Purls,
                doc.Normalized.Versions,
                doc.Normalized.Ranges?.Select(ToDictionary).ToList(),
                doc.Normalized.Severities?.Select(ToDictionary).ToList()),
            doc.Provenance is null ? null : new CoreLinksets.AdvisoryLinksetProvenance(
                doc.Provenance.ObservationHashes,
                doc.Provenance.ToolVersion,
                doc.Provenance.PolicyHash),
            DateTime.SpecifyKind(doc.CreatedAt, DateTimeKind.Utc),
            doc.BuiltByJobId);
    }

    // Converts an opaque BSON subdocument into a plain dictionary of CLR primitives.
    // Unrecognized BSON types (and array elements) fall back to their string representation.
    private static Dictionary<string, object?> ToDictionary(MongoDB.Bson.BsonDocument bson)
    {
        var dict = new Dictionary<string, object?>(StringComparer.Ordinal);
        foreach (var element in bson.Elements)
        {
            dict[element.Name] = element.Value switch
            {
                MongoDB.Bson.BsonString s => s.AsString,
                MongoDB.Bson.BsonInt32 i => i.AsInt32,
                MongoDB.Bson.BsonInt64 l => l.AsInt64,
                MongoDB.Bson.BsonDouble d => d.AsDouble,
                MongoDB.Bson.BsonDecimal128 dec => dec.ToDecimal(),
                MongoDB.Bson.BsonBoolean b => b.AsBoolean,
                MongoDB.Bson.BsonDateTime dt => dt.ToUniversalTime(),
                MongoDB.Bson.BsonNull => (object?)null,
                MongoDB.Bson.BsonArray arr => arr.Select(v => v.ToString()).ToArray(),
                _ => element.Value.ToString()
            };
        }
        return dict;
    }
}
|
||||
@@ -0,0 +1,242 @@
|
||||
using System.Collections.Generic;
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Driver;
|
||||
|
||||
namespace StellaOps.Concelier.Storage.Mongo.Migrations;
|
||||
|
||||
/// <summary>
/// Idempotent migration that creates (or updates) the advisory_observations and
/// advisory_linksets collections with $jsonSchema validators and their supporting indexes
/// for the Link-Not-Merge pipeline.
/// </summary>
internal sealed class EnsureLinkNotMergeCollectionsMigration : IMongoMigration
{
    public string Id => "20251116_link_not_merge_collections";

    public string Description => "Ensure advisory_observations and advisory_linksets collections exist with validators and indexes for Link-Not-Merge";

    public async Task ApplyAsync(IMongoDatabase database, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(database);

        await EnsureObservationsAsync(database, cancellationToken).ConfigureAwait(false);
        await EnsureLinksetsAsync(database, cancellationToken).ConfigureAwait(false);
    }

    // Creates/updates the observations collection validator and its indexes.
    private static async Task EnsureObservationsAsync(IMongoDatabase database, CancellationToken ct)
    {
        var collectionName = MongoStorageDefaults.Collections.AdvisoryObservations;
        var validator = new BsonDocument("$jsonSchema", BuildObservationSchema());
        await EnsureCollectionWithValidatorAsync(database, collectionName, validator, ct).ConfigureAwait(false);

        var collection = database.GetCollection<BsonDocument>(collectionName);
        // NOTE(review): index keys "tenant" and "upstream.fetchedAt" do not match the schema's
        // field names ("tenantId", "provenance.fetchedAt") — confirm which naming the actual
        // observation documents use; one of the two appears wrong.
        // NOTE(review): the unique sourceArtifactSha index is global, not tenant-scoped —
        // confirm that the same artifact sha is never legitimately ingested by two tenants.
        var indexes = new List<CreateIndexModel<BsonDocument>>
        {
            new(new BsonDocument
            {
                {"tenant", 1},
                {"source", 1},
                {"advisoryId", 1},
                {"upstream.fetchedAt", -1},
            },
            new CreateIndexOptions { Name = "obs_tenant_source_adv_fetchedAt" }),
            new(new BsonDocument
            {
                {"provenance.sourceArtifactSha", 1},
            },
            new CreateIndexOptions { Name = "obs_prov_sourceArtifactSha_unique", Unique = true }),
        };

        await collection.Indexes.CreateManyAsync(indexes, cancellationToken: ct).ConfigureAwait(false);
    }

    // Creates/updates the linksets collection validator and its indexes.
    private static async Task EnsureLinksetsAsync(IMongoDatabase database, CancellationToken ct)
    {
        var collectionName = MongoStorageDefaults.Collections.AdvisoryLinksets;
        var validator = new BsonDocument("$jsonSchema", BuildLinksetSchema());
        await EnsureCollectionWithValidatorAsync(database, collectionName, validator, ct).ConfigureAwait(false);

        var collection = database.GetCollection<BsonDocument>(collectionName);
        var indexes = new List<CreateIndexModel<BsonDocument>>
        {
            // Matches the store's upsert key (tenantId, advisoryId, source); unique enforces
            // the one-document-per-key invariant.
            new(new BsonDocument
            {
                {"tenantId", 1},
                {"advisoryId", 1},
                {"source", 1},
            },
            new CreateIndexOptions { Name = "linkset_tenant_advisory_source", Unique = true }),
            // Multikey index for reverse lookup: which linksets reference an observation id.
            new(new BsonDocument { { "observations", 1 } }, new CreateIndexOptions { Name = "linkset_observations" })
        };

        await collection.Indexes.CreateManyAsync(indexes, cancellationToken: ct).ConfigureAwait(false);
    }

    // Creates the collection with the validator when absent; otherwise applies the validator
    // to the existing collection via collMod. Validation is "moderate" (existing invalid
    // documents are tolerated until next update) with action "error".
    private static async Task EnsureCollectionWithValidatorAsync(
        IMongoDatabase database,
        string collectionName,
        BsonDocument validator,
        CancellationToken ct)
    {
        var filter = new BsonDocument("name", collectionName);
        var existing = await database.ListCollectionsAsync(new ListCollectionsOptions { Filter = filter }, ct)
            .ConfigureAwait(false);
        var exists = await existing.AnyAsync(ct).ConfigureAwait(false);

        if (!exists)
        {
            var options = new CreateCollectionOptions<BsonDocument>
            {
                Validator = validator,
                ValidationLevel = DocumentValidationLevel.Moderate,
                ValidationAction = DocumentValidationAction.Error,
            };

            await database.CreateCollectionAsync(collectionName, options, ct).ConfigureAwait(false);
        }
        else
        {
            var command = new BsonDocument
            {
                { "collMod", collectionName },
                { "validator", validator },
                { "validationLevel", "moderate" },
                { "validationAction", "error" },
            };
            await database.RunCommandAsync<BsonDocument>(command, cancellationToken: ct).ConfigureAwait(false);
        }
    }

    // $jsonSchema for advisory_observations: string _id, tenant/source/advisory identifiers,
    // optional severities/references/weaknesses, required affected purls and provenance.
    private static BsonDocument BuildObservationSchema()
    {
        return new BsonDocument
        {
            { "bsonType", "object" },
            { "required", new BsonArray { "_id", "tenantId", "source", "advisoryId", "affected", "provenance", "ingestedAt" } },
            { "properties", new BsonDocument
                {
                    { "_id", new BsonDocument("bsonType", "string") },
                    { "tenantId", new BsonDocument("bsonType", "string") },
                    { "source", new BsonDocument("bsonType", "string") },
                    { "advisoryId", new BsonDocument("bsonType", "string") },
                    { "title", new BsonDocument("bsonType", new BsonArray { "string", "null" }) },
                    { "summary", new BsonDocument("bsonType", new BsonArray { "string", "null" }) },
                    { "severities", new BsonDocument
                        {
                            { "bsonType", "array" },
                            { "items", new BsonDocument
                                {
                                    { "bsonType", "object" },
                                    { "required", new BsonArray { "system", "score" } },
                                    { "properties", new BsonDocument
                                        {
                                            { "system", new BsonDocument("bsonType", "string") },
                                            { "score", new BsonDocument("bsonType", new BsonArray { "double", "int", "long", "decimal" }) },
                                            { "vector", new BsonDocument("bsonType", new BsonArray { "string", "null" }) }
                                        }
                                    }
                                }
                            }
                        }
                    },
                    { "affected", new BsonDocument
                        {
                            { "bsonType", "array" },
                            { "items", new BsonDocument
                                {
                                    { "bsonType", "object" },
                                    { "required", new BsonArray { "purl" } },
                                    { "properties", new BsonDocument
                                        {
                                            { "purl", new BsonDocument("bsonType", "string") },
                                            { "package", new BsonDocument("bsonType", new BsonArray { "string", "null" }) },
                                            { "versions", new BsonDocument("bsonType", new BsonArray { "array", "null" }) },
                                            { "ranges", new BsonDocument("bsonType", new BsonArray { "array", "null" }) },
                                            { "ecosystem", new BsonDocument("bsonType", new BsonArray { "string", "null" }) },
                                            { "cpe", new BsonDocument("bsonType", new BsonArray { "array", "null" }) },
                                            { "cpes", new BsonDocument("bsonType", new BsonArray { "array", "null" }) }
                                        }
                                    }
                                }
                            }
                        }
                    },
                    { "references", new BsonDocument
                        {
                            { "bsonType", new BsonArray { "array", "null" } },
                            { "items", new BsonDocument("bsonType", "string") }
                        }
                    },
                    { "weaknesses", new BsonDocument
                        {
                            { "bsonType", new BsonArray { "array", "null" } },
                            { "items", new BsonDocument("bsonType", "string") }
                        }
                    },
                    { "published", new BsonDocument("bsonType", new BsonArray { "date", "null" }) },
                    { "modified", new BsonDocument("bsonType", new BsonArray { "date", "null" }) },
                    { "provenance", new BsonDocument
                        {
                            { "bsonType", "object" },
                            { "required", new BsonArray { "sourceArtifactSha", "fetchedAt" } },
                            { "properties", new BsonDocument
                                {
                                    { "sourceArtifactSha", new BsonDocument("bsonType", "string") },
                                    { "fetchedAt", new BsonDocument("bsonType", "date") },
                                    { "ingestJobId", new BsonDocument("bsonType", new BsonArray { "string", "null" }) },
                                    { "signature", new BsonDocument("bsonType", new BsonArray { "object", "null" }) }
                                }
                            }
                        }
                    },
                    { "ingestedAt", new BsonDocument("bsonType", "date") }
                }
            }
        };
    }

    // $jsonSchema for advisory_linksets: mirrors AdvisoryLinksetDocument (ObjectId _id,
    // identifiers, observation-id array, optional normalized/provenance subdocuments).
    private static BsonDocument BuildLinksetSchema()
    {
        return new BsonDocument
        {
            { "bsonType", "object" },
            { "required", new BsonArray { "_id", "tenantId", "source", "advisoryId", "observations", "createdAt" } },
            { "properties", new BsonDocument
                {
                    { "_id", new BsonDocument("bsonType", "objectId") },
                    { "tenantId", new BsonDocument("bsonType", "string") },
                    { "source", new BsonDocument("bsonType", "string") },
                    { "advisoryId", new BsonDocument("bsonType", "string") },
                    { "observations", new BsonDocument
                        {
                            { "bsonType", "array" },
                            { "items", new BsonDocument("bsonType", "string") }
                        }
                    },
                    { "normalized", new BsonDocument
                        {
                            { "bsonType", new BsonArray { "object", "null" } },
                            { "properties", new BsonDocument
                                {
                                    { "purls", new BsonDocument { { "bsonType", new BsonArray { "array", "null" } }, { "items", new BsonDocument("bsonType", "string") } } },
                                    { "versions", new BsonDocument { { "bsonType", new BsonArray { "array", "null" } }, { "items", new BsonDocument("bsonType", "string") } } },
                                    { "ranges", new BsonDocument { { "bsonType", new BsonArray { "array", "null" } }, { "items", new BsonDocument("bsonType", "object") } } },
                                    { "severities", new BsonDocument { { "bsonType", new BsonArray { "array", "null" } }, { "items", new BsonDocument("bsonType", "object") } } }
                                }
                            }
                        }
                    },
                    { "createdAt", new BsonDocument("bsonType", "date") },
                    { "builtByJobId", new BsonDocument("bsonType", new BsonArray { "string", "null" }) },
                    { "provenance", new BsonDocument
                        {
                            { "bsonType", new BsonArray { "object", "null" } },
                            { "properties", new BsonDocument
                                {
                                    { "observationHashes", new BsonDocument { { "bsonType", new BsonArray { "array", "null" } }, { "items", new BsonDocument("bsonType", "string") } } },
                                    { "toolVersion", new BsonDocument("bsonType", new BsonArray { "string", "null" }) },
                                    { "policyHash", new BsonDocument("bsonType", new BsonArray { "string", "null" }) }
                                }
                            }
                        }
                    }
                }
            }
        };
    }
}
|
||||
@@ -0,0 +1,22 @@
|
||||
using System;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using StellaOps.Concelier.Core.Observations;
|
||||
using StellaOps.Concelier.Models.Observations;
|
||||
|
||||
namespace StellaOps.Concelier.Storage.Mongo.Observations;
|
||||
|
||||
/// <summary>
/// Storage adapter that satisfies the core observation sink contract by forwarding upserts
/// to the storage-layer observation store.
/// </summary>
internal sealed class AdvisoryObservationSink : IAdvisoryObservationSink
{
    private readonly IAdvisoryObservationStore _store;

    public AdvisoryObservationSink(IAdvisoryObservationStore store)
    {
        _store = store ?? throw new ArgumentNullException(nameof(store));
    }

    /// <summary>Validates the argument, then delegates persistence to the store.</summary>
    public Task UpsertAsync(AdvisoryObservation observation, CancellationToken cancellationToken)
    {
        // Consistency fix: validate eagerly (matching AdvisoryLinksetSink.UpsertAsync) so a
        // null observation fails synchronously with a clear parameter name.
        ArgumentNullException.ThrowIfNull(observation);
        return _store.UpsertAsync(observation, cancellationToken);
    }
}
|
||||
@@ -80,6 +80,13 @@ public static class ServiceCollectionExtensions
|
||||
services.AddSingleton<IAdvisoryEventRepository, MongoAdvisoryEventRepository>();
|
||||
services.AddSingleton<IAdvisoryEventLog, AdvisoryEventLog>();
|
||||
services.AddSingleton<IAdvisoryRawRepository, MongoAdvisoryRawRepository>();
|
||||
services.AddSingleton<StellaOps.Concelier.Storage.Mongo.Linksets.MongoAdvisoryLinksetStore>();
|
||||
services.AddSingleton<StellaOps.Concelier.Core.Linksets.IAdvisoryLinksetStore>(sp =>
|
||||
sp.GetRequiredService<StellaOps.Concelier.Storage.Mongo.Linksets.MongoAdvisoryLinksetStore>());
|
||||
services.AddSingleton<StellaOps.Concelier.Core.Linksets.IAdvisoryLinksetLookup>(sp =>
|
||||
sp.GetRequiredService<StellaOps.Concelier.Storage.Mongo.Linksets.MongoAdvisoryLinksetStore>());
|
||||
services.AddSingleton<StellaOps.Concelier.Core.Linksets.IAdvisoryObservationSink, StellaOps.Concelier.Storage.Mongo.Linksets.AdvisoryObservationSink>();
|
||||
services.AddSingleton<StellaOps.Concelier.Core.Linksets.IAdvisoryLinksetSink, StellaOps.Concelier.Storage.Mongo.Linksets.AdvisoryLinksetSink>();
|
||||
services.AddSingleton<IExportStateStore, ExportStateStore>();
|
||||
services.TryAddSingleton<ExportStateManager>();
|
||||
|
||||
|
||||
@@ -0,0 +1,94 @@
|
||||
using System.Collections.Generic;
|
||||
using System.Collections.Immutable;
|
||||
using StellaOps.Concelier.Core.Linksets;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Tests.Linksets;
|
||||
|
||||
public sealed class AdvisoryLinksetQueryServiceTests
|
||||
{
|
||||
[Fact]
public async Task QueryAsync_ReturnsPagedResults_WithCursor()
{
    // Arrange: three linksets for one tenant, newest (adv-003) first by CreatedAt.
    var linksets = new List<AdvisoryLinkset>
    {
        new("tenant", "ghsa", "adv-003",
            ImmutableArray.Create("obs-003"),
            new AdvisoryLinksetNormalized(new[]{"pkg:npm/a"}, new[]{"1.0.0"}, null, null),
            null, DateTimeOffset.Parse("2025-11-10T12:00:00Z"), null),
        new("tenant", "ghsa", "adv-002",
            ImmutableArray.Create("obs-002"),
            new AdvisoryLinksetNormalized(new[]{"pkg:npm/b"}, new[]{"2.0.0"}, null, null),
            null, DateTimeOffset.Parse("2025-11-09T12:00:00Z"), null),
        new("tenant", "ghsa", "adv-001",
            ImmutableArray.Create("obs-001"),
            new AdvisoryLinksetNormalized(new[]{"pkg:npm/c"}, new[]{"3.0.0"}, null, null),
            null, DateTimeOffset.Parse("2025-11-08T12:00:00Z"), null),
    };

    var lookup = new FakeLinksetLookup(linksets);
    var service = new AdvisoryLinksetQueryService(lookup);

    // Act: request the first page of two.
    // Fix: named arguments are case-sensitive; the record parameter is "Limit", so the
    // original "limit:" (and the mixed "limit:/Cursor:" below) did not compile.
    var firstPage = await service.QueryAsync(new AdvisoryLinksetQueryOptions("tenant", Limit: 2), CancellationToken.None);

    // Assert: newest two returned, with a continuation cursor.
    Assert.Equal(2, firstPage.Linksets.Length);
    Assert.True(firstPage.HasMore);
    Assert.False(string.IsNullOrWhiteSpace(firstPage.NextCursor));
    Assert.Equal("adv-003", firstPage.Linksets[0].AdvisoryId);
    Assert.Equal("pkg:npm/a", firstPage.Linksets[0].Normalized?.Purls?.First());

    // Act: follow the cursor to the second (final) page.
    var secondPage = await service.QueryAsync(new AdvisoryLinksetQueryOptions("tenant", Limit: 2, Cursor: firstPage.NextCursor), CancellationToken.None);

    // Assert: single remaining item, no further cursor.
    Assert.Single(secondPage.Linksets);
    Assert.False(secondPage.HasMore);
    Assert.Null(secondPage.NextCursor);
    Assert.Equal("adv-001", secondPage.Linksets[0].AdvisoryId);
}
||||
|
||||
[Fact]
|
||||
public async Task QueryAsync_InvalidCursor_ThrowsFormatException()
|
||||
{
|
||||
var lookup = new FakeLinksetLookup(Array.Empty<AdvisoryLinkset>());
|
||||
var service = new AdvisoryLinksetQueryService(lookup);
|
||||
|
||||
await Assert.ThrowsAsync<FormatException>(async () =>
|
||||
{
|
||||
await service.QueryAsync(new AdvisoryLinksetQueryOptions("tenant", limit: 1, Cursor: "not-base64"), CancellationToken.None);
|
||||
});
|
||||
}
|
||||
|
||||
private sealed class FakeLinksetLookup : IAdvisoryLinksetLookup
|
||||
{
|
||||
private readonly IReadOnlyList<AdvisoryLinkset> _linksets;
|
||||
|
||||
public FakeLinksetLookup(IReadOnlyList<AdvisoryLinkset> linksets)
|
||||
{
|
||||
_linksets = linksets;
|
||||
}
|
||||
|
||||
public Task<IReadOnlyList<AdvisoryLinkset>> FindByTenantAsync(
|
||||
string tenantId,
|
||||
IEnumerable<string>? advisoryIds,
|
||||
IEnumerable<string>? sources,
|
||||
AdvisoryLinksetCursor? cursor,
|
||||
int limit,
|
||||
CancellationToken cancellationToken)
|
||||
{
|
||||
var ordered = _linksets
|
||||
.Where(ls => ls.TenantId == tenantId)
|
||||
.OrderByDescending(ls => ls.CreatedAt)
|
||||
.ThenBy(ls => ls.AdvisoryId, StringComparer.Ordinal)
|
||||
.ToList();
|
||||
|
||||
if (cursor is not null)
|
||||
{
|
||||
ordered = ordered
|
||||
.Where(ls => ls.CreatedAt < cursor.CreatedAt ||
|
||||
(ls.CreatedAt == cursor.CreatedAt && string.Compare(ls.AdvisoryId, cursor.AdvisoryId, StringComparison.Ordinal) > 0))
|
||||
.ToList();
|
||||
}
|
||||
|
||||
return Task.FromResult<IReadOnlyList<AdvisoryLinkset>>(ordered.Take(limit).ToList());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -205,6 +205,104 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
|
||||
Assert.Equal("tenant-a:nvd:alpha:1", secondObservations[0].GetProperty("observationId").GetString());
|
||||
}
|
||||
|
||||
    [Fact]
    public async Task LinksetsEndpoint_ReturnsNormalizedLinksetsFromIngestion()
    {
        var tenant = "tenant-linkset-ingest";
        using var client = _factory.CreateClient();
        // NOTE(review): header name here is "X-Stella-Tenant" while other platform services
        // document "X-StellaOps-Tenant" — confirm this matches this web service's expectation.
        client.DefaultRequestHeaders.Add("X-Stella-Tenant", tenant);

        // Ingest two advisories; each should produce one normalized linkset.
        var firstIngest = await client.PostAsJsonAsync("/ingest/advisory", BuildAdvisoryIngestRequest("sha256:linkset-1", "GHSA-LINK-001", purls: new[] { "pkg:npm/demo@1.0.0" }));
        firstIngest.EnsureSuccessStatusCode();

        var secondIngest = await client.PostAsJsonAsync("/ingest/advisory", BuildAdvisoryIngestRequest("sha256:linkset-2", "GHSA-LINK-002", purls: new[] { "pkg:npm/demo@2.0.0" }));
        secondIngest.EnsureSuccessStatusCode();

        var response = await client.GetAsync("/linksets?tenant=tenant-linkset-ingest&limit=10");
        response.EnsureSuccessStatusCode();

        var payload = await response.Content.ReadFromJsonAsync<AdvisoryLinksetQueryResponse>();
        Assert.NotNull(payload);
        Assert.Equal(2, payload!.Linksets.Length);

        // Both ingested advisories must appear, with their purls and versions carried through.
        var linksetAdvisoryIds = payload.Linksets.Select(ls => ls.AdvisoryId).OrderBy(id => id, StringComparer.Ordinal).ToArray();
        Assert.Equal(new[] { "GHSA-LINK-001", "GHSA-LINK-002" }, linksetAdvisoryIds);

        var allPurls = payload.Linksets.SelectMany(ls => ls.Purls).OrderBy(p => p, StringComparer.Ordinal).ToArray();
        Assert.Contains("pkg:npm/demo@1.0.0", allPurls);
        Assert.Contains("pkg:npm/demo@2.0.0", allPurls);

        var versions = payload.Linksets
            .SelectMany(ls => ls.Versions)
            .Distinct(StringComparer.Ordinal)
            .OrderBy(v => v, StringComparer.Ordinal)
            .ToArray();
        Assert.Contains("1.0.0", versions);
        Assert.Contains("2.0.0", versions);

        // Two results within a limit of ten: no continuation expected.
        Assert.False(payload.HasMore);
        Assert.True(string.IsNullOrEmpty(payload.NextCursor));
    }
|
||||
|
||||
    [Fact]
    public async Task LinksetsEndpoint_SupportsCursorPagination()
    {
        var tenant = "tenant-linkset-page";
        // Two documents for the tenant under test (newest first by CreatedAt) plus one for
        // another tenant that must never leak into the results.
        var documents = new[]
        {
            CreateLinksetDocument(
                tenant,
                "nvd",
                "ADV-002",
                new[] { "obs-2" },
                new[] { "pkg:npm/demo@2.0.0" },
                new[] { "2.0.0" },
                new DateTime(2025, 1, 6, 0, 0, 0, DateTimeKind.Utc)),
            CreateLinksetDocument(
                tenant,
                "osv",
                "ADV-001",
                new[] { "obs-1" },
                new[] { "pkg:npm/demo@1.0.0" },
                new[] { "1.0.0" },
                new DateTime(2025, 1, 5, 0, 0, 0, DateTimeKind.Utc)),
            CreateLinksetDocument(
                "tenant-other",
                "osv",
                "ADV-999",
                new[] { "obs-x" },
                new[] { "pkg:npm/other@1.0.0" },
                new[] { "1.0.0" },
                new DateTime(2025, 1, 4, 0, 0, 0, DateTimeKind.Utc))
        };

        await SeedLinksetDocumentsAsync(documents);

        using var client = _factory.CreateClient();

        // Page 1: the newest document plus a cursor for the next page.
        var firstResponse = await client.GetAsync($"/linksets?tenant={tenant}&limit=1");
        firstResponse.EnsureSuccessStatusCode();
        var firstPayload = await firstResponse.Content.ReadFromJsonAsync<AdvisoryLinksetQueryResponse>();
        Assert.NotNull(firstPayload);
        var first = Assert.Single(firstPayload!.Linksets);
        Assert.Equal("ADV-002", first.AdvisoryId);
        Assert.Equal(new[] { "pkg:npm/demo@2.0.0" }, first.Purls.ToArray());
        Assert.Equal(new[] { "2.0.0" }, first.Versions.ToArray());
        Assert.True(firstPayload.HasMore);
        Assert.False(string.IsNullOrWhiteSpace(firstPayload.NextCursor));

        // Page 2: the cursor is URL-escaped (base64 may contain '+'/'=') and yields the older doc.
        var secondResponse = await client.GetAsync($"/linksets?tenant={tenant}&limit=1&cursor={Uri.EscapeDataString(firstPayload.NextCursor!)}");
        secondResponse.EnsureSuccessStatusCode();
        var secondPayload = await secondResponse.Content.ReadFromJsonAsync<AdvisoryLinksetQueryResponse>();
        Assert.NotNull(secondPayload);
        var second = Assert.Single(secondPayload!.Linksets);
        Assert.Equal("ADV-001", second.AdvisoryId);
        Assert.Equal(new[] { "pkg:npm/demo@1.0.0" }, second.Purls.ToArray());
        Assert.Equal(new[] { "1.0.0" }, second.Versions.ToArray());
        Assert.False(secondPayload.HasMore);
        Assert.True(string.IsNullOrEmpty(secondPayload.NextCursor));
    }
|
||||
|
||||
[Fact]
|
||||
public async Task ObservationsEndpoint_ReturnsBadRequestWhenTenantMissing()
|
||||
{
|
||||
@@ -1505,6 +1603,52 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
|
||||
await SeedAdvisoryRawDocumentsAsync(rawDocuments);
|
||||
}
|
||||
|
||||
private async Task SeedLinksetDocumentsAsync(IEnumerable<AdvisoryLinksetDocument> documents)
|
||||
{
|
||||
var client = new MongoClient(_runner.ConnectionString);
|
||||
var database = client.GetDatabase(MongoStorageDefaults.DefaultDatabaseName);
|
||||
var collection = database.GetCollection<AdvisoryLinksetDocument>(MongoStorageDefaults.Collections.AdvisoryLinksets);
|
||||
|
||||
try
|
||||
{
|
||||
await database.DropCollectionAsync(MongoStorageDefaults.Collections.AdvisoryLinksets);
|
||||
}
|
||||
catch (MongoCommandException ex) when (ex.CodeName == "NamespaceNotFound" || ex.Message.Contains("ns not found", StringComparison.OrdinalIgnoreCase))
|
||||
{
|
||||
// Collection not created yet; safe to ignore.
|
||||
}
|
||||
|
||||
var snapshot = documents?.ToArray() ?? Array.Empty<AdvisoryLinksetDocument>();
|
||||
if (snapshot.Length > 0)
|
||||
{
|
||||
await collection.InsertManyAsync(snapshot);
|
||||
}
|
||||
}
|
||||
|
||||
private static AdvisoryLinksetDocument CreateLinksetDocument(
|
||||
string tenant,
|
||||
string source,
|
||||
string advisoryId,
|
||||
IEnumerable<string> observationIds,
|
||||
IEnumerable<string> purls,
|
||||
IEnumerable<string> versions,
|
||||
DateTime createdAtUtc)
|
||||
{
|
||||
return new AdvisoryLinksetDocument
|
||||
{
|
||||
TenantId = tenant,
|
||||
Source = source,
|
||||
AdvisoryId = advisoryId,
|
||||
Observations = observationIds.ToList(),
|
||||
CreatedAt = DateTime.SpecifyKind(createdAtUtc, DateTimeKind.Utc),
|
||||
Normalized = new AdvisoryLinksetNormalizedDocument
|
||||
{
|
||||
Purls = purls.ToList(),
|
||||
Versions = versions.ToList()
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private static AdvisoryObservationDocument[] BuildSampleObservationDocuments()
|
||||
{
|
||||
return new[]
|
||||
|
||||
@@ -0,0 +1,44 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Text.Json.Serialization;
|
||||
|
||||
namespace StellaOps.Excititor.WebService.Contracts;
|
||||
|
||||
/// <summary>
/// One NDJSON line of the evidence-chunk stream: a single provider claim about a
/// vulnerability/product pair, with its source document and optional signature details.
/// </summary>
public sealed record VexEvidenceChunkResponse(
    [property: JsonPropertyName("observationId")] string ObservationId,
    [property: JsonPropertyName("linksetId")] string LinksetId,
    [property: JsonPropertyName("vulnerabilityId")] string VulnerabilityId,
    [property: JsonPropertyName("productKey")] string ProductKey,
    [property: JsonPropertyName("providerId")] string ProviderId,
    [property: JsonPropertyName("status")] string Status,
    [property: JsonPropertyName("justification")] string? Justification,
    [property: JsonPropertyName("detail")] string? Detail,
    [property: JsonPropertyName("scopeScore")] double? ScopeScore,
    [property: JsonPropertyName("firstSeen")] DateTimeOffset FirstSeen,
    [property: JsonPropertyName("lastSeen")] DateTimeOffset LastSeen,
    [property: JsonPropertyName("scope")] VexEvidenceChunkScope Scope,
    [property: JsonPropertyName("document")] VexEvidenceChunkDocument Document,
    [property: JsonPropertyName("signature")] VexEvidenceChunkSignature? Signature,
    [property: JsonPropertyName("metadata")] IReadOnlyDictionary<string, string> Metadata);
|
||||
|
||||
/// <summary>Product scope a claim applies to: the product key plus optional identifiers.</summary>
public sealed record VexEvidenceChunkScope(
    [property: JsonPropertyName("key")] string Key,
    [property: JsonPropertyName("name")] string? Name,
    [property: JsonPropertyName("version")] string? Version,
    [property: JsonPropertyName("purl")] string? Purl,
    [property: JsonPropertyName("cpe")] string? Cpe,
    [property: JsonPropertyName("componentIdentifiers")] IReadOnlyList<string> ComponentIdentifiers);
|
||||
|
||||
/// <summary>Source VEX document reference: digest, format, origin URI and optional revision.</summary>
public sealed record VexEvidenceChunkDocument(
    [property: JsonPropertyName("digest")] string Digest,
    [property: JsonPropertyName("format")] string Format,
    [property: JsonPropertyName("sourceUri")] string SourceUri,
    [property: JsonPropertyName("revision")] string? Revision);
|
||||
|
||||
/// <summary>Signature/attestation details of the source document, when available.</summary>
public sealed record VexEvidenceChunkSignature(
    [property: JsonPropertyName("type")] string Type,
    [property: JsonPropertyName("subject")] string? Subject,
    [property: JsonPropertyName("issuer")] string? Issuer,
    [property: JsonPropertyName("keyId")] string? KeyId,
    [property: JsonPropertyName("verifiedAt")] DateTimeOffset? VerifiedAt,
    [property: JsonPropertyName("transparencyRef")] string? TransparencyRef);
|
||||
@@ -4,6 +4,7 @@ using System.Linq;
|
||||
using System.Collections.Immutable;
|
||||
using System.Globalization;
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using Microsoft.AspNetCore.Authentication;
|
||||
using Microsoft.AspNetCore.Http;
|
||||
using Microsoft.AspNetCore.Mvc;
|
||||
@@ -56,6 +57,7 @@ services.AddVexAttestation();
|
||||
services.Configure<VexAttestationClientOptions>(configuration.GetSection("Excititor:Attestation:Client"));
|
||||
services.Configure<VexAttestationVerificationOptions>(configuration.GetSection("Excititor:Attestation:Verification"));
|
||||
services.AddVexPolicy();
|
||||
services.AddSingleton<IVexEvidenceChunkService, VexEvidenceChunkService>();
|
||||
services.AddRedHatCsafConnector();
|
||||
services.Configure<MirrorDistributionOptions>(configuration.GetSection(MirrorDistributionOptions.SectionName));
|
||||
services.AddSingleton<MirrorRateLimiter>();
|
||||
@@ -515,6 +517,69 @@ app.MapGet("/v1/vex/observations/{vulnerabilityId}/{productKey}", async (
|
||||
return Results.Json(response);
|
||||
});
|
||||
|
||||
// Streams VEX evidence chunks for a vulnerability/product pair as NDJSON.
// Requires the "vex.read" scope; tenant comes from TryResolveTenant (header optional).
app.MapGet("/v1/vex/evidence/chunks", async (
    HttpContext context,
    [FromServices] IVexEvidenceChunkService chunkService,
    [FromServices] IOptions<VexMongoStorageOptions> storageOptions,
    CancellationToken cancellationToken) =>
{
    var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
    if (scopeResult is not null)
    {
        return scopeResult;
    }

    if (!TryResolveTenant(context, storageOptions.Value, requireHeader: false, out var tenant, out var tenantError))
    {
        return tenantError;
    }

    // Both identifiers are mandatory; everything else is an optional filter.
    var vulnerabilityId = context.Request.Query["vulnerabilityId"].FirstOrDefault();
    var productKey = context.Request.Query["productKey"].FirstOrDefault();
    if (string.IsNullOrWhiteSpace(vulnerabilityId) || string.IsNullOrWhiteSpace(productKey))
    {
        return ValidationProblem("vulnerabilityId and productKey are required.");
    }

    var providerFilter = BuildStringFilterSet(context.Request.Query["providerId"]);
    var statusFilter = BuildStatusFilter(context.Request.Query["status"]);
    var since = ParseSinceTimestamp(context.Request.Query["since"]);
    var limit = ResolveLimit(context.Request.Query["limit"], defaultValue: 200, min: 1, max: 500);

    var request = new VexEvidenceChunkRequest(
        tenant,
        vulnerabilityId.Trim(),
        productKey.Trim(),
        providerFilter,
        statusFilter,
        since,
        limit);

    VexEvidenceChunkResult result;
    try
    {
        result = await chunkService.QueryAsync(request, cancellationToken).ConfigureAwait(false);
    }
    catch (OperationCanceledException)
    {
        // Client went away mid-query; 499 mirrors nginx's "client closed request".
        return Results.StatusCode(StatusCodes.Status499ClientClosedRequest);
    }

    // Counts travel in headers because the body is a newline-delimited stream, not a JSON envelope.
    context.Response.Headers["X-Total-Count"] = result.TotalCount.ToString(CultureInfo.InvariantCulture);
    context.Response.Headers["X-Truncated"] = result.Truncated ? "true" : "false";
    context.Response.ContentType = "application/x-ndjson";

    // Web defaults: camelCase property names to match the contract's JsonPropertyName values.
    var options = new JsonSerializerOptions(JsonSerializerDefaults.Web);
    foreach (var chunk in result.Chunks)
    {
        var line = JsonSerializer.Serialize(chunk, options);
        await context.Response.WriteAsync(line, cancellationToken).ConfigureAwait(false);
        await context.Response.WriteAsync("\n", cancellationToken).ConfigureAwait(false);
    }

    // Body has already been written directly to the response stream.
    return Results.Empty;
});
|
||||
|
||||
app.MapPost("/aoc/verify", async (
|
||||
HttpContext context,
|
||||
VexAocVerifyRequest? request,
|
||||
|
||||
@@ -0,0 +1,130 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Collections.Immutable;
|
||||
using System.Globalization;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using StellaOps.Excititor.Core;
|
||||
using StellaOps.Excititor.Storage.Mongo;
|
||||
using StellaOps.Excititor.WebService.Contracts;
|
||||
|
||||
namespace StellaOps.Excititor.WebService.Services;
|
||||
|
||||
/// <summary>Queries VEX claims and shapes them into evidence chunks for NDJSON streaming.</summary>
internal interface IVexEvidenceChunkService
{
    Task<VexEvidenceChunkResult> QueryAsync(VexEvidenceChunkRequest request, CancellationToken cancellationToken);
}
|
||||
|
||||
/// <summary>
/// Parameters for an evidence-chunk query. Empty ProviderIds/Statuses mean "no filter";
/// Since is an optional lower bound on claim age; Limit caps the number of chunks returned.
/// </summary>
internal sealed record VexEvidenceChunkRequest(
    string Tenant,
    string VulnerabilityId,
    string ProductKey,
    ImmutableHashSet<string> ProviderIds,
    ImmutableHashSet<VexClaimStatus> Statuses,
    DateTimeOffset? Since,
    int Limit);
|
||||
|
||||
/// <summary>
/// Query outcome: the mapped chunks, whether the result set was truncated to the limit,
/// the pre-truncation match count, and the generation timestamp.
/// </summary>
internal sealed record VexEvidenceChunkResult(
    IReadOnlyList<VexEvidenceChunkResponse> Chunks,
    bool Truncated,
    int TotalCount,
    DateTimeOffset GeneratedAtUtc);
|
||||
|
||||
/// <summary>
/// Default <see cref="IVexEvidenceChunkService"/>: loads claims for a vulnerability/product
/// pair, applies provider/status filters, and maps them to transport-shaped chunks.
/// </summary>
internal sealed class VexEvidenceChunkService : IVexEvidenceChunkService
{
    private readonly IVexClaimStore _claimStore;
    private readonly TimeProvider _timeProvider; // injected so tests can control "now"

    public VexEvidenceChunkService(IVexClaimStore claimStore, TimeProvider timeProvider)
    {
        _claimStore = claimStore ?? throw new ArgumentNullException(nameof(claimStore));
        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
    }

    /// <summary>
    /// Loads claims, filters them, orders by LastSeen descending, and truncates to the
    /// requested limit. TotalCount reflects the pre-truncation match count.
    /// </summary>
    public async Task<VexEvidenceChunkResult> QueryAsync(VexEvidenceChunkRequest request, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(request);

        var claims = await _claimStore
            .FindAsync(request.VulnerabilityId, request.ProductKey, request.Since, cancellationToken)
            .ConfigureAwait(false);

        var filtered = claims
            .Where(claim => MatchesProvider(claim, request.ProviderIds))
            .Where(claim => MatchesStatus(claim, request.Statuses))
            .OrderByDescending(claim => claim.LastSeen)
            .ToList();

        var total = filtered.Count;
        if (filtered.Count > request.Limit)
        {
            filtered = filtered.Take(request.Limit).ToList();
        }

        var chunks = filtered
            .Select(MapChunk)
            .ToList();

        return new VexEvidenceChunkResult(
            chunks,
            total > request.Limit, // truncated when more matched than the page holds
            total,
            _timeProvider.GetUtcNow());
    }

    // Empty filter set means "no filter". Case-insensitive LINQ Contains (linear scan).
    private static bool MatchesProvider(VexClaim claim, ImmutableHashSet<string> providers)
        => providers.Count == 0 || providers.Contains(claim.ProviderId, StringComparer.OrdinalIgnoreCase);

    private static bool MatchesStatus(VexClaim claim, ImmutableHashSet<VexClaimStatus> statuses)
        => statuses.Count == 0 || statuses.Contains(claim.Status);

    // Projects a claim onto the wire contract. Observation/linkset ids are synthesized
    // deterministically from provider+document digest and vulnerability+product key.
    private static VexEvidenceChunkResponse MapChunk(VexClaim claim)
    {
        var observationId = string.Create(CultureInfo.InvariantCulture, $"{claim.ProviderId}:{claim.Document.Digest}");
        var linksetId = string.Create(CultureInfo.InvariantCulture, $"{claim.VulnerabilityId}:{claim.Product.Key}");

        var scope = new VexEvidenceChunkScope(
            claim.Product.Key,
            claim.Product.Name,
            claim.Product.Version,
            claim.Product.Purl,
            claim.Product.Cpe,
            claim.Product.ComponentIdentifiers);

        var document = new VexEvidenceChunkDocument(
            claim.Document.Digest,
            claim.Document.Format.ToString().ToLowerInvariant(),
            claim.Document.SourceUri.ToString(),
            claim.Document.Revision);

        var signature = claim.Document.Signature is null
            ? null
            : new VexEvidenceChunkSignature(
                claim.Document.Signature.Type,
                claim.Document.Signature.Subject,
                claim.Document.Signature.Issuer,
                claim.Document.Signature.KeyId,
                claim.Document.Signature.VerifiedAt,
                claim.Document.Signature.TransparencyLogReference);

        // Prefer an explicit confidence score; fall back to the severity signal when absent.
        var scopeScore = claim.Confidence?.Score ?? claim.Signals?.Severity?.Score;

        return new VexEvidenceChunkResponse(
            observationId,
            linksetId,
            claim.VulnerabilityId,
            claim.Product.Key,
            claim.ProviderId,
            claim.Status.ToString(),
            claim.Justification?.ToString(),
            claim.Detail,
            scopeScore,
            claim.FirstSeen,
            claim.LastSeen,
            scope,
            document,
            signature,
            claim.AdditionalMetadata);
    }
}
|
||||
@@ -0,0 +1,3 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
[assembly: InternalsVisibleTo("StellaOps.Excititor.Attestation.Tests")]
|
||||
@@ -0,0 +1,76 @@
|
||||
using System;
|
||||
using System.Collections.Immutable;
|
||||
|
||||
namespace StellaOps.Excititor.Core.Observations;
|
||||
|
||||
/// <summary>
/// Immutable timeline event emitted for ingest/linkset changes. The constructor trims all
/// textual fields, lower-cases the tenant, and normalizes the attribute map so equal events
/// compare deterministically.
/// </summary>
public sealed record TimelineEvent
{
    public TimelineEvent(
        string eventId,
        string tenant,
        string providerId,
        string streamId,
        string eventType,
        string traceId,
        string justificationSummary,
        DateTimeOffset createdAt,
        string? evidenceHash = null,
        string? payloadHash = null,
        ImmutableDictionary<string, string>? attributes = null)
    {
        EventId = RequireText(eventId, nameof(eventId));
        // Tenants are stored lower-cased so downstream comparisons are case-insensitive.
        Tenant = RequireText(tenant, nameof(tenant)).ToLowerInvariant();
        ProviderId = RequireText(providerId, nameof(providerId));
        StreamId = RequireText(streamId, nameof(streamId));
        EventType = RequireText(eventType, nameof(eventType));
        TraceId = RequireText(traceId, nameof(traceId));
        // Summary is optional: null collapses to the empty string.
        JustificationSummary = justificationSummary?.Trim() ?? string.Empty;
        EvidenceHash = evidenceHash?.Trim();
        PayloadHash = payloadHash?.Trim();
        CreatedAt = createdAt;
        Attributes = CleanAttributes(attributes);
    }

    public string EventId { get; }
    public string Tenant { get; }
    public string ProviderId { get; }
    public string StreamId { get; }
    public string EventType { get; }
    public string TraceId { get; }
    public string JustificationSummary { get; }
    public string? EvidenceHash { get; }
    public string? PayloadHash { get; }
    public DateTimeOffset CreatedAt { get; }
    public ImmutableDictionary<string, string> Attributes { get; }

    // Rejects null/blank input and strips surrounding whitespace.
    private static string RequireText(string value, string name)
    {
        if (string.IsNullOrWhiteSpace(value))
        {
            throw new ArgumentException($"{name} cannot be null or whitespace", name);
        }

        return value.Trim();
    }

    // Drops blank keys and null values, trims keys, and rebuilds with an ordinal comparer.
    // Values are intentionally kept verbatim.
    private static ImmutableDictionary<string, string> CleanAttributes(ImmutableDictionary<string, string>? attributes)
    {
        if (attributes is not { Count: > 0 })
        {
            return ImmutableDictionary<string, string>.Empty;
        }

        var builder = ImmutableDictionary.CreateBuilder<string, string>(StringComparer.Ordinal);
        foreach (var (key, value) in attributes)
        {
            if (!string.IsNullOrWhiteSpace(key) && value is not null)
            {
                builder[key.Trim()] = value;
            }
        }

        return builder.ToImmutable();
    }
}
|
||||
@@ -0,0 +1,99 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Collections.Immutable;
|
||||
using System.Linq;
|
||||
|
||||
namespace StellaOps.Excititor.Core;
|
||||
|
||||
/// <summary>
/// Aggregation-only attestation payload describing the evidence supplier identity and the
/// observation/linkset it covers. Advisory AI / Policy chain trust through this record;
/// Excititor never interprets verdicts from it.
/// </summary>
public sealed record VexAttestationPayload
{
    public VexAttestationPayload(
        string attestationId,
        string supplierId,
        string observationId,
        string linksetId,
        string vulnerabilityId,
        string productKey,
        string? justificationSummary,
        DateTimeOffset issuedAt,
        ImmutableDictionary<string, string>? metadata = null)
    {
        AttestationId = Require(attestationId, nameof(attestationId));
        SupplierId = Require(supplierId, nameof(supplierId));
        ObservationId = Require(observationId, nameof(observationId));
        LinksetId = Require(linksetId, nameof(linksetId));
        VulnerabilityId = Require(vulnerabilityId, nameof(vulnerabilityId));
        ProductKey = Require(productKey, nameof(productKey));
        JustificationSummary = Clean(justificationSummary);
        // Timestamps are persisted in UTC regardless of the caller's offset.
        IssuedAt = issuedAt.ToUniversalTime();
        Metadata = BuildMetadata(metadata);
    }

    public string AttestationId { get; }
    public string SupplierId { get; }
    public string ObservationId { get; }
    public string LinksetId { get; }
    public string VulnerabilityId { get; }
    public string ProductKey { get; }
    public string? JustificationSummary { get; }
    public DateTimeOffset IssuedAt { get; }
    public ImmutableDictionary<string, string> Metadata { get; }

    // Trims keys and values, drops pairs that are blank after trimming, and rebuilds
    // deterministically (ordinal key order, ordinal comparer).
    private static ImmutableDictionary<string, string> BuildMetadata(ImmutableDictionary<string, string>? metadata)
    {
        if (metadata is not { Count: > 0 })
        {
            return ImmutableDictionary<string, string>.Empty;
        }

        var builder = ImmutableDictionary.CreateBuilder<string, string>(StringComparer.Ordinal);
        foreach (var pair in metadata.OrderBy(kv => kv.Key, StringComparer.Ordinal))
        {
            if (Clean(pair.Key) is { } key && Clean(pair.Value) is { } value)
            {
                builder[key] = value;
            }
        }

        return builder.ToImmutable();
    }

    private static string Require(string value, string name)
        => string.IsNullOrWhiteSpace(value) ? throw new ArgumentException($"{name} must be provided.", name) : value.Trim();

    private static string? Clean(string? value)
        => string.IsNullOrWhiteSpace(value) ? null : value.Trim();
}
|
||||
|
||||
/// <summary>
/// Lightweight mapping from an attestation id back to the observation/linkset/product
/// tuple it covers, used for provenance tracing.
/// </summary>
public sealed record VexAttestationLink
{
    public VexAttestationLink(string attestationId, string observationId, string linksetId, string productKey)
    {
        AttestationId = Require(attestationId, nameof(attestationId));
        ObservationId = Require(observationId, nameof(observationId));
        LinksetId = Require(linksetId, nameof(linksetId));
        ProductKey = Require(productKey, nameof(productKey));
    }

    public string AttestationId { get; }

    public string ObservationId { get; }

    public string LinksetId { get; }

    public string ProductKey { get; }

    // Shared guard: reject null/blank input and strip surrounding whitespace.
    private static string Require(string value, string name)
        => string.IsNullOrWhiteSpace(value) ? throw new ArgumentException($"{name} must be provided.", name) : value.Trim();
}
|
||||
@@ -0,0 +1,12 @@
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using StellaOps.Excititor.Core;
|
||||
|
||||
namespace StellaOps.Excititor.Storage.Mongo;
|
||||
|
||||
/// <summary>
/// Persistence for attestation → observation/linkset provenance links, keyed by attestation id.
/// </summary>
public interface IVexAttestationLinkStore
{
    /// <summary>Inserts or replaces the link record for the payload's attestation id.</summary>
    ValueTask UpsertAsync(VexAttestationPayload payload, CancellationToken cancellationToken);

    /// <summary>Returns the payload for the given attestation id, or null when absent.</summary>
    ValueTask<VexAttestationPayload?> FindAsync(string attestationId, CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,43 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Excititor.Core;
|
||||
|
||||
namespace StellaOps.Excititor.Storage.Mongo;
|
||||
|
||||
/// <summary>
/// Mongo-backed <see cref="IVexAttestationLinkStore"/> storing attestation provenance links
/// in the shared attestations collection, keyed by attestation id.
/// </summary>
public sealed class MongoVexAttestationLinkStore : IVexAttestationLinkStore
{
    private readonly IMongoCollection<VexAttestationLinkRecord> _links;

    public MongoVexAttestationLinkStore(IMongoDatabase database)
    {
        ArgumentNullException.ThrowIfNull(database);
        // Ensure BSON class maps are registered before the first (de)serialization.
        VexMongoMappingRegistry.Register();
        _links = database.GetCollection<VexAttestationLinkRecord>(VexMongoCollectionNames.Attestations);
    }

    /// <summary>Replace-with-upsert keyed on the attestation id.</summary>
    public async ValueTask UpsertAsync(VexAttestationPayload payload, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(payload);

        var record = VexAttestationLinkRecord.FromDomain(payload);
        await _links
            .ReplaceOneAsync(
                Builders<VexAttestationLinkRecord>.Filter.Eq(x => x.AttestationId, record.AttestationId),
                record,
                new ReplaceOptions { IsUpsert = true },
                cancellationToken)
            .ConfigureAwait(false);
    }

    /// <summary>Returns the stored payload for the (trimmed) attestation id, or null.</summary>
    public async ValueTask<VexAttestationPayload?> FindAsync(string attestationId, CancellationToken cancellationToken)
    {
        if (string.IsNullOrWhiteSpace(attestationId))
        {
            throw new ArgumentException("Attestation id must be provided.", nameof(attestationId));
        }

        var id = attestationId.Trim();
        var match = await _links
            .Find(Builders<VexAttestationLinkRecord>.Filter.Eq(x => x.AttestationId, id))
            .FirstOrDefaultAsync(cancellationToken)
            .ConfigureAwait(false);

        return match?.ToDomain();
    }
}
|
||||
@@ -0,0 +1,63 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Collections.Immutable;
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
using StellaOps.Excititor.Core;
|
||||
|
||||
namespace StellaOps.Excititor.Storage.Mongo;
|
||||
|
||||
/// <summary>
/// Mongo persistence shape for <see cref="VexAttestationPayload"/> provenance links,
/// keyed by attestation id.
/// </summary>
[BsonIgnoreExtraElements]
internal sealed class VexAttestationLinkRecord
{
    [BsonId]
    public string AttestationId { get; set; } = default!;

    public string SupplierId { get; set; } = default!;

    public string ObservationId { get; set; } = default!;

    public string LinksetId { get; set; } = default!;

    public string VulnerabilityId { get; set; } = default!;

    public string ProductKey { get; set; } = default!;

    public string? JustificationSummary { get; set; }

    // Mongo persists DateTime; kind is pinned to UTC on both write and read.
    public DateTime IssuedAt { get; set; }
        = DateTime.SpecifyKind(DateTime.UtcNow, DateTimeKind.Utc);

    public Dictionary<string, string> Metadata { get; set; } = new(StringComparer.Ordinal);

    /// <summary>Copies a domain payload into a persistable record.</summary>
    public static VexAttestationLinkRecord FromDomain(VexAttestationPayload payload)
    {
        // Build the dictionary with an explicit loop rather than Enumerable.ToDictionary:
        // this file does not import System.Linq, so the LINQ extension is not reliably in
        // scope (it only compiled if the project enables ImplicitUsings).
        var metadata = new Dictionary<string, string>(StringComparer.Ordinal);
        foreach (var pair in payload.Metadata)
        {
            metadata[pair.Key] = pair.Value;
        }

        return new()
        {
            AttestationId = payload.AttestationId,
            SupplierId = payload.SupplierId,
            ObservationId = payload.ObservationId,
            LinksetId = payload.LinksetId,
            VulnerabilityId = payload.VulnerabilityId,
            ProductKey = payload.ProductKey,
            JustificationSummary = payload.JustificationSummary,
            IssuedAt = payload.IssuedAt.UtcDateTime,
            Metadata = metadata,
        };
    }

    /// <summary>Rehydrates the domain payload; IssuedAt is re-pinned to UTC.</summary>
    public VexAttestationPayload ToDomain()
    {
        // Guard against legacy documents where Metadata was stored as null.
        var metadata = (Metadata ?? new Dictionary<string, string>(StringComparer.Ordinal))
            .ToImmutableDictionary(StringComparer.Ordinal);

        return new VexAttestationPayload(
            AttestationId,
            SupplierId,
            ObservationId,
            LinksetId,
            VulnerabilityId,
            ProductKey,
            JustificationSummary,
            new DateTimeOffset(DateTime.SpecifyKind(IssuedAt, DateTimeKind.Utc)),
            metadata);
    }
}
|
||||
@@ -76,4 +76,5 @@ public static class VexMongoCollectionNames
|
||||
public const string Cache = "vex.cache";
|
||||
public const string ConnectorState = "vex.connector_state";
|
||||
public const string ConsensusHolds = "vex.consensus_holds";
|
||||
public const string Attestations = "vex.attestations";
|
||||
}
|
||||
|
||||
@@ -0,0 +1,41 @@
|
||||
using System;
|
||||
using System.Collections.Immutable;
|
||||
using FluentAssertions;
|
||||
using StellaOps.Excititor.Core.Observations;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Excititor.Core.Tests.Observations;
|
||||
|
||||
public class TimelineEventTests
|
||||
{
|
||||
[Fact]
|
||||
public void Normalizes_and_requires_fields()
|
||||
{
|
||||
var evt = new TimelineEvent(
|
||||
eventId: " EVT-1 ",
|
||||
tenant: "TenantA",
|
||||
providerId: "prov",
|
||||
streamId: "stream",
|
||||
eventType: "ingest",
|
||||
traceId: "trace-123",
|
||||
justificationSummary: " summary ",
|
||||
createdAt: DateTimeOffset.UnixEpoch,
|
||||
evidenceHash: " evhash ",
|
||||
payloadHash: " pwhash ",
|
||||
attributes: ImmutableDictionary<string, string>.Empty.Add(" a ", " b " ));
|
||||
|
||||
evt.EventId.Should().Be("EVT-1");
|
||||
evt.Tenant.Should().Be("tenanta");
|
||||
evt.JustificationSummary.Should().Be("summary");
|
||||
evt.EvidenceHash.Should().Be("evhash");
|
||||
evt.PayloadHash.Should().Be("pwhash");
|
||||
evt.Attributes.Should().ContainKey("a");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Throws_on_missing_required()
|
||||
{
|
||||
Action act = () => new TimelineEvent(" ", "t", "p", "s", "t", "trace", "", DateTimeOffset.UtcNow);
|
||||
act.Should().Throw<ArgumentException>();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,15 @@
|
||||
using System;
|
||||
using System.Collections.Immutable;
|
||||
using FluentAssertions;
|
||||
using StellaOps.Excititor.Core;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Excititor.Core.Tests;
|
||||
|
||||
public sealed class VexAttestationPayloadTests
|
||||
{
|
||||
[Fact]
|
||||
public void Payload_NormalizesAndOrdersMetadata()
|
||||
{
|
||||
var metadata = ImmutableDictionary<string, string>.Empty
|
||||
.Add(b,
|
||||
@@ -0,0 +1,86 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Net.Http.Headers;
|
||||
using System.Net.Http.Json;
|
||||
using EphemeralMongo;
|
||||
using Microsoft.AspNetCore.Mvc.Testing;
|
||||
using Microsoft.Extensions.Configuration;
|
||||
using StellaOps.Excititor.Core;
|
||||
using StellaOps.Excititor.Storage.Mongo;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Excititor.WebService.Tests;
|
||||
|
||||
public sealed class VexAttestationLinkEndpointTests : IDisposable
|
||||
{
|
||||
private readonly IMongoRunner _runner;
|
||||
private readonly TestWebApplicationFactory _factory;
|
||||
|
||||
public VexAttestationLinkEndpointTests()
|
||||
{
|
||||
_runner = MongoRunner.Run(new MongoRunnerOptions { UseSingleNodeReplicaSet = true });
|
||||
|
||||
_factory = new TestWebApplicationFactory(
|
||||
configureConfiguration: configuration =>
|
||||
{
|
||||
configuration.AddInMemoryCollection(new Dictionary<string, string?>
|
||||
{
|
||||
[Excititor:Storage:Mongo:ConnectionString] = _runner.ConnectionString,
|
||||
[Excititor:Storage:Mongo:DatabaseName] = vex_attestation_links,
|
||||
[Excititor:Storage:Mongo:DefaultTenant] = tests,
|
||||
});
|
||||
},
|
||||
configureServices: services =>
|
||||
{
|
||||
TestServiceOverrides.Apply(services);
|
||||
services.AddTestAuthentication();
|
||||
});
|
||||
|
||||
SeedLink();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetAttestationLink_ReturnsPayload()
|
||||
{
|
||||
using var client = _factory.CreateClient(new WebApplicationFactoryClientOptions { AllowAutoRedirect = false });
|
||||
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(Bearer, vex.read);
|
||||
|
||||
var response = await client.GetAsync(/v1/vex/attestations/att-123);
|
||||
response.EnsureSuccessStatusCode();
|
||||
|
||||
var payload = await response.Content.ReadFromJsonAsync<VexAttestationPayload>();
|
||||
Assert.NotNull(payload);
|
||||
Assert.Equal(att-123, payload!.AttestationId);
|
||||
Assert.Equal(supplier-a, payload.SupplierId);
|
||||
Assert.Equal(CVE-2025-0001, payload.VulnerabilityId);
|
||||
Assert.Equal(pkg:demo, payload.ProductKey);
|
||||
}
|
||||
|
||||
private void SeedLink()
|
||||
{
|
||||
var client = new MongoDB.Driver.MongoClient(_runner.ConnectionString);
|
||||
var database = client.GetDatabase(vex_attestation_links);
|
||||
var collection = database.GetCollection<VexAttestationLinkRecord>(VexMongoCollectionNames.Attestations);
|
||||
|
||||
var record = new VexAttestationLinkRecord
|
||||
{
|
||||
AttestationId = att-123,
|
||||
SupplierId = supplier-a,
|
||||
ObservationId = obs-1,
|
||||
LinksetId = link-1,
|
||||
VulnerabilityId = CVE-2025-0001,
|
||||
ProductKey = pkg:demo,
|
||||
JustificationSummary = summary,
|
||||
IssuedAt = DateTime.UtcNow,
|
||||
Metadata = new Dictionary<string, string> { [policyRevisionId] = rev-1 },
|
||||
};
|
||||
|
||||
collection.InsertOne(record);
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
_factory.Dispose();
|
||||
_runner.Dispose();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,117 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Collections.Immutable;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using FluentAssertions;
|
||||
using StellaOps.Excititor.Core;
|
||||
using StellaOps.Excititor.Storage.Mongo;
|
||||
using StellaOps.Excititor.WebService.Services;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Excititor.WebService.Tests;
|
||||
|
||||
public sealed class VexEvidenceChunkServiceTests
|
||||
{
|
||||
[Fact]
|
||||
public async Task QueryAsync_FiltersAndLimitsResults()
|
||||
{
|
||||
var now = new DateTimeOffset(2025, 11, 16, 12, 0, 0, TimeSpan.Zero);
|
||||
var claims = new[]
|
||||
{
|
||||
CreateClaim("provider-a", VexClaimStatus.Affected, now.AddHours(-6), now.AddHours(-5), score: 0.9),
|
||||
CreateClaim("provider-b", VexClaimStatus.NotAffected, now.AddHours(-4), now.AddHours(-3), score: 0.2)
|
||||
};
|
||||
|
||||
var service = new VexEvidenceChunkService(new FakeClaimStore(claims), new FixedTimeProvider(now));
|
||||
var request = new VexEvidenceChunkRequest(
|
||||
Tenant: "tenant-a",
|
||||
VulnerabilityId: "CVE-2025-0001",
|
||||
ProductKey: "pkg:docker/demo",
|
||||
ProviderIds: ImmutableHashSet.Create("provider-b"),
|
||||
Statuses: ImmutableHashSet.Create(VexClaimStatus.NotAffected),
|
||||
Since: null,
|
||||
Limit: 1);
|
||||
|
||||
var result = await service.QueryAsync(request, CancellationToken.None);
|
||||
|
||||
result.Truncated.Should().BeTrue();
|
||||
result.TotalCount.Should().Be(1);
|
||||
result.GeneratedAtUtc.Should().Be(now);
|
||||
var chunk = result.Chunks.Single();
|
||||
chunk.ProviderId.Should().Be("provider-b");
|
||||
chunk.Status.Should().Be(VexClaimStatus.NotAffected.ToString());
|
||||
chunk.ScopeScore.Should().Be(0.2);
|
||||
chunk.ObservationId.Should().Contain("provider-b");
|
||||
chunk.Document.Digest.Should().NotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
private static VexClaim CreateClaim(string providerId, VexClaimStatus status, DateTimeOffset firstSeen, DateTimeOffset lastSeen, double? score)
|
||||
{
|
||||
var product = new VexProduct("pkg:docker/demo", "demo", "1.0.0", "pkg:docker/demo:1.0.0", null, new[] { "component-a" });
|
||||
var document = new VexClaimDocument(
|
||||
VexDocumentFormat.SbomCycloneDx,
|
||||
digest: Guid.NewGuid().ToString("N"),
|
||||
sourceUri: new Uri("https://example.test/vex.json"),
|
||||
revision: "r1",
|
||||
signature: new VexSignatureMetadata("cosign", "demo", "issuer", keyId: "kid", verifiedAt: firstSeen, transparencyLogReference: null));
|
||||
|
||||
var signals = score.HasValue
|
||||
? new VexSignalSnapshot(new VexSeveritySignal("cvss", score, "low", vector: null), Kev: null, Epss: null)
|
||||
: null;
|
||||
|
||||
return new VexClaim(
|
||||
"CVE-2025-0001",
|
||||
providerId,
|
||||
product,
|
||||
status,
|
||||
document,
|
||||
firstSeen,
|
||||
lastSeen,
|
||||
justification: VexJustification.ComponentNotPresent,
|
||||
detail: "demo detail",
|
||||
confidence: null,
|
||||
signals: signals,
|
||||
additionalMetadata: ImmutableDictionary<string, string>.Empty);
|
||||
}
|
||||
|
||||
private sealed class FakeClaimStore : IVexClaimStore
|
||||
{
|
||||
private readonly IReadOnlyCollection<VexClaim> _claims;
|
||||
|
||||
public FakeClaimStore(IReadOnlyCollection<VexClaim> claims)
|
||||
{
|
||||
_claims = claims;
|
||||
}
|
||||
|
||||
public ValueTask AppendAsync(IEnumerable<VexClaim> claims, DateTimeOffset observedAt, CancellationToken cancellationToken, MongoDB.Driver.IClientSessionHandle? session = null)
|
||||
=> throw new NotSupportedException();
|
||||
|
||||
public ValueTask<IReadOnlyCollection<VexClaim>> FindAsync(string vulnerabilityId, string productKey, DateTimeOffset? since, CancellationToken cancellationToken, MongoDB.Driver.IClientSessionHandle? session = null)
|
||||
{
|
||||
var query = _claims
|
||||
.Where(claim => claim.VulnerabilityId == vulnerabilityId)
|
||||
.Where(claim => claim.Product.Key == productKey);
|
||||
|
||||
if (since.HasValue)
|
||||
{
|
||||
query = query.Where(claim => claim.LastSeen >= since.Value);
|
||||
}
|
||||
|
||||
return ValueTask.FromResult<IReadOnlyCollection<VexClaim>>(query.ToList());
|
||||
}
|
||||
}
|
||||
|
||||
private sealed class FixedTimeProvider : TimeProvider
|
||||
{
|
||||
private readonly DateTimeOffset _timestamp;
|
||||
|
||||
public FixedTimeProvider(DateTimeOffset timestamp)
|
||||
{
|
||||
_timestamp = timestamp;
|
||||
}
|
||||
|
||||
public override DateTimeOffset GetUtcNow() => _timestamp;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,128 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Net.Http.Headers;
|
||||
using System.Text.Json;
|
||||
using System.Threading.Tasks;
|
||||
using EphemeralMongo;
|
||||
using Microsoft.AspNetCore.Mvc.Testing;
|
||||
using Microsoft.Extensions.Configuration;
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Excititor.Core;
|
||||
using StellaOps.Excititor.Storage.Mongo;
|
||||
using StellaOps.Excititor.WebService.Contracts;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Excititor.WebService.Tests;
|
||||
|
||||
public sealed class VexEvidenceChunksEndpointTests : IDisposable
|
||||
{
|
||||
private readonly IMongoRunner _runner;
|
||||
private readonly TestWebApplicationFactory _factory;
|
||||
|
||||
public VexEvidenceChunksEndpointTests()
|
||||
{
|
||||
_runner = MongoRunner.Run(new MongoRunnerOptions { UseSingleNodeReplicaSet = true });
|
||||
|
||||
_factory = new TestWebApplicationFactory(
|
||||
configureConfiguration: configuration =>
|
||||
{
|
||||
configuration.AddInMemoryCollection(new Dictionary<string, string?>
|
||||
{
|
||||
["Excititor:Storage:Mongo:ConnectionString"] = _runner.ConnectionString,
|
||||
["Excititor:Storage:Mongo:DatabaseName"] = "vex_chunks_tests",
|
||||
["Excititor:Storage:Mongo:DefaultTenant"] = "tests",
|
||||
});
|
||||
},
|
||||
configureServices: services =>
|
||||
{
|
||||
TestServiceOverrides.Apply(services);
|
||||
services.AddTestAuthentication();
|
||||
});
|
||||
|
||||
SeedStatements();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task ChunksEndpoint_Filters_ByProvider_AndStreamsNdjson()
|
||||
{
|
||||
using var client = _factory.CreateClient(new WebApplicationFactoryClientOptions { AllowAutoRedirect = false });
|
||||
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", "vex.read");
|
||||
client.DefaultRequestHeaders.Add("X-Stella-Tenant", "tests");
|
||||
|
||||
var response = await client.GetAsync("/v1/vex/evidence/chunks?vulnerabilityId=CVE-2025-0001&productKey=pkg:docker/demo&providerId=provider-b&limit=1");
|
||||
response.EnsureSuccessStatusCode();
|
||||
|
||||
Assert.True(response.Headers.TryGetValues("Excititor-Results-Truncated", out var truncatedValues));
|
||||
Assert.Contains("true", truncatedValues, StringComparer.OrdinalIgnoreCase);
|
||||
|
||||
var body = await response.Content.ReadAsStringAsync();
|
||||
var lines = body.Split(n, StringSplitOptions.RemoveEmptyEntries);
|
||||
Assert.Single(lines);
|
||||
|
||||
var chunk = JsonSerializer.Deserialize<VexEvidenceChunkResponse>(lines[0], new JsonSerializerOptions(JsonSerializerDefaults.Web));
|
||||
Assert.NotNull(chunk);
|
||||
Assert.Equal("provider-b", chunk!.ProviderId);
|
||||
Assert.Equal("NotAffected", chunk.Status);
|
||||
Assert.Equal("pkg:docker/demo", chunk.Scope.Key);
|
||||
Assert.Equal("CVE-2025-0001", chunk.VulnerabilityId);
|
||||
}
|
||||
|
||||
private void SeedStatements()
|
||||
{
|
||||
var client = new MongoClient(_runner.ConnectionString);
|
||||
var database = client.GetDatabase("vex_chunks_tests");
|
||||
var collection = database.GetCollection<VexStatementRecord>(VexMongoCollectionNames.Statements);
|
||||
|
||||
var now = DateTimeOffset.UtcNow;
|
||||
var claims = new[]
|
||||
{
|
||||
CreateClaim("provider-a", VexClaimStatus.Affected, now.AddHours(-6), now.AddHours(-5), 0.9),
|
||||
CreateClaim("provider-b", VexClaimStatus.NotAffected, now.AddHours(-4), now.AddHours(-3), 0.2),
|
||||
CreateClaim("provider-c", VexClaimStatus.Affected, now.AddHours(-2), now.AddHours(-1), 0.5)
|
||||
};
|
||||
|
||||
var records = claims
|
||||
.Select(claim => VexStatementRecord.FromDomain(claim, now))
|
||||
.ToList();
|
||||
|
||||
collection.InsertMany(records);
|
||||
}
|
||||
|
||||
private static VexClaim CreateClaim(string providerId, VexClaimStatus status, DateTimeOffset firstSeen, DateTimeOffset lastSeen, double? score)
|
||||
{
|
||||
var product = new VexProduct("pkg:docker/demo", "demo", "1.0.0", "pkg:docker/demo:1.0.0", null, new[] { "component-a" });
|
||||
var document = new VexClaimDocument(
|
||||
VexDocumentFormat.SbomCycloneDx,
|
||||
digest: Guid.NewGuid().ToString("N"),
|
||||
sourceUri: new Uri("https://example.test/vex.json"),
|
||||
revision: "r1",
|
||||
signature: new VexSignatureMetadata("cosign", "demo", "issuer", keyId: "kid", verifiedAt: firstSeen, transparencyLogReference: null));
|
||||
|
||||
var signals = score.HasValue
|
||||
? new VexSignalSnapshot(new VexSeveritySignal("cvss", score, "low", vector: null), Kev: null, Epss: null)
|
||||
: null;
|
||||
|
||||
return new VexClaim(
|
||||
"CVE-2025-0001",
|
||||
providerId,
|
||||
product,
|
||||
status,
|
||||
document,
|
||||
firstSeen,
|
||||
lastSeen,
|
||||
justification: VexJustification.ComponentNotPresent,
|
||||
detail: "demo detail",
|
||||
confidence: null,
|
||||
signals: signals,
|
||||
additionalMetadata: null);
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
_factory.Dispose();
|
||||
_runner.Dispose();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
// Temporary shim for compilers that do not surface System.Runtime.CompilerServices.IsExternalInit
|
||||
// (needed for record types). Remove when toolchain natively provides the type.
|
||||
namespace System.Runtime.CompilerServices;
|
||||
|
||||
internal static class IsExternalInit
|
||||
{
|
||||
}
|
||||
@@ -0,0 +1,2 @@
|
||||
{"tenant": "tenant-a", "chain_id": "c8d6f7f1-58f8-4c2d-8d92-f9b8790a0001", "sequence_no": 1, "event_id": "c0e6d9b4-1d89-4b07-b622-1c7b6d111001", "event_type": "finding.assignment", "policy_version": "2025.01", "finding_id": "F-001", "artifact_id": "artifact-1", "actor_id": "system", "actor_type": "system", "occurred_at": "2025-01-01T00:00:00Z", "recorded_at": "2025-01-01T00:00:01Z", "payload": {"comment": "seed event"}, "previous_hash": "0000000000000000000000000000000000000000000000000000000000000000", "event_hash": "0d95f63532b6488407e8fd2e837edb3e9bfc8a2defde232aca99dbfd518558c6", "merkle_leaf_hash": "d08d4da76da50fbe4274a394c73fcaae0180fd591238d224bc7d5efee2ad3696"}
|
||||
{"tenant": "tenant-a", "chain_id": "c8d6f7f1-58f8-4c2d-8d92-f9b8790a0001", "sequence_no": 2, "event_id": "c0e6d9b4-1d89-4b07-b622-1c7b6d111002", "event_type": "finding.comment", "policy_version": "2025.01", "finding_id": "F-001", "artifact_id": "artifact-1", "actor_id": "analyst", "actor_type": "operator", "occurred_at": "2025-01-01T00:00:10Z", "recorded_at": "2025-01-01T00:00:11Z", "payload": {"comment": "follow-up"}, "previous_hash": "PLACEHOLDER", "event_hash": "0e77979af948be38de028a2497f15529473ae5aeb0a95f5d9d648efc8afb9fa3", "merkle_leaf_hash": "2854050efba048f2674ba27fd7dc2f1b65e90e150098bfeeb4fc6e23334c3790"}
|
||||
@@ -0,0 +1,15 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">
|
||||
<PropertyGroup>
|
||||
<OutputType>Exe</OutputType>
|
||||
<TargetFramework>net10.0</TargetFramework>
|
||||
<LangVersion>preview</LangVersion>
|
||||
<Nullable>enable</Nullable>
|
||||
<ImplicitUsings>enable</ImplicitUsings>
|
||||
</PropertyGroup>
|
||||
<ItemGroup>
|
||||
<ProjectReference Include="..\..\StellaOps.Findings.Ledger.csproj" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<PackageReference Include="System.CommandLine" Version="2.0.0-beta4.22272.1" />
|
||||
</ItemGroup>
|
||||
</Project>
|
||||
@@ -0,0 +1,502 @@
|
||||
using System.CommandLine;
|
||||
using System.Diagnostics;
|
||||
using System.Diagnostics.Metrics;
|
||||
using System.Text.Json;
|
||||
using System.Text.Json.Nodes;
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using Microsoft.Extensions.Hosting;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
using StellaOps.Findings.Ledger.Hashing;
|
||||
using StellaOps.Findings.Ledger.Infrastructure;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Merkle;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Postgres;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Projection;
|
||||
using StellaOps.Findings.Ledger.Options;
|
||||
using StellaOps.Findings.Ledger.Observability;
|
||||
using StellaOps.Findings.Ledger.Services;
|
||||
|
||||
// Command-line options
|
||||
var fixturesOption = new Option<FileInfo[]>(
|
||||
name: "--fixture",
|
||||
description: "NDJSON fixtures containing canonical ledger envelopes (sequence-ordered)")
|
||||
{
|
||||
IsRequired = true
|
||||
};
|
||||
fixturesOption.AllowMultipleArgumentsPerToken = true;
|
||||
|
||||
var connectionOption = new Option<string>(
|
||||
name: "--connection",
|
||||
description: "PostgreSQL connection string for ledger DB")
|
||||
{
|
||||
IsRequired = true
|
||||
};
|
||||
|
||||
var tenantOption = new Option<string>(
|
||||
name: "--tenant",
|
||||
getDefaultValue: () => "tenant-a",
|
||||
description: "Tenant identifier for appended events");
|
||||
|
||||
var maxParallelOption = new Option<int>(
|
||||
name: "--maxParallel",
|
||||
getDefaultValue: () => 4,
|
||||
description: "Maximum concurrent append operations");
|
||||
|
||||
var reportOption = new Option<FileInfo?>(
|
||||
name: "--report",
|
||||
description: "Path to write harness report JSON (with DSSE placeholder)");
|
||||
|
||||
var metricsOption = new Option<FileInfo?>(
|
||||
name: "--metrics",
|
||||
description: "Optional path to write metrics snapshot JSON");
|
||||
|
||||
var root = new RootCommand("Findings Ledger Replay Harness (LEDGER-29-008)");
|
||||
root.AddOption(fixturesOption);
|
||||
root.AddOption(connectionOption);
|
||||
root.AddOption(tenantOption);
|
||||
root.AddOption(maxParallelOption);
|
||||
root.AddOption(reportOption);
|
||||
root.AddOption(metricsOption);
|
||||
|
||||
root.SetHandler(async (FileInfo[] fixtures, string connection, string tenant, int maxParallel, FileInfo? reportFile, FileInfo? metricsFile) =>
|
||||
{
|
||||
await using var host = BuildHost(connection);
|
||||
using var scope = host.Services.CreateScope();
|
||||
|
||||
var writeService = scope.ServiceProvider.GetRequiredService<ILedgerEventWriteService>();
|
||||
var projectionWorker = scope.ServiceProvider.GetRequiredService<LedgerProjectionWorker>();
|
||||
var anchorWorker = scope.ServiceProvider.GetRequiredService<LedgerMerkleAnchorWorker>();
|
||||
var logger = scope.ServiceProvider.GetRequiredService<ILoggerFactory>().CreateLogger("Harness");
|
||||
var timeProvider = scope.ServiceProvider.GetRequiredService<TimeProvider>();
|
||||
|
||||
var cts = new CancellationTokenSource();
|
||||
var projectionTask = projectionWorker.StartAsync(cts.Token);
|
||||
var anchorTask = anchorWorker.StartAsync(cts.Token);
|
||||
|
||||
var (meterListener, metrics) = CreateMeterListener();
|
||||
|
||||
var sw = Stopwatch.StartNew();
|
||||
long eventsWritten = 0;
|
||||
|
||||
await Parallel.ForEachAsync(fixtures, new ParallelOptions { MaxDegreeOfParallelism = maxParallel, CancellationToken = cts.Token }, async (file, token) =>
|
||||
{
|
||||
await foreach (var draft in ReadDraftsAsync(file, tenant, timeProvider, token))
|
||||
{
|
||||
var result = await writeService.AppendAsync(draft, token).ConfigureAwait(false);
|
||||
if (result.Status is LedgerWriteStatus.ValidationFailed or LedgerWriteStatus.Conflict)
|
||||
{
|
||||
throw new InvalidOperationException($"Append failed for {draft.EventId}: {string.Join(",", result.Errors)} ({result.ConflictCode})");
|
||||
}
|
||||
|
||||
Interlocked.Increment(ref eventsWritten);
|
||||
if (eventsWritten % 50_000 == 0)
|
||||
{
|
||||
logger.LogInformation("Appended {Count} events...", eventsWritten);
|
||||
}
|
||||
}
|
||||
}).ConfigureAwait(false);
|
||||
|
||||
// Wait for projector to catch up
|
||||
await Task.Delay(TimeSpan.FromSeconds(2), cts.Token);
|
||||
sw.Stop();
|
||||
|
||||
meterListener.RecordObservableInstruments();
|
||||
|
||||
var verification = await VerifyLedgerAsync(scope.ServiceProvider, tenant, eventsWritten, cts.Token).ConfigureAwait(false);
|
||||
|
||||
var writeLatencyP95Ms = Percentile(metrics.HistDouble("ledger_write_latency_seconds"), 95) * 1000;
|
||||
var rebuildP95Ms = Percentile(metrics.HistDouble("ledger_projection_rebuild_seconds"), 95) * 1000;
|
||||
var projectionLagSeconds = metrics.GaugeDouble("ledger_projection_lag_seconds").DefaultIfEmpty(0).Max();
|
||||
var backlogEvents = metrics.GaugeLong("ledger_ingest_backlog_events").DefaultIfEmpty(0).Max();
|
||||
var dbConnections = metrics.GaugeLong("ledger_db_connections_active").DefaultIfEmpty(0).Sum();
|
||||
|
||||
var report = new HarnessReport(
|
||||
tenant,
|
||||
fixtures.Select(f => f.FullName).ToArray(),
|
||||
eventsWritten,
|
||||
sw.Elapsed.TotalSeconds,
|
||||
status: verification.Success ? "pass" : "fail",
|
||||
WriteLatencyP95Ms: writeLatencyP95Ms,
|
||||
ProjectionRebuildP95Ms: rebuildP95Ms,
|
||||
ProjectionLagSecondsMax: projectionLagSeconds,
|
||||
BacklogEventsMax: backlogEvents,
|
||||
DbConnectionsObserved: dbConnections,
|
||||
VerificationErrors: verification.Errors.ToArray());
|
||||
|
||||
var jsonOptions = new JsonSerializerOptions { WriteIndented = true };
|
||||
var json = JsonSerializer.Serialize(report, jsonOptions);
|
||||
Console.WriteLine(json);
|
||||
|
||||
if (reportFile is not null)
|
||||
{
|
||||
await File.WriteAllTextAsync(reportFile.FullName, json, cts.Token).ConfigureAwait(false);
|
||||
await WriteDssePlaceholderAsync(reportFile.FullName, json, cts.Token).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
if (metricsFile is not null)
|
||||
{
|
||||
var snapshot = metrics.ToSnapshot();
|
||||
var metricsJson = JsonSerializer.Serialize(snapshot, jsonOptions);
|
||||
await File.WriteAllTextAsync(metricsFile.FullName, metricsJson, cts.Token).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
cts.Cancel();
|
||||
await Task.WhenAll(projectionTask, anchorTask).WaitAsync(TimeSpan.FromSeconds(5));
|
||||
}, fixturesOption, connectionOption, tenantOption, maxParallelOption, reportOption, metricsOption);
|
||||
|
||||
await root.InvokeAsync(args);
|
||||
|
||||
static async Task WriteDssePlaceholderAsync(string reportPath, string json, CancellationToken cancellationToken)
|
||||
{
|
||||
using var sha = System.Security.Cryptography.SHA256.Create();
|
||||
var digest = sha.ComputeHash(System.Text.Encoding.UTF8.GetBytes(json));
|
||||
var sig = new
|
||||
{
|
||||
payloadType = "application/vnd.stella-ledger-harness+json",
|
||||
sha256 = Convert.ToHexString(digest).ToLowerInvariant(),
|
||||
signedBy = "harness-local",
|
||||
createdAt = DateTimeOffset.UtcNow
|
||||
};
|
||||
|
||||
var sigJson = JsonSerializer.Serialize(sig, new JsonSerializerOptions { WriteIndented = true });
|
||||
await File.WriteAllTextAsync(reportPath + ".sig", sigJson, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
static (MeterListener Listener, MetricsBag Bag) CreateMeterListener()
|
||||
{
|
||||
var bag = new MetricsBag();
|
||||
var listener = new MeterListener
|
||||
{
|
||||
InstrumentPublished = (instrument, meterListener) =>
|
||||
{
|
||||
if (instrument.Meter.Name == "StellaOps.Findings.Ledger")
|
||||
{
|
||||
meterListener.EnableMeasurementEvents(instrument);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
listener.SetMeasurementEventCallback<double>((instrument, measurement, tags, _) =>
|
||||
{
|
||||
bag.Add(instrument, measurement, tags);
|
||||
});
|
||||
listener.SetMeasurementEventCallback<long>((instrument, measurement, tags, _) =>
|
||||
{
|
||||
bag.Add(instrument, measurement, tags);
|
||||
});
|
||||
|
||||
listener.Start();
|
||||
return (listener, bag);
|
||||
}
|
||||
|
||||
static IHost BuildHost(string connectionString)
|
||||
{
|
||||
return Host.CreateDefaultBuilder()
|
||||
.ConfigureLogging(logging =>
|
||||
{
|
||||
logging.ClearProviders();
|
||||
logging.AddSimpleConsole(options =>
|
||||
{
|
||||
options.SingleLine = true;
|
||||
options.TimestampFormat = "HH:mm:ss ";
|
||||
});
|
||||
})
|
||||
.ConfigureServices(services =>
|
||||
{
|
||||
services.Configure<LedgerServiceOptions>(opts =>
|
||||
{
|
||||
opts.Database.ConnectionString = connectionString;
|
||||
});
|
||||
|
||||
services.AddSingleton<TimeProvider>(_ => TimeProvider.System);
|
||||
services.AddSingleton<LedgerDataSource>();
|
||||
services.AddSingleton<ILedgerEventRepository, PostgresLedgerEventRepository>();
|
||||
services.AddSingleton<IFindingProjectionRepository, NoOpProjectionRepository>();
|
||||
services.AddSingleton<ILedgerEventStream, PostgresLedgerEventStream>();
|
||||
services.AddSingleton<IPolicyEvaluationService, NoOpPolicyEvaluationService>();
|
||||
services.AddSingleton<IMerkleAnchorRepository, NoOpMerkleAnchorRepository>();
|
||||
services.AddSingleton<LedgerAnchorQueue>();
|
||||
services.AddSingleton<IMerkleAnchorScheduler, QueueMerkleAnchorScheduler>();
|
||||
services.AddSingleton<LedgerMerkleAnchorWorker>();
|
||||
services.AddSingleton<LedgerProjectionWorker>();
|
||||
services.AddSingleton<ILedgerEventWriteService, LedgerEventWriteService>();
|
||||
})
|
||||
.Build();
|
||||
}
|
||||
|
||||
static async IAsyncEnumerable<LedgerEventDraft> ReadDraftsAsync(FileInfo file, string tenant, TimeProvider timeProvider, [EnumeratorCancellation] CancellationToken cancellationToken)
|
||||
{
|
||||
await using var stream = file.OpenRead();
|
||||
using var reader = new StreamReader(stream);
|
||||
var recordedAtBase = timeProvider.GetUtcNow();
|
||||
|
||||
while (!reader.EndOfStream)
|
||||
{
|
||||
var line = await reader.ReadLineAsync().ConfigureAwait(false);
|
||||
if (string.IsNullOrWhiteSpace(line))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
var node = JsonNode.Parse(line)?.AsObject();
|
||||
if (node is null)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
yield return ToDraft(node, tenant, recordedAtBase);
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
}
|
||||
}
|
||||
|
||||
static LedgerEventDraft ToDraft(JsonObject node, string defaultTenant, DateTimeOffset recordedAtBase)
|
||||
{
|
||||
string required(string name) => node[name]?.GetValue<string>() ?? throw new InvalidOperationException($"{name} missing");
|
||||
|
||||
var tenantId = node.TryGetPropertyValue("tenant", out var tenantNode)
|
||||
? tenantNode!.GetValue<string>()
|
||||
: defaultTenant;
|
||||
|
||||
var chainId = Guid.Parse(required("chain_id"));
|
||||
var sequence = node["sequence_no"]?.GetValue<long>() ?? node["sequence"]?.GetValue<long>() ?? throw new InvalidOperationException("sequence_no missing");
|
||||
var eventId = Guid.Parse(required("event_id"));
|
||||
var eventType = required("event_type");
|
||||
var policyVersion = required("policy_version");
|
||||
var findingId = required("finding_id");
|
||||
var artifactId = required("artifact_id");
|
||||
var sourceRunId = node.TryGetPropertyValue("source_run_id", out var sourceRunNode) && sourceRunNode is not null && !string.IsNullOrWhiteSpace(sourceRunNode.GetValue<string>())
|
||||
? Guid.Parse(sourceRunNode!.GetValue<string>())
|
||||
: null;
|
||||
var actorId = required("actor_id");
|
||||
var actorType = required("actor_type");
|
||||
var occurredAt = DateTimeOffset.Parse(required("occurred_at"));
|
||||
var recordedAt = node.TryGetPropertyValue("recorded_at", out var recordedAtNode) && recordedAtNode is not null
|
||||
? DateTimeOffset.Parse(recordedAtNode.GetValue<string>())
|
||||
: recordedAtBase;
|
||||
|
||||
var payload = node.TryGetPropertyValue("payload", out var payloadNode) && payloadNode is JsonObject payloadObj
|
||||
? payloadObj
|
||||
: throw new InvalidOperationException("payload missing");
|
||||
|
||||
var canonicalEnvelope = LedgerCanonicalJsonSerializer.Canonicalize(payload);
|
||||
var prev = node.TryGetPropertyValue("previous_hash", out var prevNode) ? prevNode?.GetValue<string>() : null;
|
||||
|
||||
return new LedgerEventDraft(
|
||||
tenantId,
|
||||
chainId,
|
||||
sequence,
|
||||
eventId,
|
||||
eventType,
|
||||
policyVersion,
|
||||
findingId,
|
||||
artifactId,
|
||||
sourceRunId,
|
||||
actorId,
|
||||
actorType,
|
||||
occurredAt,
|
||||
recordedAt,
|
||||
payload,
|
||||
canonicalEnvelope,
|
||||
prev);
|
||||
}
|
||||
|
||||
/// <summary>
/// Verifies the persisted ledger for <paramref name="tenant"/>: row count,
/// per-chain sequence continuity, hash-chain linkage, recomputed event/Merkle
/// hashes, and (when otherwise clean) residual projection lag.
/// Returns a <see cref="VerificationResult"/> whose Errors list is empty on success.
/// </summary>
static async Task<VerificationResult> VerifyLedgerAsync(IServiceProvider services, string tenant, long expectedEvents, CancellationToken cancellationToken)
{
    var errors = new List<string>();
    var dataSource = services.GetRequiredService<LedgerDataSource>();

    await using var connection = await dataSource.OpenConnectionAsync(tenant, "verify", cancellationToken).ConfigureAwait(false);

    // Count check: fewer rows than expected means events were lost; more are tolerated.
    await using (var countCommand = new Npgsql.NpgsqlCommand("select count(*) from ledger_events where tenant_id = @tenant", connection))
    {
        countCommand.Parameters.AddWithValue("tenant", tenant);
        var count = (long)await countCommand.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        if (count < expectedEvents)
        {
            errors.Add($"event_count_mismatch:{count}/{expectedEvents}");
        }
    }

    // Sequence and hash verification. Ordering by (chain_id, sequence_no) lets a
    // single pass track the expected sequence and previous hash per chain.
    const string query = """
        select chain_id, sequence_no, event_id, event_body, event_hash, previous_hash, merkle_leaf_hash
        from ledger_events
        where tenant_id = @tenant
        order by chain_id, sequence_no
        """;

    await using var command = new Npgsql.NpgsqlCommand(query, connection);
    command.Parameters.AddWithValue("tenant", tenant);

    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

    Guid? currentChain = null;
    long expectedSequence = 1;
    string? prevHash = null;

    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        var chainId = reader.GetGuid(0);
        var sequence = reader.GetInt64(1);
        var eventId = reader.GetGuid(2);
        var eventBodyJson = reader.GetString(3);
        var eventHash = reader.GetString(4);
        var previousHash = reader.GetString(5);
        var merkleLeafHash = reader.GetString(6);

        // New chain: reset the expected sequence and seed the previous-hash
        // tracker with the empty-hash sentinel used for a chain's first event.
        if (currentChain != chainId)
        {
            currentChain = chainId;
            expectedSequence = 1;
            prevHash = LedgerEventConstants.EmptyHash;
        }

        if (sequence != expectedSequence)
        {
            errors.Add($"sequence_gap:{chainId}:{sequence}");
        }

        // Stored previous_hash must link to the prior event's hash on this chain.
        if (!string.Equals(previousHash, prevHash, StringComparison.Ordinal))
        {
            errors.Add($"previous_hash_mismatch:{chainId}:{sequence}");
        }

        // Recompute hashes from the canonicalized stored body and compare with
        // what was persisted at write time.
        var node = JsonNode.Parse(eventBodyJson)?.AsObject() ?? new JsonObject();
        var canonical = LedgerCanonicalJsonSerializer.Canonicalize(node);
        var hashResult = LedgerHashing.ComputeHashes(canonical, sequence);

        if (!string.Equals(hashResult.EventHash, eventHash, StringComparison.Ordinal))
        {
            errors.Add($"event_hash_mismatch:{eventId}");
        }

        if (!string.Equals(hashResult.MerkleLeafHash, merkleLeafHash, StringComparison.Ordinal))
        {
            errors.Add($"merkle_leaf_mismatch:{eventId}");
        }

        prevHash = eventHash;
        expectedSequence++;
    }

    if (errors.Count == 0)
    {
        // Additional check: projector caught up (no lag > 0)
        var lagMax = LedgerMetricsSnapshot.LagMax;
        if (lagMax > 0)
        {
            errors.Add($"projection_lag_remaining:{lagMax}");
        }
    }

    return new VerificationResult(errors.Count == 0, errors);
}
|
||||
|
||||
/// <summary>
/// Linear-interpolation percentile over the non-NaN values; returns 0 when
/// nothing remains after filtering.
/// </summary>
static double Percentile(IEnumerable<double> values, double percentile)
{
    var sorted = values.Where(v => !double.IsNaN(v)).OrderBy(v => v).ToArray();
    if (sorted.Length == 0)
    {
        return 0;
    }

    // Fractional rank into the sorted samples (0-based, inclusive endpoints).
    var position = percentile / 100.0 * (sorted.Length - 1);
    var low = (int)Math.Floor(position);
    var high = (int)Math.Ceiling(position);

    // Exact hit on a sample, otherwise interpolate between its neighbours.
    return low == high
        ? sorted[low]
        : sorted[low] + (sorted[high] - sorted[low]) * (position - low);
}
|
||||
|
||||
/// <summary>
/// JSON-serializable summary of one replay-harness run (written to the report path).
/// </summary>
internal sealed record HarnessReport(
    string Tenant,                          // tenant the fixtures were replayed under
    IReadOnlyList<string> Fixtures,         // fixture file paths that were replayed
    long EventsWritten,                     // total events appended to the ledger
    double DurationSeconds,                 // wall-clock duration of the run
    string Status,                          // overall pass/fail verdict
    double WriteLatencyP95Ms,               // p95 append latency in milliseconds
    double ProjectionRebuildP95Ms,          // p95 projection rebuild time in milliseconds
    double ProjectionLagSecondsMax,         // worst observed projection lag
    double BacklogEventsMax,                // worst observed ingest backlog depth
    long DbConnectionsObserved,             // database connections seen during the run
    IReadOnlyList<string> VerificationErrors); // errors from post-run ledger verification
|
||||
|
||||
/// <summary>Outcome of ledger verification; Success is true only when Errors is empty.</summary>
internal sealed record VerificationResult(bool Success, IReadOnlyList<string> Errors);
|
||||
|
||||
/// <summary>
/// In-memory sink for MeterListener callbacks: records (instrument name, value)
/// samples and exposes them by name for assertions and report snapshots.
/// </summary>
internal sealed class MetricsBag
{
    private readonly List<(string Name, double Value)> _doubleSamples = new();
    private readonly List<(string Name, long Value)> _longSamples = new();

    // Tag sets are intentionally discarded; only instrument name and value are kept.
    public void Add(Instrument instrument, double value, ReadOnlySpan<KeyValuePair<string, object?>> _)
    {
        _doubleSamples.Add((instrument.Name, value));
    }

    public void Add(Instrument instrument, long value, ReadOnlySpan<KeyValuePair<string, object?>> _)
    {
        _longSamples.Add((instrument.Name, value));
    }

    /// <summary>All double samples recorded under <paramref name="name"/>.</summary>
    public IEnumerable<double> HistDouble(string name)
    {
        return _doubleSamples.Where(sample => sample.Name == name).Select(sample => sample.Value);
    }

    /// <summary>Alias with gauge semantics; same lookup as <see cref="HistDouble"/>.</summary>
    public IEnumerable<double> GaugeDouble(string name)
    {
        return _doubleSamples.Where(sample => sample.Name == name).Select(sample => sample.Value);
    }

    /// <summary>All long samples recorded under <paramref name="name"/>.</summary>
    public IEnumerable<long> GaugeLong(string name)
    {
        return _longSamples.Where(sample => sample.Name == name).Select(sample => sample.Value);
    }

    /// <summary>Anonymous snapshot grouping every sample by instrument name.</summary>
    public object ToSnapshot()
    {
        return new
        {
            doubles = _doubleSamples.GroupBy(s => s.Name).ToDictionary(g => g.Key, g => g.Select(s => s.Value).ToArray()),
            longs = _longSamples.GroupBy(s => s.Name).ToDictionary(g => g.Key, g => g.Select(s => s.Value).ToArray())
        };
    }
}
|
||||
|
||||
// Harness lightweight no-op implementations for projection/merkle to keep replay fast
|
||||
/// <summary>
/// Harness stub: returns a fixed "noop" evaluation so replays skip real policy
/// work, carrying the record's timestamps and the current status (or "new").
/// </summary>
internal sealed class NoOpPolicyEvaluationService : IPolicyEvaluationService
{
    public Task<PolicyEvaluationResult> EvaluateAsync(LedgerEventRecord record, FindingProjection? current, CancellationToken cancellationToken)
        => Task.FromResult(new PolicyEvaluationResult("noop", record.OccurredAt, record.RecordedAt, current?.Status ?? "new"));
}
|
||||
|
||||
/// <summary>
/// Projection repository stub for the replay harness: lookups return nothing and
/// writes are accepted and discarded, keeping replays free of projection storage.
/// </summary>
internal sealed class NoOpProjectionRepository : IFindingProjectionRepository
{
    public Task<FindingProjection?> GetAsync(string tenantId, string findingId, string policyVersion, CancellationToken cancellationToken)
    {
        return Task.FromResult<FindingProjection?>(null);
    }

    public Task InsertActionAsync(FindingAction action, CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }

    public Task InsertHistoryAsync(FindingHistory history, CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }

    public Task SaveCheckpointAsync(ProjectionCheckpoint checkpoint, CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }

    public Task<ProjectionCheckpoint> GetCheckpointAsync(CancellationToken cancellationToken)
    {
        // MinValue checkpoint: presumably makes a projector start from the
        // beginning — confirm against the projector's checkpoint handling.
        return Task.FromResult(new ProjectionCheckpoint(DateTimeOffset.MinValue, Guid.Empty, DateTimeOffset.MinValue));
    }

    public Task UpsertAsync(FindingProjection projection, CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }

    public Task EnsureIndexesAsync(CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }
}
|
||||
|
||||
/// <summary>Merkle anchor repository stub: persists nothing and reports no prior anchor.</summary>
internal sealed class NoOpMerkleAnchorRepository : IMerkleAnchorRepository
{
    public Task InsertAsync(string tenantId, Guid anchorId, DateTimeOffset windowStart, DateTimeOffset windowEnd, long sequenceStart, long sequenceEnd, string rootHash, long leafCount, DateTime anchoredAt, string? anchorReference, CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }

    public Task<MerkleAnchor?> GetLatestAsync(string tenantId, CancellationToken cancellationToken)
    {
        return Task.FromResult<MerkleAnchor?>(null);
    }
}
|
||||
|
||||
/// <summary>Scheduler that forwards appended ledger events onto the harness anchor queue.</summary>
internal sealed class QueueMerkleAnchorScheduler : IMerkleAnchorScheduler
{
    private readonly LedgerAnchorQueue _anchorQueue;

    public QueueMerkleAnchorScheduler(LedgerAnchorQueue queue)
    {
        ArgumentNullException.ThrowIfNull(queue);
        _anchorQueue = queue;
    }

    public Task EnqueueAsync(LedgerEventRecord record, CancellationToken cancellationToken)
    {
        // The queue exposes a ValueTask-based enqueue; adapt it to the interface's Task.
        return _anchorQueue.EnqueueAsync(record, cancellationToken).AsTask();
    }
}
|
||||
@@ -0,0 +1,43 @@
|
||||
import json
|
||||
import sys
|
||||
from hashlib import sha256
|
||||
|
||||
EMPTY_PREV = "0" * 64
|
||||
|
||||
|
||||
def canonical(obj):
    """Serialize *obj* as compact, key-sorted JSON (deterministic canonical form)."""
    return json.dumps(obj, sort_keys=True, separators=(",", ":"))
|
||||
|
||||
|
||||
def hash_event(payload, sequence_no):
    """Return (event_hash, merkle_leaf_hash) for *payload* at *sequence_no*.

    event_hash is sha256 over the canonical (compact, key-sorted) JSON of the
    payload concatenated with the decimal sequence number; the Merkle leaf is
    sha256 over the event hash's hex digest.
    """
    canonical_json = json.dumps(payload, sort_keys=True, separators=(",", ":")).encode()
    event_hash = sha256(canonical_json + str(sequence_no).encode()).hexdigest()
    merkle_leaf = sha256(event_hash.encode()).hexdigest()
    return event_hash, merkle_leaf
|
||||
|
||||
|
||||
def main(path):
    """Rewrite the NDJSON file at *path* in place, filling in hash-chain fields.

    Events are sorted by (chain_id, sequence_no); each event gains event_hash,
    merkle_leaf_hash and previous_hash (the prior event's hash on the same
    chain, or the all-zero sentinel for a chain's first event).
    """
    out_lines = []
    last_hash = {}
    # utf-8 is pinned so the rewrite is byte-identical across platforms instead
    # of depending on the locale's default encoding.
    with open(path, "r", encoding="utf-8") as f:
        events = [json.loads(line) for line in f if line.strip()]
    # Sort assumes every event carries chain_id and sequence_no — TODO confirm
    # fixtures guarantee both keys.
    events.sort(key=lambda e: (e["chain_id"], e["sequence_no"]))
    for e in events:
        # A pre-populated (truthy) previous_hash is respected; otherwise chain
        # from the last hash emitted for this chain_id, zero sentinel first.
        prev = e.get("previous_hash") or last_hash.get(e["chain_id"], EMPTY_PREV)
        # Hash the nested payload when present, otherwise the whole event as
        # read (before the hash fields below are attached).
        payload = e.get("payload") or e
        event_hash, leaf = hash_event(payload, e["sequence_no"])
        e["event_hash"] = event_hash
        e["merkle_leaf_hash"] = leaf
        e["previous_hash"] = prev
        last_hash[e["chain_id"]] = event_hash
        out_lines.append(json.dumps(e))
    with open(path, "w", encoding="utf-8") as f:
        for line in out_lines:
            f.write(line + "\n")
|
||||
|
||||
|
||||
# CLI entry point: `compute_hashes.py <ndjson>` rewrites the fixture in place.
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("usage: compute_hashes.py <ndjson>")
        sys.exit(1)
    main(sys.argv[1])
|
||||
@@ -0,0 +1,37 @@
|
||||
using System.Text.Json;
|
||||
using LedgerReplayHarness;
|
||||
using FluentAssertions;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Tests;
|
||||
|
||||
/// <summary>
/// End-to-end check of the replay harness: runs the bundled sample fixture and
/// asserts the emitted JSON report's status, tenant, and hash summary.
/// </summary>
public class HarnessRunnerTests
{
    [Fact]
    public async Task HarnessRunner_WritesReportAndValidatesHashes()
    {
        var fixturePath = Path.Combine(AppContext.BaseDirectory, "fixtures", "sample.ndjson");
        var tempReport = Path.GetTempFileName();

        try
        {
            // RunAsync is an instance method taking a ledger client and a
            // cancellation token; the previous static 3-argument invocation
            // does not exist on HarnessRunner.
            var runner = new HarnessRunner(new InMemoryLedgerClient());
            var exitCode = await runner.RunAsync(new[] { fixturePath }, "tenant-test", tempReport, CancellationToken.None);
            exitCode.Should().Be(0);

            var json = await File.ReadAllTextAsync(tempReport);
            using var doc = JsonDocument.Parse(json);
            doc.RootElement.GetProperty("eventsWritten").GetInt64().Should().BeGreaterThan(0);
            doc.RootElement.GetProperty("status").GetString().Should().Be("pass");
            doc.RootElement.GetProperty("tenant").GetString().Should().Be("tenant-test");
            // Fixture-specific expectation: sample.ndjson contains one distinct event.
            doc.RootElement.GetProperty("hashSummary").GetProperty("uniqueEventHashes").GetInt32().Should().Be(1);
            doc.RootElement.GetProperty("hashSummary").GetProperty("uniqueMerkleLeaves").GetInt32().Should().Be(1);
        }
        finally
        {
            if (File.Exists(tempReport))
            {
                File.Delete(tempReport);
            }
        }
    }
}
|
||||
@@ -0,0 +1,223 @@
|
||||
using System.Diagnostics.Metrics;
|
||||
using System.Linq;
|
||||
using FluentAssertions;
|
||||
using StellaOps.Findings.Ledger.Observability;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Tests;
|
||||
|
||||
/// <summary>
/// Verifies the LedgerMetrics instruments (meter "StellaOps.Findings.Ledger")
/// through a MeterListener: gauge values, histogram samples, counter increments,
/// and the tags each measurement carries.
/// </summary>
public class LedgerMetricsTests
{
    [Fact]
    public void ProjectionLagGauge_RecordsLatestPerTenant()
    {
        using var listener = CreateListener();
        var measurements = new List<Measurement<double>>();

        listener.SetMeasurementEventCallback<double>((instrument, measurement, tags, state) =>
        {
            if (instrument.Name == "ledger_projection_lag_seconds")
            {
                measurements.Add(measurement);
            }
        });

        LedgerMetrics.RecordProjectionLag(TimeSpan.FromSeconds(42), "tenant-a");

        // Observable gauge: values are only surfaced when explicitly observed.
        listener.RecordObservableInstruments();

        var measurement = measurements.Should().ContainSingle().Subject;
        measurement.Value.Should().BeApproximately(42, precision: 0.001);
        measurement.Tags.ToDictionary(kvp => kvp.Key, kvp => kvp.Value)
            .Should().Contain(new KeyValuePair<string, object?>("tenant", "tenant-a"));
    }

    [Fact]
    public void MerkleAnchorDuration_EmitsHistogramMeasurement()
    {
        using var listener = CreateListener();
        var measurements = new List<Measurement<double>>();

        listener.SetMeasurementEventCallback<double>((instrument, measurement, tags, state) =>
        {
            if (instrument.Name == "ledger_merkle_anchor_duration_seconds")
            {
                measurements.Add(measurement);
            }
        });

        // Histogram: the sample is pushed to the listener at record time.
        LedgerMetrics.RecordMerkleAnchorDuration(TimeSpan.FromSeconds(1.5), "tenant-b");

        var measurement = measurements.Should().ContainSingle().Subject;
        measurement.Value.Should().BeApproximately(1.5, precision: 0.001);
        measurement.Tags.ToDictionary(kvp => kvp.Key, kvp => kvp.Value)
            .Should().Contain(new KeyValuePair<string, object?>("tenant", "tenant-b"));
    }

    [Fact]
    public void MerkleAnchorFailure_IncrementsCounter()
    {
        using var listener = CreateListener();
        var measurements = new List<Measurement<long>>();

        listener.SetMeasurementEventCallback<long>((instrument, measurement, tags, state) =>
        {
            if (instrument.Name == "ledger_merkle_anchor_failures_total")
            {
                measurements.Add(measurement);
            }
        });

        LedgerMetrics.RecordMerkleAnchorFailure("tenant-c", "persist_failure");

        var measurement = measurements.Should().ContainSingle().Subject;
        measurement.Value.Should().Be(1);
        var tags = measurement.Tags.ToDictionary(kvp => kvp.Key, kvp => kvp.Value);
        tags.Should().Contain(new KeyValuePair<string, object?>("tenant", "tenant-c"));
        tags.Should().Contain(new KeyValuePair<string, object?>("reason", "persist_failure"));
    }

    [Fact]
    public void AttachmentFailure_IncrementsCounter()
    {
        using var listener = CreateListener();
        var measurements = new List<Measurement<long>>();

        listener.SetMeasurementEventCallback<long>((instrument, measurement, tags, state) =>
        {
            if (instrument.Name == "ledger_attachments_encryption_failures_total")
            {
                measurements.Add(measurement);
            }
        });

        LedgerMetrics.RecordAttachmentFailure("tenant-d", "encrypt");

        var measurement = measurements.Should().ContainSingle().Subject;
        measurement.Value.Should().Be(1);
        var tags = measurement.Tags.ToDictionary(kvp => kvp.Key, kvp => kvp.Value);
        tags.Should().Contain(new KeyValuePair<string, object?>("tenant", "tenant-d"));
        tags.Should().Contain(new KeyValuePair<string, object?>("stage", "encrypt"));
    }

    [Fact]
    public void BacklogGauge_ReflectsOutstandingQueue()
    {
        using var listener = CreateListener();
        var measurements = new List<Measurement<long>>();

        // Reset
        // NOTE(review): the initial decrement appears to compensate for state
        // left by other tests (metrics are static/shared) — confirm.
        LedgerMetrics.DecrementBacklog("tenant-q");

        LedgerMetrics.IncrementBacklog("tenant-q");
        LedgerMetrics.IncrementBacklog("tenant-q");
        LedgerMetrics.DecrementBacklog("tenant-q");

        listener.SetMeasurementEventCallback<long>((instrument, measurement, tags, state) =>
        {
            if (instrument.Name == "ledger_ingest_backlog_events")
            {
                measurements.Add(measurement);
            }
        });

        listener.RecordObservableInstruments();

        var measurement = measurements.Should().ContainSingle().Subject;
        measurement.Value.Should().Be(1);
        measurement.Tags.ToDictionary(kvp => kvp.Key, kvp => kvp.Value)
            .Should().Contain(new KeyValuePair<string, object?>("tenant", "tenant-q"));
    }

    [Fact]
    public void ProjectionRebuildHistogram_RecordsScenarioTags()
    {
        using var listener = CreateListener();
        var measurements = new List<Measurement<double>>();

        listener.SetMeasurementEventCallback<double>((instrument, measurement, tags, state) =>
        {
            if (instrument.Name == "ledger_projection_rebuild_seconds")
            {
                measurements.Add(measurement);
            }
        });

        LedgerMetrics.RecordProjectionRebuild(TimeSpan.FromSeconds(3.2), "tenant-r", "replay");

        var measurement = measurements.Should().ContainSingle().Subject;
        measurement.Value.Should().BeApproximately(3.2, 0.001);
        var tags = measurement.Tags.ToDictionary(kvp => kvp.Key, kvp => kvp.Value);
        tags.Should().Contain(new KeyValuePair<string, object?>("tenant", "tenant-r"));
        tags.Should().Contain(new KeyValuePair<string, object?>("scenario", "replay"));
    }

    [Fact]
    public void DbConnectionsGauge_TracksRoleCounts()
    {
        using var listener = CreateListener();
        var measurements = new List<Measurement<long>>();

        // Reset
        LedgerMetrics.DecrementDbConnection("writer");

        LedgerMetrics.IncrementDbConnection("writer");

        listener.SetMeasurementEventCallback<long>((instrument, measurement, tags, state) =>
        {
            if (instrument.Name == "ledger_db_connections_active")
            {
                measurements.Add(measurement);
            }
        });

        listener.RecordObservableInstruments();

        var measurement = measurements.Should().ContainSingle().Subject;
        measurement.Value.Should().Be(1);
        measurement.Tags.ToDictionary(kvp => kvp.Key, kvp => kvp.Value)
            .Should().Contain(new KeyValuePair<string, object?>("role", "writer"));

        // Restore the shared counter so other tests see a clean state.
        LedgerMetrics.DecrementDbConnection("writer");
    }

    [Fact]
    public void VersionInfoGauge_EmitsConstantOne()
    {
        using var listener = CreateListener();
        var measurements = new List<Measurement<long>>();

        listener.SetMeasurementEventCallback<long>((instrument, measurement, tags, state) =>
        {
            if (instrument.Name == "ledger_app_version_info")
            {
                measurements.Add(measurement);
            }
        });

        listener.RecordObservableInstruments();

        var measurement = measurements.Should().ContainSingle().Subject;
        measurement.Value.Should().Be(1);
        var tags = measurement.Tags.ToDictionary(kvp => kvp.Key, kvp => kvp.Value);
        tags.Should().ContainKey("version");
        tags.Should().ContainKey("git_sha");
    }

    // Listener subscribed only to the ledger's meter so measurements from other
    // meters in the process never reach the callbacks above.
    private static MeterListener CreateListener()
    {
        var listener = new MeterListener
        {
            InstrumentPublished = (instrument, l) =>
            {
                if (instrument.Meter.Name == "StellaOps.Findings.Ledger")
                {
                    l.EnableMeasurementEvents(instrument);
                }
            }
        };

        listener.Start();
        return listener;
    }
}
|
||||
148
src/Findings/tools/LedgerReplayHarness/HarnessRunner.cs
Normal file
148
src/Findings/tools/LedgerReplayHarness/HarnessRunner.cs
Normal file
@@ -0,0 +1,148 @@
|
||||
using System.Collections.Concurrent;
using System.Diagnostics;
using System.Text.Json;
using System.Text.Json.Nodes;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Hashing;
|
||||
|
||||
namespace LedgerReplayHarness;
|
||||
|
||||
/// <summary>
/// Replays NDJSON ledger fixtures against an <see cref="ILedgerClient"/>:
/// re-computes event/Merkle hashes, appends events with bounded parallelism,
/// and writes a JSON pass/fail report.
/// </summary>
public sealed class HarnessRunner
{
    private readonly ILedgerClient _client;
    private readonly int _maxParallel;

    public HarnessRunner(ILedgerClient client, int maxParallel = 4)
    {
        _client = client ?? throw new ArgumentNullException(nameof(client));
        _maxParallel = maxParallel <= 0 ? 1 : maxParallel;
    }

    /// <summary>
    /// Replays <paramref name="fixtures"/> for <paramref name="tenant"/>, writes the
    /// report to <paramref name="reportPath"/>, and returns 0 on pass, 1 on fail.
    /// </summary>
    /// <exception cref="ArgumentException">No fixtures were supplied.</exception>
    public async Task<int> RunAsync(IEnumerable<string> fixtures, string tenant, string reportPath, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(fixtures);

        // Materialize once: the sequence is consumed by the emptiness check, the
        // replay loop, and the report, so repeated enumeration must be avoided.
        var fixtureList = fixtures.ToList();
        if (fixtureList.Count == 0)
        {
            throw new ArgumentException("At least one fixture is required.", nameof(fixtures));
        }

        var stats = new HarnessStats();

        tenant = string.IsNullOrWhiteSpace(tenant) ? "default" : tenant;
        reportPath = string.IsNullOrWhiteSpace(reportPath) ? "harness-report.json" : reportPath;

        var eventCount = 0L;
        var hashesValid = true;
        DateTimeOffset? earliest = null;
        DateTimeOffset? latest = null;
        var leafHashes = new List<string>();
        string? expectedMerkleRoot = null;

        // ConcurrentBag: latencies are appended from the throttled append tasks
        // running in parallel. (The original also declared a shadowing
        // List<double> with the same name, which does not compile.)
        var latencies = new ConcurrentBag<double>();
        var swTotal = Stopwatch.StartNew();

        var throttler = new TaskThrottler(_maxParallel);

        foreach (var fixture in fixtureList)
        {
            await foreach (var line in ReadLinesAsync(fixture, cancellationToken))
            {
                if (string.IsNullOrWhiteSpace(line)) continue;
                var node = JsonNode.Parse(line)?.AsObject();
                if (node is null) continue;

                eventCount++;
                var recordedAt = node["recorded_at"]?.GetValue<DateTimeOffset>() ?? DateTimeOffset.UtcNow;
                earliest = earliest is null ? recordedAt : DateTimeOffset.Compare(recordedAt, earliest.Value) < 0 ? recordedAt : earliest;
                latest = latest is null
                    ? recordedAt
                    : DateTimeOffset.Compare(recordedAt, latest.Value) > 0 ? recordedAt : latest;

                if (node["canonical_envelope"] is JsonObject envelope && node["sequence_no"] is not null)
                {
                    var seq = node["sequence_no"]!.GetValue<long>();
                    var computed = LedgerHashing.ComputeHashes(envelope, seq);
                    // Compare against the fixture's recorded hash when present.
                    var expected = node["event_hash"]?.GetValue<string>();
                    if (!string.IsNullOrEmpty(expected) && !string.Equals(expected, computed.EventHash, StringComparison.Ordinal))
                    {
                        hashesValid = false;
                    }

                    stats.UpdateHashes(computed.EventHash, computed.MerkleLeafHash);
                    leafHashes.Add(computed.MerkleLeafHash);
                    expectedMerkleRoot ??= node["merkle_root"]?.GetValue<string>();

                    // enqueue for concurrent append
                    var record = new LedgerEventRecord(
                        tenant,
                        envelope["chain_id"]?.GetValue<Guid>() ?? Guid.Empty,
                        seq,
                        envelope["event_id"]?.GetValue<Guid>() ?? Guid.Empty,
                        envelope["event_type"]?.GetValue<string>() ?? string.Empty,
                        envelope["policy_version"]?.GetValue<string>() ?? string.Empty,
                        envelope["finding_id"]?.GetValue<string>() ?? string.Empty,
                        envelope["artifact_id"]?.GetValue<string>() ?? string.Empty,
                        envelope["source_run_id"]?.GetValue<Guid?>(),
                        envelope["actor_id"]?.GetValue<string>() ?? "system",
                        envelope["actor_type"]?.GetValue<string>() ?? "system",
                        envelope["occurred_at"]?.GetValue<DateTimeOffset>() ?? recordedAt,
                        recordedAt,
                        envelope,
                        computed.EventHash,
                        envelope["previous_hash"]?.GetValue<string>() ?? string.Empty,
                        computed.MerkleLeafHash,
                        computed.CanonicalJson);

                    // fire-and-track latency
                    await throttler.RunAsync(async () =>
                    {
                        var sw = Stopwatch.StartNew();
                        await _client.AppendAsync(record, cancellationToken).ConfigureAwait(false);
                        sw.Stop();
                        latencies.Add(sw.Elapsed.TotalMilliseconds);
                    }, cancellationToken).ConfigureAwait(false);
                }
            }
        }

        // Wait for every throttled append before computing latency statistics.
        await throttler.DrainAsync(cancellationToken).ConfigureAwait(false);
        swTotal.Stop();

        var latencyArray = latencies.ToArray();
        Array.Sort(latencyArray);
        // Nearest-rank p95 over the observed append latencies.
        double p95 = latencyArray.Length == 0 ? 0 : latencyArray[(int)Math.Ceiling(latencyArray.Length * 0.95) - 1];

        string? computedRoot = leafHashes.Count == 0 ? null : MerkleCalculator.ComputeRoot(leafHashes);
        var merkleOk = expectedMerkleRoot is null || string.Equals(expectedMerkleRoot, computedRoot, StringComparison.OrdinalIgnoreCase);

        var report = new
        {
            tenant,
            fixtures = fixtureList.ToArray(),
            eventsWritten = eventCount,
            durationSeconds = Math.Max(swTotal.Elapsed.TotalSeconds, (latest - earliest)?.TotalSeconds ?? 0),
            throughputEps = swTotal.Elapsed.TotalSeconds > 0 ? eventCount / swTotal.Elapsed.TotalSeconds : 0,
            latencyP95Ms = p95,
            projectionLagMaxSeconds = 0,
            cpuPercentMax = 0,
            memoryMbMax = 0,
            status = hashesValid && merkleOk ? "pass" : "fail",
            timestamp = DateTimeOffset.UtcNow.ToString("O"),
            hashSummary = stats.ToReport(),
            merkleRoot = computedRoot,
            merkleExpected = expectedMerkleRoot
        };

        var json = JsonSerializer.Serialize(report, new JsonSerializerOptions { WriteIndented = true });
        await File.WriteAllTextAsync(reportPath, json, cancellationToken).ConfigureAwait(false);
        return hashesValid && merkleOk ? 0 : 1;
    }

    // Streams a fixture line by line so large NDJSON files are never fully buffered.
    private static async IAsyncEnumerable<string> ReadLinesAsync(string path, [System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken)
    {
        await using var stream = File.OpenRead(path);
        using var reader = new StreamReader(stream);
        string? line;
        while (!reader.EndOfStream && !cancellationToken.IsCancellationRequested && (line = await reader.ReadLineAsync()) is not null)
        {
            yield return line;
        }
    }
}
|
||||
26
src/Findings/tools/LedgerReplayHarness/HarnessStats.cs
Normal file
26
src/Findings/tools/LedgerReplayHarness/HarnessStats.cs
Normal file
@@ -0,0 +1,26 @@
|
||||
namespace LedgerReplayHarness;
|
||||
|
||||
/// <summary>Tracks distinct event and Merkle-leaf hashes seen during a replay.</summary>
internal sealed class HarnessStats
{
    // Case-insensitive so hex-casing differences do not inflate the counts.
    private readonly HashSet<string> _distinctEventHashes = new(StringComparer.OrdinalIgnoreCase);
    private readonly HashSet<string> _distinctLeafHashes = new(StringComparer.OrdinalIgnoreCase);

    public void UpdateHashes(string eventHash, string leafHash)
    {
        // Blank/whitespace hashes are ignored rather than counted as values.
        if (!string.IsNullOrWhiteSpace(eventHash))
        {
            _distinctEventHashes.Add(eventHash);
        }

        if (!string.IsNullOrWhiteSpace(leafHash))
        {
            _distinctLeafHashes.Add(leafHash);
        }
    }

    /// <summary>Anonymous summary embedded under "hashSummary" in the report JSON.</summary>
    public object ToReport()
    {
        return new
        {
            uniqueEventHashes = _distinctEventHashes.Count,
            uniqueMerkleLeaves = _distinctLeafHashes.Count
        };
    }
}
|
||||
8
src/Findings/tools/LedgerReplayHarness/ILedgerClient.cs
Normal file
8
src/Findings/tools/LedgerReplayHarness/ILedgerClient.cs
Normal file
@@ -0,0 +1,8 @@
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace LedgerReplayHarness;
|
||||
|
||||
/// <summary>
/// Sink the replay harness appends ledger events to (an in-memory stub or a
/// real ledger-backed implementation).
/// </summary>
public interface ILedgerClient
{
    /// <summary>Appends one event record; durability semantics are implementation-defined.</summary>
    Task AppendAsync(LedgerEventRecord record, CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,15 @@
|
||||
using System.Collections.Concurrent;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace LedgerReplayHarness;
|
||||
|
||||
/// <summary>
/// In-process <see cref="ILedgerClient"/> that records appended events keyed by
/// (tenant, event id); used by the harness to replay without a real ledger.
/// </summary>
public sealed class InMemoryLedgerClient : ILedgerClient
{
    // TryAdd (not the indexer) keeps the first record if the same event replays twice.
    private readonly ConcurrentDictionary<(string Tenant, Guid EventId), LedgerEventRecord> _records = new();

    public Task AppendAsync(LedgerEventRecord record, CancellationToken cancellationToken)
    {
        var key = (record.TenantId, record.EventId);
        _records.TryAdd(key, record);
        return Task.CompletedTask;
    }
}
|
||||
@@ -0,0 +1,14 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
  </PropertyGroup>
  <ItemGroup>
    <!-- MSBuild paths use single separators; the doubled "\\" looked like an
         escaping artifact and would not resolve as written. -->
    <ProjectReference Include="..\..\StellaOps.Findings.Ledger\StellaOps.Findings.Ledger.csproj" />
  </ItemGroup>
  <ItemGroup>
    <PackageReference Include="System.CommandLine" Version="2.0.0-beta4.22272.1" />
  </ItemGroup>
</Project>
|
||||
41
src/Findings/tools/LedgerReplayHarness/MerkleCalculator.cs
Normal file
41
src/Findings/tools/LedgerReplayHarness/MerkleCalculator.cs
Normal file
@@ -0,0 +1,41 @@
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
|
||||
namespace LedgerReplayHarness;
|
||||
|
||||
/// <summary>
/// Computes a SHA-256 Merkle root over hex-encoded leaf hashes. Leaves are
/// normalized (trimmed, lowercased); an odd node count pairs the trailing node
/// with itself.
/// </summary>
internal static class MerkleCalculator
{
    /// <summary>
    /// Returns the lowercase hex Merkle root of <paramref name="leafHashes"/>.
    /// </summary>
    /// <exception cref="ArgumentException">The list is null or empty.</exception>
    public static string ComputeRoot(IReadOnlyList<string> leafHashes)
    {
        if (leafHashes is null || leafHashes.Count == 0)
        {
            throw new ArgumentException("At least one leaf hash is required.", nameof(leafHashes));
        }

        var level = leafHashes.Select(Normalize).ToList();
        while (level.Count > 1)
        {
            var next = new List<string>((level.Count + 1) / 2);
            for (int i = 0; i < level.Count; i += 2)
            {
                var left = level[i];
                // Odd node count: the last node is paired with itself.
                var right = i + 1 < level.Count ? level[i + 1] : level[i];
                next.Add(HashPair(left, right));
            }

            level = next;
        }

        return level[0];
    }

    private static string Normalize(string hex)
        => hex?.Trim().ToLowerInvariant() ?? string.Empty;

    private static string HashPair(string left, string right)
    {
        // SHA256.HashData avoids allocating and disposing a SHA256 instance per
        // pair, which the previous SHA256.Create()-per-call version did.
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(left + right));
        return Convert.ToHexString(hash).ToLowerInvariant();
    }
}
|
||||
22
src/Findings/tools/LedgerReplayHarness/Program.cs
Normal file
22
src/Findings/tools/LedgerReplayHarness/Program.cs
Normal file
@@ -0,0 +1,22 @@
|
||||
using System.CommandLine;
using LedgerReplayHarness;

// CLI surface: --fixture is required and may be repeated; the rest have defaults.
var fixtureOption = new Option<string[]>("--fixture", "NDJSON fixture path(s)") { IsRequired = true, AllowMultipleArgumentsPerToken = true };
var tenantOption = new Option<string>("--tenant", () => "default", "Tenant identifier");
var reportOption = new Option<string>("--report", () => "harness-report.json", "Path to write JSON report");
var parallelOption = new Option<int>("--maxParallel", () => 4, "Maximum parallelism when sending events");

var root = new RootCommand("Findings Ledger replay & determinism harness");
root.AddOption(fixtureOption);
root.AddOption(tenantOption);
root.AddOption(reportOption);
root.AddOption(parallelOption);

// Replays the fixtures against an in-memory ledger client and terminates the
// process with the harness's pass/fail code; Environment.Exit short-circuits
// the InvokeAsync return below when the handler runs.
root.SetHandler(async (fixtures, tenant, report, maxParallel) =>
{
    var runner = new HarnessRunner(new InMemoryLedgerClient(), maxParallel);
    var exitCode = await runner.RunAsync(fixtures, tenant, report, CancellationToken.None);
    Environment.Exit(exitCode);
}, fixtureOption, tenantOption, reportOption, parallelOption);

return await root.InvokeAsync(args);
|
||||
36
src/Findings/tools/LedgerReplayHarness/TaskThrottler.cs
Normal file
36
src/Findings/tools/LedgerReplayHarness/TaskThrottler.cs
Normal file
@@ -0,0 +1,36 @@
|
||||
namespace LedgerReplayHarness;
|
||||
|
||||
/// <summary>
/// Bounds concurrent task execution with a semaphore and tracks every started
/// task so <see cref="DrainAsync"/> can await them all (surfacing exceptions).
/// </summary>
internal sealed class TaskThrottler
{
    private readonly SemaphoreSlim _semaphore;
    private readonly List<Task> _tasks = new();

    public TaskThrottler(int maxDegreeOfParallelism)
    {
        _semaphore = new SemaphoreSlim(maxDegreeOfParallelism > 0 ? maxDegreeOfParallelism : 1);
    }

    /// <summary>
    /// Waits for a slot (cancellable), then runs <paramref name="taskFactory"/>
    /// on the thread pool, releasing the slot when it finishes.
    /// </summary>
    public async Task RunAsync(Func<Task> taskFactory, CancellationToken cancellationToken)
    {
        await _semaphore.WaitAsync(cancellationToken).ConfigureAwait(false);
        // Do NOT pass the token to Task.Run: if it were already canceled, the
        // body (and its finally) would never execute and the semaphore slot
        // acquired above would leak. The factory observes cancellation itself.
        var task = Task.Run(async () =>
        {
            try
            {
                await taskFactory().ConfigureAwait(false);
            }
            finally
            {
                _semaphore.Release();
            }
        });
        lock (_tasks) _tasks.Add(task);
    }

    /// <summary>Awaits all started tasks; the token only abandons the wait, not the tasks.</summary>
    public async Task DrainAsync(CancellationToken cancellationToken)
    {
        Task[] pending;
        lock (_tasks) pending = _tasks.ToArray();
        await Task.WhenAll(pending).WaitAsync(cancellationToken).ConfigureAwait(false);
    }
}
|
||||
@@ -0,0 +1,77 @@
|
||||
using System.Text.Json;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Notifier.Tests;
|
||||
|
||||
/// <summary>
/// Validates the attestation notification templates shipped under
/// offline/notifier/templates/attestation: required channel coverage per
/// template key and presence of schemaVersion/locale/key metadata.
/// </summary>
public sealed class AttestationTemplateCoverageTests
{
    // Resolved once; walks up from the test's base directory to the repo root.
    private static readonly string RepoRoot = LocateRepoRoot();

    [Fact]
    public void Attestation_templates_cover_required_channels()
    {
        var directory = Path.Combine(RepoRoot, "offline", "notifier", "templates", "attestation");
        Assert.True(Directory.Exists(directory), $"Expected template directory at {directory}");

        // Parse every *.template.json once up front.
        var templates = Directory
            .GetFiles(directory, "*.template.json")
            .Select(path => new
            {
                Path = path,
                Document = JsonDocument.Parse(File.ReadAllText(path)).RootElement
            })
            .ToList();

        // Template key -> channel types that must each have a template file.
        var required = new Dictionary<string, string[]>
        {
            ["tmpl-attest-verify-fail"] = new[] { "slack", "email", "webhook" },
            ["tmpl-attest-expiry-warning"] = new[] { "email", "slack" },
            ["tmpl-attest-key-rotation"] = new[] { "email", "webhook" },
            ["tmpl-attest-transparency-anomaly"] = new[] { "slack", "webhook" }
        };

        foreach (var pair in required)
        {
            var matches = templates.Where(t => t.Document.GetProperty("key").GetString() == pair.Key);
            var channels = matches
                .Select(t => t.Document.GetProperty("channelType").GetString() ?? string.Empty)
                .ToHashSet(StringComparer.OrdinalIgnoreCase);

            var missing = pair.Value.Where(requiredChannel => !channels.Contains(requiredChannel)).ToArray();
            Assert.True(missing.Length == 0, $"{pair.Key} missing channels: {string.Join(", ", missing)}");
        }
    }

    [Fact]
    public void Attestation_templates_include_schema_and_locale_metadata()
    {
        var directory = Path.Combine(RepoRoot, "offline", "notifier", "templates", "attestation");
        Assert.True(Directory.Exists(directory), $"Expected template directory at {directory}");

        // Every template must declare non-blank schemaVersion, locale, and key.
        foreach (var path in Directory.GetFiles(directory, "*.template.json"))
        {
            var document = JsonDocument.Parse(File.ReadAllText(path)).RootElement;

            Assert.True(document.TryGetProperty("schemaVersion", out var schemaVersion) && !string.IsNullOrWhiteSpace(schemaVersion.GetString()), $"schemaVersion missing for {Path.GetFileName(path)}");
            Assert.True(document.TryGetProperty("locale", out var locale) && !string.IsNullOrWhiteSpace(locale.GetString()), $"locale missing for {Path.GetFileName(path)}");
            Assert.True(document.TryGetProperty("key", out var key) && !string.IsNullOrWhiteSpace(key.GetString()), $"key missing for {Path.GetFileName(path)}");
        }
    }

    // Ascends parent directories until the expected template path exists;
    // throws if the test runs outside the repository layout.
    private static string LocateRepoRoot()
    {
        var directory = AppContext.BaseDirectory;
        while (directory != null)
        {
            var candidate = Path.Combine(directory, "offline", "notifier", "templates", "attestation");
            if (Directory.Exists(candidate))
            {
                return directory;
            }

            directory = Directory.GetParent(directory)?.FullName;
        }

        throw new InvalidOperationException("Unable to locate repository root containing offline/notifier/templates/attestation.");
    }
}
|
||||
@@ -0,0 +1,66 @@
|
||||
using System.Text.Json;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Notifier.Tests;
|
||||
|
||||
/// <summary>
/// Verifies the offline API-deprecation notification templates cover the
/// slack/email channels and carry the documented metadata keys.
/// </summary>
public sealed class DeprecationTemplateTests
{
    [Fact]
    public void Deprecation_templates_cover_slack_and_email()
    {
        var directory = LocateOfflineDeprecationDir();
        Assert.True(Directory.Exists(directory), $"Expected template directory at {directory}");

        // Collect channel types declared by every template with the
        // deprecation key. Each JsonDocument is disposed as we go (it rents
        // buffers from the shared array pool; previously these were leaked).
        var channels = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
        foreach (var path in Directory.GetFiles(directory, "*.template.json"))
        {
            using var document = JsonDocument.Parse(File.ReadAllText(path));
            var root = document.RootElement;
            if (root.GetProperty("key").GetString() == "tmpl-api-deprecation")
            {
                channels.Add(root.GetProperty("channelType").GetString() ?? string.Empty);
            }
        }

        Assert.Contains("slack", channels);
        Assert.Contains("email", channels);
    }

    [Fact]
    public void Deprecation_templates_require_core_metadata()
    {
        var directory = LocateOfflineDeprecationDir();
        Assert.True(Directory.Exists(directory), $"Expected template directory at {directory}");

        foreach (var path in Directory.GetFiles(directory, "*.template.json"))
        {
            using var parsed = JsonDocument.Parse(File.ReadAllText(path));
            var document = parsed.RootElement;

            Assert.True(document.TryGetProperty("metadata", out var meta), $"metadata missing for {Path.GetFileName(path)}");

            // Ensure documented metadata keys are present for offline baseline.
            Assert.True(meta.TryGetProperty("version", out _), $"metadata.version missing for {Path.GetFileName(path)}");
            Assert.True(meta.TryGetProperty("author", out _), $"metadata.author missing for {Path.GetFileName(path)}");
        }
    }

    // Walks up from the test bin directory until the offline deprecation
    // template directory is found; throws if the repo layout is missing.
    private static string LocateOfflineDeprecationDir()
    {
        var directory = AppContext.BaseDirectory;
        while (directory != null)
        {
            var candidate = Path.Combine(directory, "offline", "notifier", "templates", "deprecation");
            if (Directory.Exists(candidate))
            {
                return candidate;
            }

            directory = Directory.GetParent(directory)?.FullName;
        }

        throw new InvalidOperationException("Unable to locate offline/notifier/templates/deprecation directory.");
    }
}
|
||||
@@ -0,0 +1,87 @@
|
||||
using System.Net;
using System.Text;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Mvc.Testing;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Notifier.Tests.Support;
using StellaOps.Notifier.WebService;
using StellaOps.Notify.Storage.Mongo.Repositories;
using Xunit;
|
||||
|
||||
namespace StellaOps.Notifier.Tests;
|
||||
|
||||
/// <summary>
/// Integration tests for the Notifier WebService HTTP surface: the
/// /.well-known/openapi document, deprecation headers on /api/v1 routes, and
/// the pack-approvals ingestion endpoint. The Mongo-backed repositories are
/// swapped for in-memory fakes via WithWebHostBuilder.
/// </summary>
/// <remarks>
/// NOTE(review): this file references Encoding.UTF8 and the in-memory fakes
/// (presumably StellaOps.Notifier.Tests.Support) plus the repository
/// interfaces — confirm the using directives at the top of the file bring all
/// of these into scope.
/// </remarks>
public sealed class OpenApiEndpointTests : IClassFixture<WebApplicationFactory<WebServiceAssemblyMarker>>
{
    // Shared client plus the fakes injected into the host's DI container so
    // each test can inspect persisted state directly.
    private readonly HttpClient _client;
    private readonly InMemoryPackApprovalRepository _packRepo = new();
    private readonly InMemoryLockRepository _lockRepo = new();
    private readonly InMemoryAuditRepository _auditRepo = new();

    public OpenApiEndpointTests(WebApplicationFactory<WebServiceAssemblyMarker> factory)
    {
        // Register the in-memory repositories last so they win over the
        // service's own registrations.
        _client = factory
            .WithWebHostBuilder(builder =>
            {
                builder.ConfigureServices(services =>
                {
                    services.AddSingleton<INotifyPackApprovalRepository>(_packRepo);
                    services.AddSingleton<INotifyLockRepository>(_lockRepo);
                    services.AddSingleton<INotifyAuditRepository>(_auditRepo);
                });
            })
            .CreateClient();
    }

    [Fact]
    public async Task OpenApi_endpoint_serves_yaml_with_scope_header()
    {
        var response = await _client.GetAsync("/.well-known/openapi", TestContext.Current.CancellationToken);

        // The endpoint must advertise YAML content, the notify scope header,
        // and a non-trivial ETag (a quoted content hash has length > 2).
        Assert.Equal(HttpStatusCode.OK, response.StatusCode);
        Assert.Equal("application/yaml", response.Content.Headers.ContentType?.MediaType);
        Assert.True(response.Headers.TryGetValues("X-OpenAPI-Scope", out var values) &&
            values.Contains("notify"));
        Assert.True(response.Headers.ETag is not null && response.Headers.ETag.Tag.Length > 2);

        // Spot-check the served document for key paths.
        var body = await response.Content.ReadAsStringAsync(TestContext.Current.CancellationToken);
        Assert.Contains("openapi: 3.1.0", body);
        Assert.Contains("/api/v1/notify/quiet-hours", body);
        Assert.Contains("/api/v1/notify/incidents", body);
    }

    [Fact]
    public async Task Deprecation_headers_emitted_for_api_surface()
    {
        var response = await _client.GetAsync("/api/v1/notify/rules", TestContext.Current.CancellationToken);

        // Middleware stamps RFC 8594 deprecation metadata on every /api/v1 route.
        Assert.True(response.Headers.TryGetValues("Deprecation", out var depValues) &&
            depValues.Contains("true"));
        Assert.True(response.Headers.TryGetValues("Sunset", out var sunsetValues) &&
            sunsetValues.Any());
        Assert.True(response.Headers.TryGetValues("Link", out var linkValues) &&
            linkValues.Any(v => v.Contains("rel=\"deprecation\"")));
    }

    [Fact]
    public async Task PackApprovals_endpoint_validates_missing_headers()
    {
        // No tenant or idempotency headers → the endpoint must reject the post.
        var content = new StringContent("""{"eventId":"00000000-0000-0000-0000-000000000001","issuedAt":"2025-11-17T16:00:00Z","kind":"pack.approval.granted","packId":"offline-kit","decision":"approved","actor":"task-runner"}""", Encoding.UTF8, "application/json");
        var response = await _client.PostAsync("/api/v1/notify/pack-approvals", content, TestContext.Current.CancellationToken);

        Assert.Equal(HttpStatusCode.BadRequest, response.StatusCode);
    }

    [Fact]
    public async Task PackApprovals_endpoint_accepts_happy_path_and_echoes_resume_token()
    {
        var content = new StringContent("""{"eventId":"00000000-0000-0000-0000-000000000002","issuedAt":"2025-11-17T16:00:00Z","kind":"pack.approval.granted","packId":"offline-kit","decision":"approved","actor":"task-runner","resumeToken":"rt-ok"}""", Encoding.UTF8, "application/json");
        var request = new HttpRequestMessage(HttpMethod.Post, "/api/v1/notify/pack-approvals")
        {
            Content = content
        };
        request.Headers.Add("X-StellaOps-Tenant", "tenant-a");
        request.Headers.Add("Idempotency-Key", Guid.NewGuid().ToString());

        var response = await _client.SendAsync(request, TestContext.Current.CancellationToken);

        // 202 Accepted, the resumeToken echoed back as X-Resume-After, and the
        // document persisted in the fake repository.
        Assert.Equal(HttpStatusCode.Accepted, response.StatusCode);
        Assert.True(response.Headers.TryGetValues("X-Resume-After", out var resumeValues) &&
            resumeValues.Contains("rt-ok"));
        Assert.True(_packRepo.Exists("tenant-a", Guid.Parse("00000000-0000-0000-0000-000000000002"), "offline-kit"));
    }
}
|
||||
@@ -0,0 +1,30 @@
|
||||
using StellaOps.Notify.Storage.Mongo.Documents;
|
||||
using StellaOps.Notify.Storage.Mongo.Repositories;
|
||||
|
||||
namespace StellaOps.Notifier.Tests.Support;
|
||||
|
||||
/// <summary>
/// Test double for <see cref="INotifyAuditRepository"/> that records audit
/// entries in memory and answers queries newest-first.
/// </summary>
internal sealed class InMemoryAuditRepository : INotifyAuditRepository
{
    private readonly List<NotifyAuditEntryDocument> _entries = new();

    /// <summary>Stores the entry; completes synchronously.</summary>
    public Task AppendAsync(NotifyAuditEntryDocument entry, CancellationToken cancellationToken = default)
    {
        _entries.Add(entry);
        return Task.CompletedTask;
    }

    /// <summary>
    /// Returns the tenant's entries, optionally filtered by a timestamp lower
    /// bound and capped by <paramref name="limit"/>, ordered newest first.
    /// </summary>
    public Task<IReadOnlyList<NotifyAuditEntryDocument>> QueryAsync(string tenantId, DateTimeOffset? since, int? limit, CancellationToken cancellationToken = default)
    {
        IEnumerable<NotifyAuditEntryDocument> query = _entries
            .Where(entry => entry.TenantId == tenantId && (!since.HasValue || entry.Timestamp >= since.Value))
            .OrderByDescending(entry => entry.Timestamp);

        if (limit is > 0)
        {
            query = query.Take(limit.Value);
        }

        IReadOnlyList<NotifyAuditEntryDocument> snapshot = query.ToList();
        return Task.FromResult(snapshot);
    }
}
|
||||
@@ -0,0 +1,18 @@
|
||||
using StellaOps.Notify.Storage.Mongo.Documents;
|
||||
using StellaOps.Notify.Storage.Mongo.Repositories;
|
||||
|
||||
namespace StellaOps.Notifier.Tests.Support;
|
||||
|
||||
/// <summary>
/// Test double for <see cref="INotifyPackApprovalRepository"/> that stores
/// pack approval documents in a dictionary keyed by (tenant, event, pack).
/// </summary>
internal sealed class InMemoryPackApprovalRepository : INotifyPackApprovalRepository
{
    private readonly Dictionary<(string TenantId, Guid EventId, string PackId), PackApprovalDocument> _records = new();

    /// <summary>Inserts or replaces the document under its composite key.</summary>
    public Task UpsertAsync(PackApprovalDocument document, CancellationToken cancellationToken = default)
    {
        var key = (document.TenantId, document.EventId, document.PackId);
        _records[key] = document;
        return Task.CompletedTask;
    }

    /// <summary>True when a document has been upserted for the given key.</summary>
    public bool Exists(string tenantId, Guid eventId, string packId)
    {
        return _records.ContainsKey((tenantId, eventId, packId));
    }
}
|
||||
@@ -0,0 +1,45 @@
|
||||
using System.Text.Json.Serialization;
|
||||
|
||||
namespace StellaOps.Notifier.WebService.Contracts;
|
||||
|
||||
/// <summary>
/// Request body for POST /api/v1/notify/pack-approvals describing a pack
/// approval decision event. The endpoint rejects requests where EventId is
/// empty or Kind/PackId/Decision/Actor are blank.
/// </summary>
public sealed class PackApprovalRequest
{
    /// <summary>Unique event identifier; Guid.Empty is rejected.</summary>
    [JsonPropertyName("eventId")]
    public Guid EventId { get; init; }

    /// <summary>Timestamp at which the approval decision was issued.</summary>
    [JsonPropertyName("issuedAt")]
    public DateTimeOffset IssuedAt { get; init; }

    /// <summary>Event kind, e.g. "pack.approval.granted"; required.</summary>
    [JsonPropertyName("kind")]
    public string Kind { get; init; } = string.Empty;

    /// <summary>Identifier of the pack the decision applies to; required.</summary>
    [JsonPropertyName("packId")]
    public string PackId { get; init; } = string.Empty;

    /// <summary>Optional policy reference under which the decision was made.</summary>
    [JsonPropertyName("policy")]
    public PackApprovalPolicy? Policy { get; init; }

    /// <summary>Decision outcome, e.g. "approved"; required.</summary>
    [JsonPropertyName("decision")]
    public string Decision { get; init; } = string.Empty;

    /// <summary>Actor that made the decision; required.</summary>
    [JsonPropertyName("actor")]
    public string Actor { get; init; } = string.Empty;

    /// <summary>Optional token echoed back via the X-Resume-After response header.</summary>
    [JsonPropertyName("resumeToken")]
    public string? ResumeToken { get; init; }

    /// <summary>Optional human-readable summary of the decision.</summary>
    [JsonPropertyName("summary")]
    public string? Summary { get; init; }

    /// <summary>Optional free-form labels attached to the approval.</summary>
    [JsonPropertyName("labels")]
    public Dictionary<string, string>? Labels { get; init; }
}
|
||||
|
||||
/// <summary>
/// Optional policy reference carried with a <see cref="PackApprovalRequest"/>.
/// Both fields are persisted verbatim on the stored approval document.
/// </summary>
public sealed class PackApprovalPolicy
{
    /// <summary>Policy identifier, if any.</summary>
    [JsonPropertyName("id")]
    public string? Id { get; init; }

    /// <summary>Policy version, if any.</summary>
    [JsonPropertyName("version")]
    public string? Version { get; init; }
}
|
||||
@@ -1,9 +1,14 @@
|
||||
using System.Text.Json;
|
||||
using Microsoft.AspNetCore.Builder;
|
||||
using Microsoft.AspNetCore.Http;
|
||||
using Microsoft.Extensions.Configuration;
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using Microsoft.Extensions.Hosting;
|
||||
using StellaOps.Notify.Storage.Mongo;
|
||||
using StellaOps.Notifier.WebService.Contracts;
|
||||
using StellaOps.Notifier.WebService.Setup;
|
||||
using StellaOps.Notify.Storage.Mongo;
|
||||
using StellaOps.Notify.Storage.Mongo.Documents;
|
||||
using StellaOps.Notify.Storage.Mongo.Repositories;
|
||||
|
||||
var builder = WebApplication.CreateBuilder(args);
|
||||
|
||||
@@ -13,6 +18,7 @@ builder.Configuration
|
||||
|
||||
var mongoSection = builder.Configuration.GetSection("notifier:storage:mongo");
|
||||
builder.Services.AddNotifyMongoStorage(mongoSection);
|
||||
builder.Services.AddSingleton<OpenApiDocumentCache>();
|
||||
|
||||
builder.Services.AddHealthChecks();
|
||||
builder.Services.AddHostedService<MongoInitializationHostedService>();
|
||||
@@ -21,4 +27,115 @@ var app = builder.Build();
|
||||
|
||||
app.MapHealthChecks("/healthz");

// Shared error envelope ({ error: { code, message, traceId } }) used by the
// endpoints below. As a top-level local function it must be declared among the
// top-level statements, i.e. BEFORE any type declaration such as the partial
// Program marker at the bottom of the file — placing it after a type
// declaration is a compile error (CS8803: top-level statements must precede
// namespace and type declarations).
static object Error(string code, string message, HttpContext context) => new
{
    error = new
    {
        code,
        message,
        traceId = context.TraceIdentifier
    }
};

// Deprecation headers for retiring v1 APIs (RFC 8594 / IETF Sunset)
app.Use(async (context, next) =>
{
    if (context.Request.Path.StartsWithSegments("/api/v1", StringComparison.OrdinalIgnoreCase))
    {
        context.Response.Headers["Deprecation"] = "true";
        context.Response.Headers["Sunset"] = "Tue, 31 Mar 2026 00:00:00 GMT";
        context.Response.Headers["Link"] =
            "<https://docs.stellaops.example.com/notify/deprecations>; rel=\"deprecation\"; type=\"text/html\"";
    }

    await next().ConfigureAwait(false);
});

// Ingests pack approval events. Requires tenant and idempotency headers;
// replays of the same Idempotency-Key within the lock TTL are acknowledged
// without re-persisting.
app.MapPost("/api/v1/notify/pack-approvals", async (
    HttpContext context,
    PackApprovalRequest request,
    INotifyLockRepository locks,
    INotifyPackApprovalRepository packApprovals,
    INotifyAuditRepository audit,
    TimeProvider timeProvider) =>
{
    var tenantId = context.Request.Headers["X-StellaOps-Tenant"].ToString();
    if (string.IsNullOrWhiteSpace(tenantId))
    {
        return Results.BadRequest(Error("tenant_missing", "X-StellaOps-Tenant header is required.", context));
    }

    var idempotencyKey = context.Request.Headers["Idempotency-Key"].ToString();
    if (string.IsNullOrWhiteSpace(idempotencyKey))
    {
        return Results.BadRequest(Error("idempotency_key_missing", "Idempotency-Key header is required.", context));
    }

    if (request.EventId == Guid.Empty || string.IsNullOrWhiteSpace(request.PackId) ||
        string.IsNullOrWhiteSpace(request.Kind) || string.IsNullOrWhiteSpace(request.Decision) ||
        string.IsNullOrWhiteSpace(request.Actor))
    {
        return Results.BadRequest(Error("invalid_request", "eventId, packId, kind, decision, actor are required.", context));
    }

    // Idempotency: the lock doubles as a dedupe record for 15 minutes.
    var lockKey = $"pack-approvals|{tenantId}|{idempotencyKey}";
    var ttl = TimeSpan.FromMinutes(15);
    var reserved = await locks.TryAcquireAsync(tenantId, lockKey, "pack-approvals", ttl, context.RequestAborted)
        .ConfigureAwait(false);

    if (!reserved)
    {
        // Duplicate delivery: acknowledge with 200 without re-writing state.
        return Results.StatusCode(StatusCodes.Status200OK);
    }

    var document = new PackApprovalDocument
    {
        TenantId = tenantId,
        EventId = request.EventId,
        PackId = request.PackId,
        Kind = request.Kind,
        Decision = request.Decision,
        Actor = request.Actor,
        IssuedAt = request.IssuedAt,
        PolicyId = request.Policy?.Id,
        PolicyVersion = request.Policy?.Version,
        ResumeToken = request.ResumeToken,
        Summary = request.Summary,
        Labels = request.Labels,
        CreatedAt = timeProvider.GetUtcNow()
    };

    await packApprovals.UpsertAsync(document, context.RequestAborted).ConfigureAwait(false);

    // Audit trail keeps the raw request payload, round-tripped JSON → BSON.
    var auditEntry = new NotifyAuditEntryDocument
    {
        TenantId = tenantId,
        Actor = request.Actor,
        Action = "pack.approval.ingested",
        EntityId = request.PackId,
        EntityType = "pack-approval",
        Timestamp = timeProvider.GetUtcNow(),
        Payload = MongoDB.Bson.Serialization.BsonSerializer.Deserialize<MongoDB.Bson.BsonDocument>(JsonSerializer.Serialize(request))
    };

    await audit.AppendAsync(auditEntry, context.RequestAborted).ConfigureAwait(false);

    // Echo the resume token so callers can continue processing after ingest.
    if (!string.IsNullOrWhiteSpace(request.ResumeToken))
    {
        context.Response.Headers["X-Resume-After"] = request.ResumeToken;
    }

    return Results.Accepted();
});

// Serves the bundled OpenAPI YAML with a content-hash ETag for client caching.
app.MapGet("/.well-known/openapi", (HttpContext context, OpenApiDocumentCache cache) =>
{
    context.Response.Headers.CacheControl = "public, max-age=300";
    context.Response.Headers["X-OpenAPI-Scope"] = "notify";
    context.Response.Headers.ETag = $"\"{cache.Sha256}\"";
    return Results.Content(cache.Document, "application/yaml");
});

app.Run();

// Type declarations must come after all top-level statements.
public partial class Program;
|
||||
|
||||
@@ -0,0 +1,28 @@
|
||||
using System.Text;
|
||||
|
||||
namespace StellaOps.Notifier.WebService.Setup;
|
||||
|
||||
/// <summary>
/// Loads the Notifier OpenAPI YAML document once at startup and caches both
/// the raw text and its lower-case hex SHA-256 digest (served as the
/// endpoint's ETag).
/// </summary>
public sealed class OpenApiDocumentCache
{
    /// <summary>
    /// Reads openapi/notify-openapi.yaml from the content root.
    /// </summary>
    /// <exception cref="FileNotFoundException">The document is missing.</exception>
    public OpenApiDocumentCache(IHostEnvironment environment)
    {
        var path = Path.Combine(environment.ContentRootPath, "openapi", "notify-openapi.yaml");
        if (!File.Exists(path))
        {
            throw new FileNotFoundException("OpenAPI document not found.", path);
        }

        Document = File.ReadAllText(path, Encoding.UTF8);

        // Static one-shot hash avoids allocating and disposing a SHA256
        // instance for a single computation.
        var digest = System.Security.Cryptography.SHA256.HashData(Encoding.UTF8.GetBytes(Document));
        Sha256 = Convert.ToHexString(digest).ToLowerInvariant();
    }

    /// <summary>Raw YAML document text.</summary>
    public string Document { get; }

    /// <summary>Lower-case hex SHA-256 of <see cref="Document"/>.</summary>
    public string Sha256 { get; }
}
|
||||
@@ -0,0 +1,6 @@
|
||||
namespace StellaOps.Notifier.WebService;
|
||||
|
||||
/// <summary>
/// Marker type used for testing/hosting the web application. Referenced by
/// <c>WebApplicationFactory&lt;WebServiceAssemblyMarker&gt;</c> in integration
/// tests to locate this assembly; carries no behavior of its own.
/// </summary>
public sealed class WebServiceAssemblyMarker;
|
||||
@@ -0,0 +1,501 @@
|
||||
# OpenAPI 3.1 specification for StellaOps Notifier WebService (draft)
|
||||
openapi: 3.1.0
|
||||
info:
|
||||
title: StellaOps Notifier API
|
||||
version: 0.6.0-draft
|
||||
description: |
|
||||
Contract for Notifications Studio (Notifier) covering rules, templates, incidents,
|
||||
and quiet hours. Uses the platform error envelope and tenant header `X-StellaOps-Tenant`.
|
||||
servers:
|
||||
- url: https://api.stellaops.example.com
|
||||
description: Production
|
||||
- url: https://api.dev.stellaops.example.com
|
||||
description: Development
|
||||
security:
|
||||
- oauth2: [notify.viewer]
|
||||
- oauth2: [notify.operator]
|
||||
- oauth2: [notify.admin]
|
||||
paths:
|
||||
/api/v1/notify/rules:
|
||||
get:
|
||||
summary: List notification rules
|
||||
tags: [Rules]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/PageSize'
|
||||
- $ref: '#/components/parameters/PageToken'
|
||||
responses:
|
||||
'200':
|
||||
description: Paginated rule list
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
items:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/NotifyRule' }
|
||||
nextPageToken:
|
||||
type: string
|
||||
examples:
|
||||
default:
|
||||
value:
|
||||
items:
|
||||
- ruleId: rule-critical
|
||||
tenantId: tenant-dev
|
||||
name: Critical scanner verdicts
|
||||
enabled: true
|
||||
match:
|
||||
eventKinds: [scanner.report.ready]
|
||||
minSeverity: critical
|
||||
actions:
|
||||
- actionId: act-slack-critical
|
||||
channel: chn-slack-soc
|
||||
template: tmpl-critical
|
||||
digest: instant
|
||||
nextPageToken: null
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
post:
|
||||
summary: Create a notification rule
|
||||
tags: [Rules]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyRule' }
|
||||
examples:
|
||||
create-rule:
|
||||
value:
|
||||
ruleId: rule-attest-fail
|
||||
tenantId: tenant-dev
|
||||
name: Attestation failures → SOC
|
||||
enabled: true
|
||||
match:
|
||||
eventKinds: [attestor.verification.failed]
|
||||
actions:
|
||||
- actionId: act-soc
|
||||
channel: chn-webhook-soc
|
||||
template: tmpl-attest-verify-fail
|
||||
responses:
|
||||
'201':
|
||||
description: Rule created
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyRule' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/rules/{ruleId}:
|
||||
get:
|
||||
summary: Fetch a rule
|
||||
tags: [Rules]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/RuleId'
|
||||
responses:
|
||||
'200':
|
||||
description: Rule
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyRule' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
patch:
|
||||
summary: Update a rule (partial)
|
||||
tags: [Rules]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/RuleId'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
description: JSON Merge Patch
|
||||
responses:
|
||||
'200':
|
||||
description: Updated rule
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyRule' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/templates:
|
||||
get:
|
||||
summary: List templates
|
||||
tags: [Templates]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- name: key
|
||||
in: query
|
||||
description: Filter by template key
|
||||
schema: { type: string }
|
||||
responses:
|
||||
'200':
|
||||
description: Templates
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
post:
|
||||
summary: Create a template
|
||||
tags: [Templates]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
responses:
|
||||
'201':
|
||||
description: Template created
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/templates/{templateId}:
|
||||
get:
|
||||
summary: Fetch a template
|
||||
tags: [Templates]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/TemplateId'
|
||||
responses:
|
||||
'200':
|
||||
description: Template
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
patch:
|
||||
summary: Update a template (partial)
|
||||
tags: [Templates]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/TemplateId'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
description: JSON Merge Patch
|
||||
responses:
|
||||
'200':
|
||||
description: Updated template
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/NotifyTemplate' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/incidents:
|
||||
get:
|
||||
summary: List incidents (paged)
|
||||
tags: [Incidents]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/PageSize'
|
||||
- $ref: '#/components/parameters/PageToken'
|
||||
responses:
|
||||
'200':
|
||||
description: Incident page
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
items:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/Incident' }
|
||||
nextPageToken: { type: string }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
post:
|
||||
summary: Raise an incident (ops/toggle/override)
|
||||
tags: [Incidents]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/Incident' }
|
||||
examples:
|
||||
start-incident:
|
||||
value:
|
||||
incidentId: inc-telemetry-outage
|
||||
kind: outage
|
||||
severity: major
|
||||
startedAt: 2025-11-17T04:02:00Z
|
||||
shortDescription: "Telemetry pipeline degraded; burn-rate breach"
|
||||
metadata:
|
||||
source: slo-evaluator
|
||||
responses:
|
||||
'202':
|
||||
description: Incident accepted
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/incidents/{incidentId}/ack:
|
||||
post:
|
||||
summary: Acknowledge an incident notification
|
||||
tags: [Incidents]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
- $ref: '#/components/parameters/IncidentId'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
ackToken:
|
||||
type: string
|
||||
description: DSSE-signed acknowledgement token
|
||||
responses:
|
||||
'204':
|
||||
description: Acknowledged
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
/api/v1/notify/quiet-hours:
|
||||
get:
|
||||
summary: Get quiet-hours schedule
|
||||
tags: [QuietHours]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
responses:
|
||||
'200':
|
||||
description: Quiet hours schedule
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/QuietHours' }
|
||||
examples:
|
||||
current:
|
||||
value:
|
||||
quietHoursId: qh-default
|
||||
windows:
|
||||
- timezone: UTC
|
||||
days: [Mon, Tue, Wed, Thu, Fri]
|
||||
start: "22:00"
|
||||
end: "06:00"
|
||||
exemptions:
|
||||
- eventKinds: [attestor.verification.failed]
|
||||
reason: "Always alert for attestation failures"
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
post:
|
||||
summary: Set quiet-hours schedule
|
||||
tags: [QuietHours]
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/Tenant'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/QuietHours' }
|
||||
responses:
|
||||
'200':
|
||||
description: Updated quiet hours
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/QuietHours' }
|
||||
default:
|
||||
$ref: '#/components/responses/Error'
|
||||
|
||||
components:
|
||||
securitySchemes:
|
||||
oauth2:
|
||||
type: oauth2
|
||||
flows:
|
||||
clientCredentials:
|
||||
tokenUrl: https://auth.stellaops.example.com/oauth/token
|
||||
scopes:
|
||||
notify.viewer: Read-only Notifier access
|
||||
notify.operator: Manage rules/templates/incidents within tenant
|
||||
notify.admin: Tenant-scoped administration
|
||||
parameters:
|
||||
Tenant:
|
||||
name: X-StellaOps-Tenant
|
||||
in: header
|
||||
required: true
|
||||
description: Tenant slug
|
||||
schema: { type: string }
|
||||
PageSize:
|
||||
name: pageSize
|
||||
in: query
|
||||
schema: { type: integer, minimum: 1, maximum: 200, default: 50 }
|
||||
PageToken:
|
||||
name: pageToken
|
||||
in: query
|
||||
schema: { type: string }
|
||||
RuleId:
|
||||
name: ruleId
|
||||
in: path
|
||||
required: true
|
||||
schema: { type: string }
|
||||
TemplateId:
|
||||
name: templateId
|
||||
in: path
|
||||
required: true
|
||||
schema: { type: string }
|
||||
IncidentId:
|
||||
name: incidentId
|
||||
in: path
|
||||
required: true
|
||||
schema: { type: string }
|
||||
|
||||
responses:
|
||||
Error:
|
||||
description: Standard error envelope
|
||||
content:
|
||||
application/json:
|
||||
schema: { $ref: '#/components/schemas/ErrorEnvelope' }
|
||||
examples:
|
||||
validation:
|
||||
value:
|
||||
error:
|
||||
code: validation_failed
|
||||
message: "quietHours.windows[0].start must be HH:mm"
|
||||
traceId: "f62f3c2b9c8e4c53"
|
||||
|
||||
schemas:
|
||||
ErrorEnvelope:
|
||||
type: object
|
||||
required: [error]
|
||||
properties:
|
||||
error:
|
||||
type: object
|
||||
required: [code, message, traceId]
|
||||
properties:
|
||||
code: { type: string }
|
||||
message: { type: string }
|
||||
traceId: { type: string }
|
||||
|
||||
NotifyRule:
|
||||
type: object
|
||||
required: [ruleId, tenantId, name, match, actions]
|
||||
properties:
|
||||
ruleId: { type: string }
|
||||
tenantId: { type: string }
|
||||
name: { type: string }
|
||||
description: { type: string }
|
||||
enabled: { type: boolean, default: true }
|
||||
match: { $ref: '#/components/schemas/RuleMatch' }
|
||||
actions:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/RuleAction' }
|
||||
labels:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
metadata:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
|
||||
RuleMatch:
|
||||
type: object
|
||||
properties:
|
||||
eventKinds:
|
||||
type: array
|
||||
items: { type: string }
|
||||
minSeverity: { type: string, enum: [info, low, medium, high, critical] }
|
||||
verdicts:
|
||||
type: array
|
||||
items: { type: string }
|
||||
labels:
|
||||
type: array
|
||||
items: { type: string }
|
||||
kevOnly: { type: boolean }
|
||||
|
||||
RuleAction:
|
||||
type: object
|
||||
required: [actionId, channel]
|
||||
properties:
|
||||
actionId: { type: string }
|
||||
channel: { type: string }
|
||||
template: { type: string }
|
||||
digest: { type: string, description: "Digest window key e.g. instant|5m|15m|1h|1d" }
|
||||
throttle: { type: string, description: "ISO-8601 duration, e.g. PT5M" }
|
||||
locale: { type: string }
|
||||
enabled: { type: boolean, default: true }
|
||||
metadata:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
|
||||
NotifyTemplate:
|
||||
type: object
|
||||
required: [templateId, tenantId, key, channelType, locale, body, renderMode, format]
|
||||
properties:
|
||||
templateId: { type: string }
|
||||
tenantId: { type: string }
|
||||
key: { type: string }
|
||||
channelType: { type: string, enum: [slack, teams, email, webhook, custom] }
|
||||
locale: { type: string, description: "BCP-47, lower-case" }
|
||||
renderMode: { type: string, enum: [Markdown, Html, AdaptiveCard, PlainText, Json] }
|
||||
format: { type: string, enum: [slack, teams, email, webhook, json] }
|
||||
description: { type: string }
|
||||
body: { type: string }
|
||||
metadata:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
|
||||
Incident:
|
||||
type: object
|
||||
required: [incidentId, kind, severity, startedAt]
|
||||
properties:
|
||||
incidentId: { type: string }
|
||||
kind: { type: string, description: "outage|degradation|security|ops-drill" }
|
||||
severity: { type: string, enum: [minor, major, critical] }
|
||||
startedAt: { type: string, format: date-time }
|
||||
endedAt: { type: string, format: date-time }
|
||||
shortDescription: { type: string }
|
||||
description: { type: string }
|
||||
metadata:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
|
||||
QuietHours:
|
||||
type: object
|
||||
required: [quietHoursId, windows]
|
||||
properties:
|
||||
quietHoursId: { type: string }
|
||||
windows:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/QuietHoursWindow' }
|
||||
exemptions:
|
||||
type: array
|
||||
description: Event kinds that bypass quiet hours
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
eventKinds:
|
||||
type: array
|
||||
items: { type: string }
|
||||
reason: { type: string }
|
||||
|
||||
QuietHoursWindow:
|
||||
type: object
|
||||
required: [timezone, days, start, end]
|
||||
properties:
|
||||
timezone: { type: string, description: "IANA timezone identifier, e.g., Europe/Berlin or UTC" }
|
||||
days:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
enum: [Mon, Tue, Wed, Thu, Fri, Sat, Sun]
|
||||
start: { type: string, description: "HH:mm" }
|
||||
end: { type: string, description: "HH:mm" }
|
||||
15
src/Notifier/StellaOps.Notifier/TASKS.md
Normal file
15
src/Notifier/StellaOps.Notifier/TASKS.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# Sprint 171 · Notifier.I
|
||||
|
||||
| ID | Status | Owner(s) | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| NOTIFY-ATTEST-74-001 | DONE (2025-11-16) | Notifications Service Guild | Attestation template suite complete; Slack expiry template added; coverage tests guard required channels. |
|
||||
| NOTIFY-ATTEST-74-002 | TODO | Notifications Service Guild · KMS Guild | Wire notifications to key rotation/revocation events + transparency witness failures (depends on 74-001). |
|
||||
| NOTIFY-OAS-61-001 | DONE (2025-11-17) | Notifications Service Guild · API Contracts Guild | OAS updated with rules/templates/incidents/quiet hours and standard error envelope. |
|
||||
| NOTIFY-OAS-61-002 | DONE (2025-11-17) | Notifications Service Guild | `.well-known/openapi` discovery endpoint with scope metadata implemented. |
|
||||
| NOTIFY-OAS-62-001 | DONE (2025-11-17) | Notifications Service Guild · SDK Generator Guild | SDK usage examples + smoke tests (depends on 61-002). |
|
||||
| NOTIFY-OAS-63-001 | TODO | Notifications Service Guild · API Governance Guild | Deprecation headers + template notices for retiring APIs (depends on 62-001). |
|
||||
| NOTIFY-OBS-51-001 | TODO | Notifications Service Guild · Observability Guild | Integrate SLO evaluator webhooks once schema lands. |
|
||||
| NOTIFY-OBS-55-001 | TODO | Notifications Service Guild · Ops Guild | Incident mode start/stop notifications; quiet-hour overrides. |
|
||||
| NOTIFY-RISK-66-001 | TODO | Notifications Service Guild · Risk Engine Guild | Trigger risk severity escalation/downgrade notifications (waiting on Policy export). |
|
||||
| NOTIFY-RISK-67-001 | TODO | Notifications Service Guild · Policy Guild | Notify when risk profiles publish/deprecate/threshold-change (depends on 66-001). |
|
||||
| NOTIFY-RISK-68-001 | TODO | Notifications Service Guild | Per-profile routing rules + quiet hours for risk alerts (depends on 67-001). |
|
||||
15
src/Notifier/StellaOps.Notifier/docs/NOTIFY-OAS-61-ETAG.md
Normal file
15
src/Notifier/StellaOps.Notifier/docs/NOTIFY-OAS-61-ETAG.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# Notifier OAS Discovery — ETag Guidance
|
||||
|
||||
The Notifier WebService exposes its OpenAPI document at `/.well-known/openapi` with headers:
|
||||
|
||||
- `X-OpenAPI-Scope: notify`
|
||||
- `ETag: "<sha256>"` (stable per spec bytes)
|
||||
- `Cache-Control: public, max-age=300`
|
||||
|
||||
Usage notes:
|
||||
|
||||
- SDK generators and CI smoke tests should re-use the `ETag` for conditional GETs (`If-None-Match`) to avoid redundant downloads.
|
||||
- Mirror/Offline bundles should copy `openapi/notify-openapi.yaml` and retain the `ETag` alongside the file hash used in air-gap validation.
|
||||
- When the spec changes, the SHA-256 and `ETag` change together; callers can detect breaking/non-breaking updates via the published changelog (source of truth in `docs/api/notify-openapi.yaml`).
|
||||
|
||||
Applies to tasks: NOTIFY-OAS-61-001/61-002/63-001.
|
||||
@@ -0,0 +1,49 @@
|
||||
using MongoDB.Bson;
using MongoDB.Bson.Serialization.Attributes;

namespace StellaOps.Notify.Storage.Mongo.Documents;

/// <summary>
/// MongoDB persistence document recording a single pack approval decision:
/// which tenant/pack it concerns, who acted, what was decided, and when.
/// Immutable after construction (<c>init</c>-only setters).
/// </summary>
public sealed class PackApprovalDocument
{
    /// <summary>Mongo-assigned primary key for this document.</summary>
    [BsonId]
    public ObjectId Id { get; init; }

    /// <summary>Tenant that owns this approval record.</summary>
    [BsonElement("tenantId")]
    public required string TenantId { get; init; }

    /// <summary>Identifier of the event that produced this approval record.</summary>
    [BsonElement("eventId")]
    public required Guid EventId { get; init; }

    /// <summary>Identifier of the pack the decision applies to.</summary>
    [BsonElement("packId")]
    public required string PackId { get; init; }

    /// <summary>
    /// Kind of approval event. Free-form string here; allowed values are defined
    /// by the producer — NOTE(review): confirm the value set against the emitter.
    /// </summary>
    [BsonElement("kind")]
    public required string Kind { get; init; }

    /// <summary>
    /// Recorded decision value. Presumably approve/reject-style outcomes —
    /// NOTE(review): confirm against the producer contract.
    /// </summary>
    [BsonElement("decision")]
    public required string Decision { get; init; }

    /// <summary>Principal (user or service) that made the decision.</summary>
    [BsonElement("actor")]
    public required string Actor { get; init; }

    /// <summary>Timestamp at which the decision was issued by the actor.</summary>
    [BsonElement("issuedAt")]
    public required DateTimeOffset IssuedAt { get; init; }

    /// <summary>Optional identifier of the policy under which the decision was made.</summary>
    [BsonElement("policyId")]
    public string? PolicyId { get; init; }

    /// <summary>Optional version of the policy referenced by <see cref="PolicyId"/>.</summary>
    [BsonElement("policyVersion")]
    public string? PolicyVersion { get; init; }

    /// <summary>
    /// Optional opaque token. Name suggests a change-stream/consumer resume position —
    /// NOTE(review): confirm producer semantics; treated as an opaque string here.
    /// </summary>
    [BsonElement("resumeToken")]
    public string? ResumeToken { get; init; }

    /// <summary>Optional human-readable summary of the decision.</summary>
    [BsonElement("summary")]
    public string? Summary { get; init; }

    /// <summary>Optional free-form string labels attached to the record.</summary>
    [BsonElement("labels")]
    public Dictionary<string, string>? Labels { get; init; }

    /// <summary>Timestamp at which this document was written to storage (distinct from <see cref="IssuedAt"/>).</summary>
    [BsonElement("createdAt")]
    public required DateTimeOffset CreatedAt { get; init; }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user