CI/CD consolidation

This commit is contained in:
StellaOps Bot
2025-12-26 17:32:23 +02:00
parent a866eb6277
commit c786faae84
638 changed files with 3821 additions and 181 deletions

1
devops/telemetry/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
certs/

View File

@@ -0,0 +1,35 @@
# Telemetry Collector Assets
These assets provision the default OpenTelemetry Collector instance required by
`DEVOPS-OBS-50-001`. The collector acts as the secured ingest point for traces,
metrics, and logs emitted by StellaOps services.
## Contents
| File | Purpose |
| ---- | ------- |
| `otel-collector-config.yaml` | Baseline collector configuration (mutual TLS, OTLP receivers, Prometheus exporter). |
| `storage/prometheus.yaml` | Prometheus scrape configuration tuned for the collector and service tenants. |
| `storage/tempo.yaml` | Tempo configuration with multitenancy, WAL, and compaction settings. |
| `storage/loki.yaml` | Loki configuration enabling multitenant log ingestion with retention policies. |
| `storage/tenants/*.yaml` | Per-tenant overrides for Tempo and Loki rate/retention controls. |
## Development workflow
1. Generate development certificates (collector + client) using
`ops/devops/telemetry/generate_dev_tls.sh`.
2. Launch the collector via `docker compose -f docker-compose.telemetry.yaml up`.
3. Launch the storage backends (Prometheus, Tempo, Loki) via
`docker compose -f docker-compose.telemetry-storage.yaml up`.
4. Run the smoke test: `python ops/devops/telemetry/smoke_otel_collector.py`.
5. Explore the storage configuration (`storage/README.md`) to tune retention/limits.
The smoke test sends OTLP traffic over TLS and asserts the collector accepted
traces, metrics, and logs by scraping the Prometheus metrics endpoint.
## Kubernetes
The Helm chart consumes the same configuration (see `values.yaml`). Provide TLS
material via a secret referenced by `telemetry.collector.tls.secretName`,
containing `ca.crt`, `tls.crt`, and `tls.key`. Client certificates are required
for ingestion and should be issued by the same CA.

View File

@@ -0,0 +1,164 @@
# ExportCenter Alert Rules
# SLO burn-rate alerts for export service reliability.
# Burn-rate methodology: a multiplier N on the error-budget rate (0.5%) means
# the budget would be exhausted in (30 days / N) if the rate is sustained.
---
groups:
  - name: export-center-slo
    interval: 30s
    rules:
      # SLO: 99.5% success rate target
      # Error budget: 0.5% (432 errors per day at 86400 requests/day)
      # Fast burn - 2% budget consumption in 1 hour (critical)
      - alert: ExportCenterHighErrorBurnRate
        expr: |
          (
            sum(rate(export_runs_failed_total[1h]))
            /
            sum(rate(export_runs_total[1h]))
          ) > (14.4 * 0.005)
        for: 2m
        labels:
          severity: critical
          service: export-center
          slo: availability
        annotations:
          summary: "ExportCenter high error burn rate"
          description: "Error rate is {{ $value | humanizePercentage }} over the last hour, consuming error budget at 14.4x the sustainable rate."
          runbook_url: "https://docs.stellaops.io/runbooks/export-center/high-error-rate"

      # Slow burn - 10% budget consumption in 6 hours (warning)
      - alert: ExportCenterElevatedErrorBurnRate
        expr: |
          (
            sum(rate(export_runs_failed_total[6h]))
            /
            sum(rate(export_runs_total[6h]))
          ) > (6 * 0.005)
        for: 5m
        labels:
          severity: warning
          service: export-center
          slo: availability
        annotations:
          summary: "ExportCenter elevated error burn rate"
          description: "Error rate is {{ $value | humanizePercentage }} over the last 6 hours, consuming error budget at 6x the sustainable rate."
          runbook_url: "https://docs.stellaops.io/runbooks/export-center/elevated-error-rate"

  - name: export-center-latency
    interval: 30s
    rules:
      # SLO: 95% of exports complete within 120s
      # Fast burn - p95 latency exceeding threshold
      - alert: ExportCenterHighLatency
        expr: |
          histogram_quantile(0.95,
            sum(rate(export_run_duration_seconds_bucket[5m])) by (le)
          ) > 120
        for: 5m
        labels:
          severity: warning
          service: export-center
          slo: latency
        annotations:
          summary: "ExportCenter high latency"
          description: "95th percentile export duration is {{ $value | humanizeDuration }}, exceeding 120s SLO target."
          runbook_url: "https://docs.stellaops.io/runbooks/export-center/high-latency"

      # Critical latency - p99 exceeding 5 minutes
      - alert: ExportCenterCriticalLatency
        expr: |
          histogram_quantile(0.99,
            sum(rate(export_run_duration_seconds_bucket[5m])) by (le)
          ) > 300
        for: 2m
        labels:
          severity: critical
          service: export-center
          slo: latency
        annotations:
          summary: "ExportCenter critical latency"
          description: "99th percentile export duration is {{ $value | humanizeDuration }}, indicating severe performance degradation."
          runbook_url: "https://docs.stellaops.io/runbooks/export-center/critical-latency"

  - name: export-center-capacity
    interval: 60s
    rules:
      # Queue buildup warning
      - alert: ExportCenterHighConcurrency
        expr: sum(export_runs_in_progress) > 50
        for: 5m
        labels:
          severity: warning
          service: export-center
        annotations:
          summary: "ExportCenter high concurrency"
          description: "{{ $value }} exports currently in progress. Consider scaling or investigating slow exports."
          runbook_url: "https://docs.stellaops.io/runbooks/export-center/high-concurrency"

      # Stuck exports - exports running longer than 30 minutes
      # NOTE(review): assumes export_run_duration_seconds is also observed for
      # non-completed runs (status label present on the bucket series) — confirm
      # against the service instrumentation.
      - alert: ExportCenterStuckExports
        expr: |
          histogram_quantile(0.99,
            sum(rate(export_run_duration_seconds_bucket{status!="completed"}[1h])) by (le)
          ) > 1800
        for: 10m
        labels:
          severity: warning
          service: export-center
        annotations:
          summary: "ExportCenter potentially stuck exports"
          description: "Some exports may be stuck - 99th percentile duration for incomplete exports exceeds 30 minutes."
          runbook_url: "https://docs.stellaops.io/runbooks/export-center/stuck-exports"

  - name: export-center-errors
    interval: 30s
    rules:
      # Specific error code spike
      - alert: ExportCenterErrorCodeSpike
        expr: |
          sum by (error_code) (
            rate(export_runs_failed_total[5m])
          ) > 0.1
        for: 5m
        labels:
          severity: warning
          service: export-center
        annotations:
          summary: "ExportCenter error code spike: {{ $labels.error_code }}"
          description: "Error code {{ $labels.error_code }} is occurring at {{ $value | humanize }}/s rate."
          runbook_url: "https://docs.stellaops.io/runbooks/export-center/error-codes"

      # No successful exports in 15 minutes (when there is traffic)
      - alert: ExportCenterNoSuccessfulExports
        expr: |
          (
            sum(rate(export_runs_total[15m])) > 0
          )
          and
          (
            sum(rate(export_runs_success_total[15m])) == 0
          )
        for: 10m
        labels:
          severity: critical
          service: export-center
        annotations:
          summary: "ExportCenter no successful exports"
          description: "No exports have completed successfully in the last 15 minutes despite ongoing attempts."
          runbook_url: "https://docs.stellaops.io/runbooks/export-center/no-successful-exports"

  - name: export-center-deprecation
    interval: 5m
    rules:
      # Deprecated endpoint usage
      - alert: ExportCenterDeprecatedEndpointUsage
        expr: |
          sum(rate(export_center_deprecated_endpoint_access_total[1h])) > 0
        for: 1h
        labels:
          severity: info
          service: export-center
        annotations:
          summary: "Deprecated export endpoints still in use"
          description: "Legacy /exports endpoints are still being accessed at {{ $value | humanize }}/s. Migration to v1 API recommended."
          runbook_url: "https://docs.stellaops.io/api/export-center/migration"

View File

@@ -0,0 +1,42 @@
# Scanner FN-Drift Alert Rules
# SLO alerts for false-negative drift thresholds (30-day rolling window).
# scanner_fn_drift_percent is expected to already be a percentage value
# (compared against 1.0 / 2.5 below) — confirm against the exporter.
---
groups:
  - name: scanner-fn-drift
    interval: 30s
    rules:
      # Warning threshold: drift above 1.0% over the 30-day window.
      - alert: ScannerFnDriftWarning
        expr: scanner_fn_drift_percent > 1.0
        for: 5m
        labels:
          severity: warning
          service: scanner
          slo: fn-drift
        annotations:
          summary: "Scanner FN-Drift rate above warning threshold"
          description: "FN-Drift is {{ $value | humanizePercentage }} (> 1.0%) over the 30-day rolling window."
          runbook_url: "https://docs.stellaops.io/runbooks/scanner/fn-drift-warning"

      # Critical threshold: drift above 2.5% over the 30-day window.
      - alert: ScannerFnDriftCritical
        expr: scanner_fn_drift_percent > 2.5
        for: 5m
        labels:
          severity: critical
          service: scanner
          slo: fn-drift
        annotations:
          summary: "Scanner FN-Drift rate above critical threshold"
          description: "FN-Drift is {{ $value | humanizePercentage }} (> 2.5%) over the 30-day rolling window."
          runbook_url: "https://docs.stellaops.io/runbooks/scanner/fn-drift-critical"

      # Any engine-caused drift is a determinism violation and pages immediately.
      - alert: ScannerFnDriftEngineViolation
        expr: scanner_fn_drift_cause_engine > 0
        for: 1m
        labels:
          severity: page
          service: scanner
          slo: determinism
        annotations:
          summary: "Engine-caused FN drift detected (determinism violation)"
          description: "Engine-caused FN drift count is {{ $value }} (> 0). This indicates non-feed, non-policy changes affecting outcomes."
          runbook_url: "https://docs.stellaops.io/runbooks/scanner/fn-drift-engine-violation"

View File

@@ -0,0 +1,638 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": { "type": "grafana", "uid": "-- Grafana --" },
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "ExportCenter service observability dashboard",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": null,
"links": [],
"liveNow": false,
"panels": [
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 },
"id": 1,
"panels": [],
"title": "Export Runs Overview",
"type": "row"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null }
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": { "h": 4, "w": 4, "x": 0, "y": 1 },
"id": 2,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "sum(increase(export_runs_total{tenant=~\"$tenant\"}[$__range]))",
"legendFormat": "Total Runs",
"range": true,
"refId": "A"
}
],
"title": "Total Export Runs",
"type": "stat"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null }
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": { "h": 4, "w": 4, "x": 4, "y": 1 },
"id": 3,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "sum(increase(export_runs_success_total{tenant=~\"$tenant\"}[$__range]))",
"legendFormat": "Successful",
"range": true,
"refId": "A"
}
],
"title": "Successful Runs",
"type": "stat"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null },
{ "color": "yellow", "value": 1 },
{ "color": "red", "value": 5 }
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": { "h": 4, "w": 4, "x": 8, "y": 1 },
"id": 4,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "sum(increase(export_runs_failed_total{tenant=~\"$tenant\"}[$__range]))",
"legendFormat": "Failed",
"range": true,
"refId": "A"
}
],
"title": "Failed Runs",
"type": "stat"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "red", "value": null },
{ "color": "yellow", "value": 95 },
{ "color": "green", "value": 99 }
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": { "h": 4, "w": 4, "x": 12, "y": 1 },
"id": 5,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "100 * sum(increase(export_runs_success_total{tenant=~\"$tenant\"}[$__range])) / sum(increase(export_runs_total{tenant=~\"$tenant\"}[$__range]))",
"legendFormat": "Success Rate",
"range": true,
"refId": "A"
}
],
"title": "Success Rate",
"type": "stat"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null }
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": { "h": 4, "w": 4, "x": 16, "y": 1 },
"id": 6,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "sum(export_runs_in_progress{tenant=~\"$tenant\"})",
"legendFormat": "In Progress",
"range": true,
"refId": "A"
}
],
"title": "Runs In Progress",
"type": "stat"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": { "legend": false, "tooltip": false, "viz": false },
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": { "type": "linear" },
"showPoints": "auto",
"spanNulls": false,
"stacking": { "group": "A", "mode": "none" },
"thresholdsStyle": { "mode": "off" }
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "short"
},
"overrides": []
},
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 5 },
"id": 7,
"options": {
"legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "sum by (export_type) (rate(export_runs_total{tenant=~\"$tenant\"}[5m]))",
"legendFormat": "{{export_type}}",
"range": true,
"refId": "A"
}
],
"title": "Export Runs by Type (rate/5m)",
"type": "timeseries"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": { "legend": false, "tooltip": false, "viz": false },
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": { "type": "linear" },
"showPoints": "auto",
"spanNulls": false,
"stacking": { "group": "A", "mode": "none" },
"thresholdsStyle": { "mode": "off" }
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "s"
},
"overrides": []
},
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 5 },
"id": 8,
"options": {
"legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "histogram_quantile(0.50, sum by (le) (rate(export_run_duration_seconds_bucket{tenant=~\"$tenant\"}[5m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "histogram_quantile(0.95, sum by (le) (rate(export_run_duration_seconds_bucket{tenant=~\"$tenant\"}[5m])))",
"legendFormat": "p95",
"range": true,
"refId": "B"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(export_run_duration_seconds_bucket{tenant=~\"$tenant\"}[5m])))",
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Export Run Duration (latency percentiles)",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 13 },
"id": 9,
"panels": [],
"title": "Artifacts & Bundle Sizes",
"type": "row"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 50,
"gradientMode": "none",
"hideFrom": { "legend": false, "tooltip": false, "viz": false },
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": { "type": "linear" },
"showPoints": "never",
"spanNulls": false,
"stacking": { "group": "A", "mode": "normal" },
"thresholdsStyle": { "mode": "off" }
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "short"
},
"overrides": []
},
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 14 },
"id": 10,
"options": {
"legend": { "calcs": ["sum"], "displayMode": "table", "placement": "bottom", "showLegend": true },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "sum by (artifact_type) (increase(export_artifacts_total{tenant=~\"$tenant\"}[1h]))",
"legendFormat": "{{artifact_type}}",
"range": true,
"refId": "A"
}
],
"title": "Artifacts Exported by Type (per hour)",
"type": "timeseries"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": { "legend": false, "tooltip": false, "viz": false },
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": { "type": "linear" },
"showPoints": "auto",
"spanNulls": false,
"stacking": { "group": "A", "mode": "none" },
"thresholdsStyle": { "mode": "off" }
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "bytes"
},
"overrides": []
},
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 14 },
"id": 11,
"options": {
"legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "histogram_quantile(0.50, sum by (le, export_type) (rate(export_bundle_size_bytes_bucket{tenant=~\"$tenant\"}[5m])))",
"legendFormat": "{{export_type}} p50",
"range": true,
"refId": "A"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "histogram_quantile(0.95, sum by (le, export_type) (rate(export_bundle_size_bytes_bucket{tenant=~\"$tenant\"}[5m])))",
"legendFormat": "{{export_type}} p95",
"range": true,
"refId": "B"
}
],
"title": "Bundle Size Distribution by Type",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 22 },
"id": 12,
"panels": [],
"title": "Error Analysis",
"type": "row"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"hideFrom": { "legend": false, "tooltip": false, "viz": false }
},
"mappings": [],
"unit": "short"
},
"overrides": []
},
"gridPos": { "h": 8, "w": 8, "x": 0, "y": 23 },
"id": 13,
"options": {
"legend": { "displayMode": "table", "placement": "right", "showLegend": true },
"pieType": "pie",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
"tooltip": { "mode": "single", "sort": "none" }
},
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "sum by (error_code) (increase(export_runs_failed_total{tenant=~\"$tenant\"}[$__range]))",
"legendFormat": "{{error_code}}",
"range": true,
"refId": "A"
}
],
"title": "Failures by Error Code",
"type": "piechart"
},
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": { "legend": false, "tooltip": false, "viz": false },
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": { "type": "linear" },
"showPoints": "never",
"spanNulls": false,
"stacking": { "group": "A", "mode": "none" },
"thresholdsStyle": { "mode": "line" }
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null },
{ "color": "red", "value": 0.01 }
]
},
"unit": "percentunit"
},
"overrides": []
},
"gridPos": { "h": 8, "w": 16, "x": 8, "y": 23 },
"id": 14,
"options": {
"legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"editorMode": "code",
"expr": "sum(rate(export_runs_failed_total{tenant=~\"$tenant\"}[5m])) / sum(rate(export_runs_total{tenant=~\"$tenant\"}[5m]))",
"legendFormat": "Error Rate",
"range": true,
"refId": "A"
}
],
"title": "Error Rate (5m window)",
"type": "timeseries"
}
],
"refresh": "30s",
"schemaVersion": 38,
"style": "dark",
"tags": ["export-center", "stellaops"],
"templating": {
"list": [
{
"current": {},
"hide": 0,
"includeAll": false,
"multi": false,
"name": "datasource",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
},
{
"allValue": ".*",
"current": {},
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"definition": "label_values(export_runs_total, tenant)",
"hide": 0,
"includeAll": true,
"multi": true,
"name": "tenant",
"options": [],
"query": { "query": "label_values(export_runs_total, tenant)", "refId": "StandardVariableQuery" },
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
}
]
},
"time": { "from": "now-6h", "to": "now" },
"timepicker": {},
"timezone": "utc",
"title": "ExportCenter Service",
"uid": "export-center-overview",
"version": 1,
"weekStart": ""
}

View File

@@ -0,0 +1,555 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": null,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"max": 1,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "yellow",
"value": 0.9
},
{
"color": "green",
"value": 0.95
}
]
},
"unit": "percentunit"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(stella_attestations_created_total) / (sum(stella_attestations_created_total) + sum(stella_attestations_failed_total))",
"refId": "A"
}
],
"title": "Attestation Completeness (Target: ≥95%)",
"type": "gauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 80,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 30
}
]
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 9,
"x": 6,
"y": 0
},
"id": 2,
"options": {
"legend": {
"calcs": ["mean", "max"],
"displayMode": "table",
"placement": "right",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "histogram_quantile(0.95, rate(stella_ttfe_seconds_bucket[5m]))",
"legendFormat": "p95",
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "histogram_quantile(0.50, rate(stella_ttfe_seconds_bucket[5m]))",
"legendFormat": "p50",
"refId": "B"
}
],
"title": "TTFE Distribution (Target: ≤30s)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"max": 1,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "percentunit"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 9,
"x": 15,
"y": 0
},
"id": 3,
"options": {
"legend": {
"calcs": ["mean", "last"],
"displayMode": "table",
"placement": "right",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(rate(stella_attestations_verified_total[5m])) / (sum(rate(stella_attestations_verified_total[5m])) + sum(rate(stella_attestations_failed_total[5m])))",
"legendFormat": "Success Rate",
"refId": "A"
}
],
"title": "Verification Success Rate",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "line"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 1
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 4,
"options": {
"legend": {
"calcs": ["sum"],
"displayMode": "table",
"placement": "right",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum by (environment, reason) (rate(stella_post_deploy_reversions_total[5m]))",
"legendFormat": "{{environment}}: {{reason}}",
"refId": "A"
}
],
"title": "Post-Deploy Reversions (Trend to Zero)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 12,
"y": 8
},
"id": 5,
"options": {
"legend": {
"displayMode": "table",
"placement": "right",
"showLegend": true,
"values": ["value"]
},
"pieType": "pie",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum by (predicate_type) (stella_attestations_created_total)",
"legendFormat": "{{predicate_type}}",
"refId": "A"
}
],
"title": "Attestations by Type",
"type": "piechart"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 18,
"y": 8
},
"id": 6,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(stella_attestations_failed_total{reason=\"stale_evidence\"})",
"legendFormat": "Stale Evidence Alerts",
"refId": "A"
}
],
"title": "Stale Evidence Alerts",
"type": "timeseries"
}
],
"refresh": "30s",
"schemaVersion": 38,
"style": "dark",
"tags": ["stellaops", "attestations", "security"],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "Prometheus",
"value": "Prometheus"
},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"multi": false,
"name": "DS_PROMETHEUS",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "StellaOps - Attestation Metrics",
"uid": "stellaops-attestations",
"version": 1,
"weekStart": ""
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,92 @@
# Baseline OpenTelemetry Collector configuration for the StellaOps telemetry
# ingest point: mTLS-protected OTLP receivers, tenant tagging, and export to
# Prometheus (metrics), Tempo (traces), and Loki (logs).
#
# NOTE(review): placeholders use shell-style ${VAR:?err} / ${VAR:default}
# forms rather than the collector's native ${env:VAR:-default} syntax --
# confirm this file is rendered (e.g. via envsubst) before the collector
# loads it.
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
        # Server TLS with a client CA: peers must present a client certificate.
        tls:
          cert_file: ${STELLAOPS_OTEL_TLS_CERT:?STELLAOPS_OTEL_TLS_CERT not set}
          key_file: ${STELLAOPS_OTEL_TLS_KEY:?STELLAOPS_OTEL_TLS_KEY not set}
          client_ca_file: ${STELLAOPS_OTEL_TLS_CA:?STELLAOPS_OTEL_TLS_CA not set}
          require_client_certificate: ${STELLAOPS_OTEL_REQUIRE_CLIENT_CERT:true}
      http:
        endpoint: 0.0.0.0:4318
        tls:
          cert_file: ${STELLAOPS_OTEL_TLS_CERT:?STELLAOPS_OTEL_TLS_CERT not set}
          key_file: ${STELLAOPS_OTEL_TLS_KEY:?STELLAOPS_OTEL_TLS_KEY not set}
          client_ca_file: ${STELLAOPS_OTEL_TLS_CA:?STELLAOPS_OTEL_TLS_CA not set}
          require_client_certificate: ${STELLAOPS_OTEL_REQUIRE_CLIENT_CERT:true}
processors:
  # Stamp every signal with the owning tenant so downstream backends can route
  # and enforce per-tenant limits.
  attributes/tenant-tag:
    actions:
      - key: tenant.id
        action: insert
        value: ${STELLAOPS_TENANT_ID:unknown}
  batch:
    send_batch_size: 1024
    timeout: 5s
exporters:
  # NOTE(review): the `logging` exporter is deprecated in recent collector
  # releases in favour of `debug` -- confirm against the pinned image version.
  logging:
    verbosity: normal
  # Prometheus pull endpoint scraped by deploy/telemetry/storage/prometheus.yaml.
  prometheus:
    endpoint: ${STELLAOPS_OTEL_PROMETHEUS_ENDPOINT:0.0.0.0:9464}
    enable_open_metrics: true
    metric_expiration: 5m
    tls:
      cert_file: ${STELLAOPS_OTEL_TLS_CERT:?STELLAOPS_OTEL_TLS_CERT not set}
      key_file: ${STELLAOPS_OTEL_TLS_KEY:?STELLAOPS_OTEL_TLS_KEY not set}
      client_ca_file: ${STELLAOPS_OTEL_TLS_CA:?STELLAOPS_OTEL_TLS_CA not set}
  # Traces to Tempo over mTLS; X-Scope-OrgID selects the tenant.
  otlphttp/tempo:
    endpoint: ${STELLAOPS_TEMPO_ENDPOINT:https://stellaops-tempo:3200}
    compression: gzip
    tls:
      ca_file: ${STELLAOPS_TEMPO_TLS_CA_FILE:/etc/otel-collector/tls/ca.crt}
      cert_file: ${STELLAOPS_TEMPO_TLS_CERT_FILE:/etc/otel-collector/tls/client.crt}
      key_file: ${STELLAOPS_TEMPO_TLS_KEY_FILE:/etc/otel-collector/tls/client.key}
      insecure_skip_verify: false
    headers:
      "X-Scope-OrgID": ${STELLAOPS_TENANT_ID:unknown}
  # Logs to Loki over mTLS with the same tenant id.
  loki/tenant:
    endpoint: ${STELLAOPS_LOKI_ENDPOINT:https://stellaops-loki:3100/loki/api/v1/push}
    tenant_id: ${STELLAOPS_TENANT_ID:unknown}
    tls:
      ca_file: ${STELLAOPS_LOKI_TLS_CA_FILE:/etc/otel-collector/tls/ca.crt}
      cert_file: ${STELLAOPS_LOKI_TLS_CERT_FILE:/etc/otel-collector/tls/client.crt}
      key_file: ${STELLAOPS_LOKI_TLS_KEY_FILE:/etc/otel-collector/tls/client.key}
      insecure_skip_verify: false
    # Suppress the exporter-added default labels to keep stream cardinality low.
    default_labels_enabled:
      exporter: false
      job: false
      instance: false
    format: json
    drain_interval: 5s
    queue:
      enabled: true
      queue_size: 1024
    retry_on_failure: true
extensions:
  health_check:
    endpoint: ${STELLAOPS_OTEL_HEALTH_ENDPOINT:0.0.0.0:13133}
  pprof:
    endpoint: ${STELLAOPS_OTEL_PPROF_ENDPOINT:0.0.0.0:1777}
service:
  telemetry:
    logs:
      level: ${STELLAOPS_OTEL_LOG_LEVEL:info}
  extensions: [health_check, pprof]
  pipelines:
    traces:
      receivers: [otlp]
      processors: [attributes/tenant-tag, batch]
      exporters: [logging, otlphttp/tempo]
    metrics:
      receivers: [otlp]
      processors: [attributes/tenant-tag, batch]
      exporters: [logging, prometheus]
    logs:
      receivers: [otlp]
      processors: [attributes/tenant-tag, batch]
      exporters: [logging, loki/tenant]

View File

@@ -0,0 +1,36 @@
# Telemetry Storage Stack
Configuration snippets for the default StellaOps observability backends used in
staging and production environments. The stack comprises:
- **Prometheus** for metrics (scraping the collector's Prometheus exporter)
- **Tempo** for traces (OTLP ingest via mTLS)
- **Loki** for logs (HTTP ingest with tenant isolation)
## Files
| Path | Description |
| ---- | ----------- |
| `prometheus.yaml` | Scrape configuration for the collector (mTLS + bearer token placeholder). |
| `tempo.yaml` | Tempo configuration with multitenancy enabled and local storage paths. |
| `loki.yaml` | Loki configuration enabling per-tenant overrides and boltdb-shipper storage. |
| `tenants/tempo-overrides.yaml` | Example tenant overrides for Tempo (retention, limits). |
| `tenants/loki-overrides.yaml` | Example tenant overrides for Loki (rate limits, retention). |
| `auth/` | Placeholder directory for Prometheus bearer token files (e.g., `token`). |
These configurations are referenced by the Docker Compose overlay
(`deploy/compose/docker-compose.telemetry-storage.yaml`) and the staging rollout documented in
`docs/modules/telemetry/operations/storage.md`. Adjust paths, credentials, and overrides before running in
connected environments. Place the Prometheus bearer token in `auth/token` when using the
Compose overlay (the directory contains a `.gitkeep` placeholder and is gitignored by default).
Run `python ops/devops/telemetry/validate_storage_stack.py` after editing any of these files to
ensure TLS, multitenancy, and override references remain intact.
## Security
- Both Tempo and Loki require mutual TLS.
- Prometheus uses mTLS plus a bearer token that should be minted by Authority.
- Update the overrides files to enforce per-tenant retention/ingestion limits.
For comprehensive deployment steps see `docs/modules/telemetry/operations/storage.md`.

View File

View File

@@ -0,0 +1,48 @@
# Loki configuration for the StellaOps telemetry stack.
# Multitenant (auth_enabled) log storage backed by boltdb-shipper on the local
# filesystem; per-tenant limits come from tenants/loki-overrides.yaml.
auth_enabled: true

server:
  http_listen_port: 3100
  log_level: info

common:
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory
  replication_factor: 1
  path_prefix: /var/loki

schema_config:
  configs:
    - from: 2024-01-01
      store: boltdb-shipper
      object_store: filesystem
      # v12 is the newest index schema supported by boltdb-shipper; schema v13
      # requires the tsdb index store and fails config validation when paired
      # with boltdb-shipper.
      schema: v12
      index:
        prefix: loki_index_
        period: 24h

storage_config:
  filesystem:
    directory: /var/loki/chunks
  boltdb_shipper:
    active_index_directory: /var/loki/index
    cache_location: /var/loki/index_cache
    shared_store: filesystem

ruler:
  storage:
    type: local
    local:
      directory: /var/loki/rules
  rule_path: /tmp/loki-rules
  enable_api: true

limits_config:
  # NOTE(review): enforce_metric_name and boltdb_shipper.shared_store were
  # removed in Loki 3.x -- drop them if the deployed image is >= 3.0.
  enforce_metric_name: false
  reject_old_samples: true
  reject_old_samples_max_age: 168h
  max_entries_limit_per_query: 5000
  ingestion_rate_mb: 10
  ingestion_burst_size_mb: 20
  per_tenant_override_config: /etc/telemetry/tenants/loki-overrides.yaml

View File

@@ -0,0 +1,19 @@
# Prometheus scrape configuration for the StellaOps OTel collector.
# NOTE(review): Prometheus does not expand ${VAR:-default} placeholders
# natively -- this file is assumed to be rendered (e.g. envsubst) by the
# Compose overlay before Prometheus starts; confirm the rendering step.
global:
  scrape_interval: 15s
  evaluation_interval: 30s

scrape_configs:
  - job_name: "stellaops-otel-collector"
    scheme: https
    # The collector's Prometheus exporter serves its exposition at /metrics
    # (the same path ops/devops/telemetry/smoke_otel_collector.py fetches);
    # scraping "/" returns nothing useful.
    metrics_path: /metrics
    tls_config:
      ca_file: ${PROMETHEUS_TLS_CA_FILE:-/etc/telemetry/tls/ca.crt}
      cert_file: ${PROMETHEUS_TLS_CERT_FILE:-/etc/telemetry/tls/client.crt}
      key_file: ${PROMETHEUS_TLS_KEY_FILE:-/etc/telemetry/tls/client.key}
      insecure_skip_verify: false
    # Bearer token minted by Authority (see storage README); stored on disk.
    authorization:
      type: Bearer
      credentials_file: ${PROMETHEUS_BEARER_TOKEN_FILE:-/etc/telemetry/auth/token}
    static_configs:
      - targets:
          - ${PROMETHEUS_COLLECTOR_TARGET:-stellaops-otel-collector:9464}

View File

@@ -0,0 +1,56 @@
# Tempo configuration for the StellaOps telemetry stack.
# Multitenant trace storage with mTLS-protected OTLP ingest; per-tenant
# limits come from tenants/tempo-overrides.yaml.
multitenancy_enabled: true

usage_report:
  reporting_enabled: false

server:
  http_listen_port: 3200
  log_level: info

distributor:
  receivers:
    otlp:
      protocols:
        grpc:
          tls:
            cert_file: ${TEMPO_TLS_CERT_FILE:-/etc/telemetry/tls/server.crt}
            key_file: ${TEMPO_TLS_KEY_FILE:-/etc/telemetry/tls/server.key}
            client_ca_file: ${TEMPO_TLS_CA_FILE:-/etc/telemetry/tls/ca.crt}
            require_client_cert: true
        http:
          tls:
            cert_file: ${TEMPO_TLS_CERT_FILE:-/etc/telemetry/tls/server.crt}
            key_file: ${TEMPO_TLS_KEY_FILE:-/etc/telemetry/tls/server.key}
            client_ca_file: ${TEMPO_TLS_CA_FILE:-/etc/telemetry/tls/ca.crt}
            require_client_cert: true

ingester:
  lifecycler:
    ring:
      instance_availability_zone: ${TEMPO_ZONE:-zone-a}
  trace_idle_period: 10s
  # Plain integer: YAML has no digit-group separators, so "1_048_576" would be
  # read as a string instead of 1 MiB.
  max_block_bytes: 1048576

compactor:
  compaction:
    block_retention: 168h  # 7 days

metrics_generator:
  registry:
    external_labels:
      cluster: stellaops

storage:
  trace:
    backend: local
    local:
      path: /var/tempo/traces
    wal:
      path: /var/tempo/wal
  # NOTE(review): a `metrics` stanza here is not a documented Tempo storage
  # key -- confirm the intended placement against the deployed Tempo version.
  metrics:
    backend: prometheus

overrides:
  # NOTE(review): flat limit names under `defaults` mix the legacy and new
  # overrides formats -- verify against the deployed Tempo version.
  defaults:
    ingestion_rate_limit_bytes: 1048576
    max_traces_per_user: 200000
  per_tenant_override_config: /etc/telemetry/tenants/tempo-overrides.yaml

View File

@@ -0,0 +1,19 @@
# Example Loki per-tenant overrides
# Adjust according to https://grafana.com/docs/loki/latest/configuration/#limits_config
# Development tenant: generous ingest limits, 7-day retention.
stellaops-dev:
  ingestion_rate_mb: 10
  ingestion_burst_size_mb: 20
  max_global_streams_per_user: 5000
  retention_period: 168h
# Staging tenant: double the dev limits, 14-day retention.
stellaops-stage:
  ingestion_rate_mb: 20
  ingestion_burst_size_mb: 40
  max_global_streams_per_user: 10000
  retention_period: 336h
# Fallback applied to any tenant without an explicit entry: tight limits,
# 3-day retention.
__default__:
  ingestion_rate_mb: 5
  ingestion_burst_size_mb: 10
  retention_period: 72h

View File

@@ -0,0 +1,16 @@
# Example Tempo per-tenant overrides
# Consult https://grafana.com/docs/tempo/latest/configuration/#limits-configuration
# before applying in production.
# NOTE(review): `traces_per_second_limit` does not match current Tempo limit
# names (e.g. ingestion_rate_limit_bytes) -- confirm the deployed Tempo
# version accepts it. validate_storage_stack.py expects this exact key, so
# update both together.
stellaops-dev:
  traces_per_second_limit: 100000
  max_bytes_per_trace: 10485760
  max_search_bytes_per_trace: 20971520
stellaops-stage:
  traces_per_second_limit: 200000
  max_bytes_per_trace: 20971520
# Fallback for tenants without an explicit entry.
__default__:
  traces_per_second_limit: 50000
  max_bytes_per_trace: 5242880

View File

@@ -0,0 +1,33 @@
# Telemetry bundle verifier
Files:
- `verify-telemetry-bundle.sh`: offline verifier (checksums + optional JSON schema)
- `tests/sample-bundle/telemetry-bundle.json`: sample manifest
- `tests/sample-bundle/telemetry-bundle.sha256`: checksum list for sample bundle
- `tests/telemetry-bundle.tar`: deterministic sample bundle (ustar, mtime=0, owner/group 0)
- `tests/run-schema-tests.sh`: validates sample config against config schema
- `tests/ci-run.sh`: runs schema test + bundle verifier (use in CI)
Dependencies for full validation:
- `python` with `jsonschema` installed (`pip install jsonschema`)
- `tar`, `sha256sum`
Deterministic TAR flags used for sample bundle:
`tar --mtime=@0 --owner=0 --group=0 --numeric-owner --format=ustar`
Exit codes:
- 0 success
- 21 missing manifest/checksums
- 22 checksum mismatch
- 23 schema validation failed
- 64 usage error
Quick check:
```bash
./verify-telemetry-bundle.sh tests/telemetry-bundle.tar
```
CI suggestion:
```bash
ops/devops/telemetry/tests/ci-run.sh
```

View File

@@ -0,0 +1,77 @@
#!/usr/bin/env bash
# Generate a development telemetry CA plus collector (server) and client
# certificates for mutual TLS. The CA key is reused across runs if present,
# but the CA certificate and both leaf certificates are re-issued on every
# invocation (365-day validity, RSA-4096, SHA-256).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# NOTE(review): two levels up from ops/devops/telemetry resolves to ops/, not
# the repo root -- confirm this lands in the deploy/telemetry/certs directory
# that the smoke tests (and the certs/ .gitignore) expect.
CERT_DIR="${SCRIPT_DIR}/../../deploy/telemetry/certs"
mkdir -p "${CERT_DIR}"
CA_KEY="${CERT_DIR}/ca.key"
CA_CRT="${CERT_DIR}/ca.crt"
COL_KEY="${CERT_DIR}/collector.key"
COL_CSR="${CERT_DIR}/collector.csr"
COL_CRT="${CERT_DIR}/collector.crt"
CLIENT_KEY="${CERT_DIR}/client.key"
CLIENT_CSR="${CERT_DIR}/client.csr"
CLIENT_CRT="${CERT_DIR}/client.crt"
echo "[*] Generating OpenTelemetry dev CA and certificates in ${CERT_DIR}"
# Root CA (key generated once, certificate re-issued each run)
if [[ ! -f "${CA_KEY}" ]]; then
  openssl genrsa -out "${CA_KEY}" 4096 >/dev/null 2>&1
fi
openssl req -x509 -new -key "${CA_KEY}" -days 365 -sha256 \
  -out "${CA_CRT}" -subj "/CN=StellaOps Dev Telemetry CA" \
  -config <(cat <<'EOF'
[req]
distinguished_name = req_distinguished_name
prompt = no
[req_distinguished_name]
EOF
) >/dev/null 2>&1
# Collector certificate (server + client auth) with SANs for the container
# DNS name and localhost so both in-cluster and host-side tests can verify it.
openssl req -new -nodes -newkey rsa:4096 \
  -keyout "${COL_KEY}" \
  -out "${COL_CSR}" \
  -subj "/CN=stellaops-otel-collector" >/dev/null 2>&1
openssl x509 -req -in "${COL_CSR}" -CA "${CA_CRT}" -CAkey "${CA_KEY}" \
  -CAcreateserial -out "${COL_CRT}" -days 365 -sha256 \
  -extensions v3_req -extfile <(cat <<'EOF'
[v3_req]
subjectAltName = @alt_names
extendedKeyUsage = serverAuth, clientAuth
[alt_names]
DNS.1 = stellaops-otel-collector
DNS.2 = localhost
IP.1 = 127.0.0.1
EOF
) >/dev/null 2>&1
# Client certificate (client auth only) used by smoke tests and scrapers.
openssl req -new -nodes -newkey rsa:4096 \
  -keyout "${CLIENT_KEY}" \
  -out "${CLIENT_CSR}" \
  -subj "/CN=stellaops-otel-client" >/dev/null 2>&1
openssl x509 -req -in "${CLIENT_CSR}" -CA "${CA_CRT}" -CAkey "${CA_KEY}" \
  -CAcreateserial -out "${CLIENT_CRT}" -days 365 -sha256 \
  -extensions v3_req -extfile <(cat <<'EOF'
[v3_req]
extendedKeyUsage = clientAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = stellaops-otel-client
DNS.2 = localhost
IP.1 = 127.0.0.1
EOF
) >/dev/null 2>&1
# Clean up CSRs and the serial file; only keys and certificates remain.
rm -f "${COL_CSR}" "${CLIENT_CSR}"
rm -f "${CERT_DIR}/ca.srl"
echo "[✓] Certificates ready:"
ls -1 "${CERT_DIR}"

View File

@@ -0,0 +1,136 @@
#!/usr/bin/env python3
"""Package telemetry collector assets for offline/air-gapped installs.
Outputs a tarball containing the collector configuration, Compose overlay,
Helm defaults, and operator README. A SHA-256 checksum sidecar is emitted, and
optional Cosign signing can be enabled with --sign.
"""
from __future__ import annotations
import argparse
import hashlib
import os
import subprocess
import sys
import tarfile
from pathlib import Path
from typing import Iterable
# Repository root, resolved relative to this file's location.
REPO_ROOT = Path(__file__).resolve().parents[3]
# Default tarball location when --output is not supplied.
DEFAULT_OUTPUT = REPO_ROOT / "out" / "telemetry" / "telemetry-offline-bundle.tar.gz"
# Repo-relative artefacts packaged into the offline bundle; validate_files()
# checks that every entry exists before the tarball is created.
BUNDLE_CONTENTS: tuple[Path, ...] = (
    Path("deploy/telemetry/README.md"),
    Path("deploy/telemetry/otel-collector-config.yaml"),
    Path("deploy/telemetry/storage/README.md"),
    Path("deploy/telemetry/storage/prometheus.yaml"),
    Path("deploy/telemetry/storage/tempo.yaml"),
    Path("deploy/telemetry/storage/loki.yaml"),
    Path("deploy/telemetry/storage/tenants/tempo-overrides.yaml"),
    Path("deploy/telemetry/storage/tenants/loki-overrides.yaml"),
    Path("deploy/helm/stellaops/files/otel-collector-config.yaml"),
    Path("deploy/helm/stellaops/values.yaml"),
    Path("deploy/helm/stellaops/templates/otel-collector.yaml"),
    Path("deploy/compose/docker-compose.telemetry.yaml"),
    Path("deploy/compose/docker-compose.telemetry-storage.yaml"),
    Path("docs/modules/telemetry/operations/collector.md"),
    Path("docs/modules/telemetry/operations/storage.md"),
)
def compute_sha256(path: Path) -> str:
    """Return the hex-encoded SHA-256 digest of the file at *path*.

    Reads in 1 MiB chunks so large bundles never load fully into memory.
    """
    digest = hashlib.sha256()
    with path.open("rb") as stream:
        while True:
            block = stream.read(1024 * 1024)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def validate_files(paths: Iterable[Path]) -> None:
    """Raise FileNotFoundError unless every repo-relative path in *paths* exists under REPO_ROOT."""
    absent: list[str] = []
    for rel in paths:
        if not (REPO_ROOT / rel).exists():
            absent.append(str(rel))
    if absent:
        raise FileNotFoundError(f"Missing bundle artefacts: {', '.join(absent)}")
def create_bundle(output_path: Path) -> Path:
    """Create the offline bundle tarball at *output_path* and return that path.

    Tar member metadata (mtime, uid/gid, owner names) is normalised so that
    repeated runs over identical inputs produce identical tar payloads,
    matching the deterministic-bundle convention used elsewhere in
    ops/devops/telemetry. (NOTE: the gzip wrapper written by ``w:gz`` still
    embeds a creation timestamp; full byte-determinism would need an explicit
    gzip.GzipFile(mtime=0) wrapper.)
    """

    def _normalise(info: tarfile.TarInfo) -> tarfile.TarInfo:
        # Zero out non-content metadata for reproducibility.
        info.mtime = 0
        info.uid = info.gid = 0
        info.uname = info.gname = ""
        return info

    output_path.parent.mkdir(parents=True, exist_ok=True)
    with tarfile.open(output_path, "w:gz") as tar:
        for rel_path in BUNDLE_CONTENTS:
            tar.add(REPO_ROOT / rel_path, arcname=str(rel_path), filter=_normalise)
    return output_path
def write_checksum(bundle_path: Path) -> Path:
    """Write a sha256sum-style sidecar (``<bundle>.sha256``) and return its path."""
    sidecar = bundle_path.with_suffix(bundle_path.suffix + ".sha256")
    sidecar.write_text(f"{compute_sha256(bundle_path)} {bundle_path.name}\n", encoding="utf-8")
    return sidecar
def cosign_sign(bundle_path: Path, key_ref: str | None, identity_token: str | None) -> None:
    """Sign *bundle_path* with ``cosign sign-blob``.

    *key_ref* selects key-based signing; *identity_token* enables keyless
    signing via the COSIGN_IDENTITY_TOKEN environment variable. Raises
    RuntimeError when cosign is not installed or exits non-zero.
    """
    command = ["cosign", "sign-blob", "--yes", str(bundle_path)]
    if key_ref:
        command += ["--key", key_ref]
    environment = dict(os.environ)
    if identity_token:
        environment["COSIGN_IDENTITY_TOKEN"] = identity_token
    try:
        subprocess.run(command, check=True, env=environment)
    except FileNotFoundError as exc:
        raise RuntimeError("cosign not found on PATH; install cosign or omit --sign") from exc
    except subprocess.CalledProcessError as exc:
        raise RuntimeError(f"cosign sign-blob failed: {exc}") from exc
def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
    """Parse CLI options for the offline bundle packager.

    Cosign key/token defaults are picked up from COSIGN_KEY_REF and
    COSIGN_IDENTITY_TOKEN when the flags are omitted.
    """
    cli = argparse.ArgumentParser(description=__doc__)
    cli.add_argument(
        "--output",
        type=Path,
        default=DEFAULT_OUTPUT,
        help=f"Output bundle path (default: {DEFAULT_OUTPUT})",
    )
    cli.add_argument(
        "--sign",
        action="store_true",
        help="Sign the bundle using cosign (requires cosign on PATH)",
    )
    cli.add_argument(
        "--cosign-key",
        type=str,
        default=os.environ.get("COSIGN_KEY_REF"),
        help="Cosign key reference (file:..., azurekms://..., etc.)",
    )
    cli.add_argument(
        "--identity-token",
        type=str,
        default=os.environ.get("COSIGN_IDENTITY_TOKEN"),
        help="OIDC identity token for keyless signing",
    )
    return cli.parse_args(argv)
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: validate inputs, build the bundle, checksum it, and
    optionally sign it with cosign. Returns 0 on success."""
    options = parse_args(argv)
    validate_files(BUNDLE_CONTENTS)
    bundle = options.output.resolve()
    print(f"[*] Creating telemetry bundle at {bundle}")
    create_bundle(bundle)
    checksum = write_checksum(bundle)
    print(f"[✓] SHA-256 written to {checksum}")
    if options.sign:
        print("[*] Signing bundle with cosign")
        cosign_sign(bundle, options.cosign_key, options.identity_token)
        signature = bundle.with_suffix(bundle.suffix + ".sig")
        if signature.exists():
            print(f"[✓] Cosign signature written to {signature}")
        else:
            print("[!] Cosign completed but signature file not found (ensure cosign version >= 2.2)")
    return 0
if __name__ == "__main__":
sys.exit(main())

View File

@@ -0,0 +1,197 @@
#!/usr/bin/env python3
"""
Smoke test for the StellaOps OpenTelemetry Collector deployment.
The script sends sample traces, metrics, and logs over OTLP/HTTP with mutual TLS
and asserts that the collector accepted the payloads by checking its Prometheus
metrics endpoint.
"""
from __future__ import annotations
import argparse
import json
import ssl
import sys
import time
import urllib.request
from pathlib import Path
# Minimal OTLP/JSON trace export: one span tagged with service.name and
# tenant.id resource attributes.
TRACE_PAYLOAD = {
    "resourceSpans": [
        {
            "resource": {
                "attributes": [
                    {"key": "service.name", "value": {"stringValue": "smoke-client"}},
                    {"key": "tenant.id", "value": {"stringValue": "dev"}},
                ]
            },
            "scopeSpans": [
                {
                    "scope": {"name": "smoke-test"},
                    "spans": [
                        {
                            "traceId": "00000000000000000000000000000001",
                            "spanId": "0000000000000001",
                            "name": "smoke-span",
                            "kind": 1,
                            "startTimeUnixNano": "1730000000000000000",
                            "endTimeUnixNano": "1730000000500000000",
                            "status": {"code": 0},
                        }
                    ],
                }
            ],
        }
    ]
}
# Minimal OTLP/JSON metric export: one gauge data point.
METRIC_PAYLOAD = {
    "resourceMetrics": [
        {
            "resource": {
                "attributes": [
                    {"key": "service.name", "value": {"stringValue": "smoke-client"}},
                    {"key": "tenant.id", "value": {"stringValue": "dev"}},
                ]
            },
            "scopeMetrics": [
                {
                    "scope": {"name": "smoke-test"},
                    "metrics": [
                        {
                            "name": "smoke_gauge",
                            "gauge": {
                                "dataPoints": [
                                    {
                                        "asDouble": 1.0,
                                        "timeUnixNano": "1730000001000000000",
                                        "attributes": [
                                            {"key": "phase", "value": {"stringValue": "ingest"}}
                                        ],
                                    }
                                ]
                            },
                        }
                    ],
                }
            ],
        }
    ]
}
# Minimal OTLP/JSON log export: one Info-severity record.
LOG_PAYLOAD = {
    "resourceLogs": [
        {
            "resource": {
                "attributes": [
                    {"key": "service.name", "value": {"stringValue": "smoke-client"}},
                    {"key": "tenant.id", "value": {"stringValue": "dev"}},
                ]
            },
            "scopeLogs": [
                {
                    "scope": {"name": "smoke-test"},
                    "logRecords": [
                        {
                            "timeUnixNano": "1730000002000000000",
                            "severityNumber": 9,
                            "severityText": "Info",
                            "body": {"stringValue": "StellaOps collector smoke log"},
                        }
                    ],
                }
            ],
        }
    ]
}
def _load_context(ca: Path, cert: Path, key: Path) -> ssl.SSLContext:
    """Build an mTLS client context trusting *ca* and presenting *cert*/*key*.

    Hostname checking is disabled while certificate verification stays
    required (CERT_REQUIRED).
    """
    ctx = ssl.create_default_context(cafile=str(ca))
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_cert_chain(certfile=str(cert), keyfile=str(key))
    return ctx
def _post_json(url: str, payload: dict, context: ssl.SSLContext) -> None:
    """POST *payload* as JSON to *url*; raise RuntimeError on a non-2xx reply."""
    headers = {
        "Content-Type": "application/json",
        "User-Agent": "stellaops-otel-smoke/1.0",
    }
    body = json.dumps(payload).encode("utf-8")
    request = urllib.request.Request(url, data=body, headers=headers, method="POST")
    with urllib.request.urlopen(request, context=context, timeout=10) as response:
        if response.status // 100 != 2:
            raise RuntimeError(f"{url} returned HTTP {response.status}")
def _fetch_metrics(url: str, context: ssl.SSLContext) -> str:
    """GET the collector's Prometheus exposition text from *url*."""
    request = urllib.request.Request(
        url, headers={"User-Agent": "stellaops-otel-smoke/1.0"}
    )
    with urllib.request.urlopen(request, context=context, timeout=10) as response:
        raw = response.read()
    return raw.decode("utf-8")
def _assert_counter(metrics: str, metric_name: str) -> None:
for line in metrics.splitlines():
if line.startswith(metric_name):
try:
_, value = line.split(" ")
if float(value) > 0:
return
except ValueError:
continue
raise AssertionError(f"{metric_name} not incremented")
def main() -> int:
    """Send sample OTLP traces/metrics/logs to the collector over mTLS and
    assert, via the collector's Prometheus metrics, that all were accepted.

    Returns 0 on success, 1 when TLS material is missing; HTTP or assertion
    failures propagate as exceptions (non-zero process exit).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--host", default="localhost", help="Collector host (default: %(default)s)")
    parser.add_argument("--otlp-port", type=int, default=4318, help="OTLP/HTTP port")
    parser.add_argument("--metrics-port", type=int, default=9464, help="Prometheus metrics port")
    # --health-port is parsed but not exercised by this script.
    parser.add_argument("--health-port", type=int, default=13133, help="Health check port")
    parser.add_argument("--ca", type=Path, default=Path("deploy/telemetry/certs/ca.crt"), help="CA certificate path")
    parser.add_argument("--cert", type=Path, default=Path("deploy/telemetry/certs/client.crt"), help="Client certificate path")
    parser.add_argument("--key", type=Path, default=Path("deploy/telemetry/certs/client.key"), help="Client key path")
    args = parser.parse_args()
    # Fail fast when the mTLS material is absent (generate_dev_tls.sh creates it).
    for path in (args.ca, args.cert, args.key):
        if not path.exists():
            print(f"[!] missing TLS material: {path}", file=sys.stderr)
            return 1
    context = _load_context(args.ca, args.cert, args.key)
    otlp_base = f"https://{args.host}:{args.otlp_port}/v1"
    print(f"[*] Sending OTLP traffic to {otlp_base}")
    # One payload per signal type, all via the OTLP/HTTP JSON endpoints.
    _post_json(f"{otlp_base}/traces", TRACE_PAYLOAD, context)
    _post_json(f"{otlp_base}/metrics", METRIC_PAYLOAD, context)
    _post_json(f"{otlp_base}/logs", LOG_PAYLOAD, context)
    # Allow Prometheus exporter to update metrics
    time.sleep(2)
    metrics_url = f"https://{args.host}:{args.metrics_port}/metrics"
    print(f"[*] Fetching collector metrics from {metrics_url}")
    metrics = _fetch_metrics(metrics_url, context)
    # The otelcol receiver counters only increase for accepted payloads.
    _assert_counter(metrics, "otelcol_receiver_accepted_spans")
    _assert_counter(metrics, "otelcol_receiver_accepted_logs")
    _assert_counter(metrics, "otelcol_receiver_accepted_metric_points")
    print("[✓] Collector accepted traces, logs, and metrics.")
    return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -0,0 +1,232 @@
#!/usr/bin/env python3
"""Tenant isolation smoke test for DEVOPS-OBS-50-002.
The script assumes the telemetry storage stack (Tempo + Loki) is running with
mutual TLS enabled and enforces `X-Scope-OrgID` multi-tenancy. It performs the
following checks:
1. Pushes a trace via the collector OTLP/HTTP endpoint and verifies it is
retrievable from Tempo when using the matching tenant header, but not when
querying as a different tenant.
2. Pushes a log entry to Loki with a tenant header and verifies it is only
visible to the matching tenant.
The goal is to provide a deterministic CI-friendly check that our storage
configuration preserves tenant isolation guard rails before promoting bundles.
"""
from __future__ import annotations
import argparse
import json
import ssl
import sys
import time
import urllib.parse
import urllib.request
import uuid
from pathlib import Path
def _load_context(ca_file: Path, cert_file: Path, key_file: Path) -> ssl.SSLContext:
    """mTLS client context: trust *ca_file*, present *cert_file*/*key_file*.

    Enforces TLS 1.2+ and disables hostname checking while keeping
    certificate verification (create_default_context's default).
    """
    ctx = ssl.create_default_context(cafile=str(ca_file))
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    ctx.check_hostname = False
    ctx.load_cert_chain(certfile=str(cert_file), keyfile=str(key_file))
    return ctx
def _post_json(url: str, payload: dict, context: ssl.SSLContext, headers: dict | None = None) -> None:
    """POST *payload* as compact JSON to *url*; raise RuntimeError on non-2xx."""
    merged_headers = {
        "Content-Type": "application/json",
        "User-Agent": "stellaops-tenant-smoke/1.0",
        **(headers or {}),
    }
    request = urllib.request.Request(
        url,
        data=json.dumps(payload, separators=(",", ":")).encode("utf-8"),
        method="POST",
        headers=merged_headers,
    )
    with urllib.request.urlopen(request, context=context, timeout=10) as response:
        status = response.status
    if status // 100 != 2:
        raise RuntimeError(f"POST {url} returned HTTP {status}")
def _get(url: str, context: ssl.SSLContext, headers: dict | None = None) -> tuple[int, str]:
    """GET *url* and return ``(status, body)``.

    Non-2xx HTTP responses are returned rather than raised so callers can
    assert on denial codes (401/403/404).
    """
    # Explicit import: the module only imports urllib.request/urllib.parse,
    # and urllib.error being reachable via urllib.request is a side effect of
    # CPython internals, not a contract.
    import urllib.error

    request = urllib.request.Request(
        url,
        method="GET",
        headers={
            "User-Agent": "stellaops-tenant-smoke/1.0",
            **(headers or {}),
        },
    )
    try:
        with urllib.request.urlopen(request, context=context, timeout=10) as response:
            return response.status, response.read().decode("utf-8")
    except urllib.error.HTTPError as exc:
        body = exc.read().decode("utf-8") if exc.fp else ""
        return exc.code, body
def _payload_trace(trace_id: str, tenant: str) -> dict:
return {
"resourceSpans": [
{
"resource": {
"attributes": [
{"key": "service.name", "value": {"stringValue": "tenant-smoke"}},
{"key": "tenant.id", "value": {"stringValue": tenant}},
]
},
"scopeSpans": [
{
"scope": {"name": "tenant-smoke"},
"spans": [
{
"traceId": trace_id,
"spanId": "0000000000000001",
"name": "tenant-check",
"kind": 1,
"startTimeUnixNano": "1730500000000000000",
"endTimeUnixNano": "1730500000500000000",
"status": {"code": 0},
}
],
}
],
}
]
}
def _payload_log(ts_ns: int, tenant: str, marker: str) -> dict:
return {
"resourceLogs": [
{
"resource": {
"attributes": [
{"key": "service.name", "value": {"stringValue": "tenant-smoke"}},
{"key": "tenant.id", "value": {"stringValue": tenant}},
]
},
"scopeLogs": [
{
"scope": {"name": "tenant-smoke"},
"logRecords": [
{
"timeUnixNano": str(ts_ns),
"severityNumber": 9,
"severityText": "Info",
"body": {"stringValue": f"tenant={tenant} marker={marker}"},
}
],
}
],
}
]
}
def _assert_tenant_access(
    tempo_url: str,
    loki_url: str,
    collector_url: str,
    tenant: str,
    other_tenant: str,
    context: ssl.SSLContext,
) -> None:
    """Push a trace and a log as *tenant*, then verify storage-side isolation.

    Asserts that Tempo and Loki return the data when queried with the
    matching ``X-Scope-OrgID`` header and deny (or omit) it for
    *other_tenant*. Raises AssertionError on any isolation violation.
    """
    # OTLP JSON trace ids are a hex-encoded 16-byte value, i.e. exactly
    # 32 hex characters; uuid4().hex has that exact length. The previous
    # 48-character id (uuid4().hex + uuid4().hex[:16]) was malformed and
    # could be rejected at ingest.
    trace_id = uuid.uuid4().hex
    trace_payload = _payload_trace(trace_id, tenant)
    _post_json(f"{collector_url}/traces", trace_payload, context)
    log_marker = uuid.uuid4().hex[:12]
    timestamp_ns = int(time.time() * 1_000_000_000)
    log_payload = _payload_log(timestamp_ns, tenant, log_marker)
    _post_json(f"{collector_url}/logs", log_payload, context)
    # Allow background processing to flush to storage.
    time.sleep(2)
    # Positive path: the owning tenant can read the trace back from Tempo.
    tempo_headers = {"X-Scope-OrgID": tenant}
    tempo_status, tempo_body = _get(f"{tempo_url}/api/traces/{trace_id}", context, headers=tempo_headers)
    if tempo_status != 200:
        raise AssertionError(f"Tempo returned HTTP {tempo_status} for tenant {tenant}: {tempo_body}")
    if trace_id not in tempo_body:
        raise AssertionError("Tempo response missing expected trace data")
    # Negative path: a different tenant must be denied or see nothing.
    other_status, _ = _get(
        f"{tempo_url}/api/traces/{trace_id}", context, headers={"X-Scope-OrgID": other_tenant}
    )
    if other_status not in (401, 403, 404):
        raise AssertionError(
            f"Tempo should deny tenant {other_tenant}, received status {other_status}"
        )
    # Positive path for Loki: the owning tenant sees the log marker.
    log_query = urllib.parse.urlencode({"query": "{app=\"tenant-smoke\"}"})
    loki_status, loki_body = _get(
        f"{loki_url}/loki/api/v1/query?{log_query}", context, headers={"X-Scope-OrgID": tenant}
    )
    if loki_status != 200:
        raise AssertionError(f"Loki returned HTTP {loki_status} for tenant {tenant}: {loki_body}")
    if log_marker not in loki_body:
        raise AssertionError("Loki response missing expected log entry")
    # Loki may legitimately answer 200 with empty results for another tenant;
    # the real violation is that tenant seeing this tenant's marker.
    other_log_status, other_log_body = _get(
        f"{loki_url}/loki/api/v1/query?{log_query}",
        context,
        headers={"X-Scope-OrgID": other_tenant},
    )
    if other_log_status == 200 and log_marker in other_log_body:
        raise AssertionError("Loki returned tenant data to the wrong org")
    if other_log_status not in (200, 401, 403):
        raise AssertionError(
            f"Unexpected Loki status when querying as {other_tenant}: {other_log_status}"
        )
def main() -> int:
    """CLI entry point for the tenant-isolation smoke test.

    Returns 0 on success, 1 when TLS material is missing; isolation failures
    raise AssertionError (non-zero process exit).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--collector", default="https://localhost:4318/v1", help="Collector OTLP base URL")
    parser.add_argument("--tempo", default="https://localhost:3200", help="Tempo base URL")
    parser.add_argument("--loki", default="https://localhost:3100", help="Loki base URL")
    parser.add_argument("--tenant", default="dev", help="Primary tenant ID to test")
    parser.add_argument("--other-tenant", default="stage", help="Secondary tenant expected to be denied")
    parser.add_argument("--ca", type=Path, default=Path("deploy/telemetry/certs/ca.crt"), help="CA certificate path")
    parser.add_argument(
        "--cert", type=Path, default=Path("deploy/telemetry/certs/client.crt"), help="mTLS client certificate"
    )
    parser.add_argument(
        "--key", type=Path, default=Path("deploy/telemetry/certs/client.key"), help="mTLS client key"
    )
    args = parser.parse_args()
    # Fail fast when the mTLS material is absent (generate_dev_tls.sh creates it).
    for path in (args.ca, args.cert, args.key):
        if not path.exists():
            print(f"[!] missing TLS material: {path}", file=sys.stderr)
            return 1
    context = _load_context(args.ca, args.cert, args.key)
    # Normalise base URLs so path joins below never produce double slashes.
    collector_base = args.collector.rstrip("/")
    tempo_base = args.tempo.rstrip("/")
    loki_base = args.loki.rstrip("/")
    print(f"[*] Validating tenant isolation using tenant={args.tenant} and other={args.other_tenant}")
    _assert_tenant_access(
        tempo_base,
        loki_base,
        collector_base,
        tenant=args.tenant,
        other_tenant=args.other_tenant,
        context=context,
    )
    print("[✓] Tempo and Loki enforce tenant isolation with mTLS + scoped headers.")
    return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# CI entry point: run the telemetry schema test, then verify the sample
# bundle with the offline verifier against the published bundle schema.
set -euo pipefail
# NOTE(review): with this script at ops/devops/telemetry/tests/, two levels up
# resolves to ops/devops rather than the repo root, yet the paths below are
# repo-root-relative -- confirm the intended depth of ROOT.
ROOT="$(cd "$(dirname "$0")/../../" && pwd)"
SCHEMA="$ROOT/docs/modules/telemetry/schemas/telemetry-bundle.schema.json"
"$ROOT/ops/devops/telemetry/tests/run-schema-tests.sh"
TELEMETRY_BUNDLE_SCHEMA="$SCHEMA" "$ROOT/ops/devops/telemetry/verify-telemetry-bundle.sh" "$ROOT/ops/devops/telemetry/tests/telemetry-bundle.tar"

View File

@@ -0,0 +1,35 @@
{
"schemaVersion": "1.0.0",
"hashAlgorithm": "sha256",
"profiles": [
{
"name": "default",
"description": "default profile",
"collectorVersion": "otelcol/1.0.0",
"cryptoProfile": "fips",
"sealedMode": false,
"allowlistedEndpoints": ["http://localhost:4318"],
"exporters": [
{
"type": "otlp",
"endpoint": "http://localhost:4318",
"protocol": "http",
"compression": "none",
"enabled": true
}
],
"redactionPolicyUri": "https://example.com/redaction-policy.json",
"sampling": {
"strategy": "traceidratio",
"seed": "0000000000000001",
"rules": [
{"match": "service.name == 'api'", "priority": 10, "sampleRate": 0.2}
]
},
"tenantRouting": {
"attribute": "tenant.id",
"quotasPerTenant": {"tenant-a": 1000}
}
}
]
}

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Rebuild the deterministic sample telemetry bundle consumed by the verifier
# tests: copy the valid manifest, checksum it, and tar both files with
# normalised metadata.
set -euo pipefail
ROOT="$(cd "$(dirname "$0")/../" && pwd)"
BUNDLE_DIR="$ROOT/tests/sample-bundle"
mkdir -p "$BUNDLE_DIR"
# The bundle contains exactly the manifest plus its sha256sum sidecar.
cp "$ROOT/tests/manifest-valid.json" "$BUNDLE_DIR/telemetry-bundle.json"
(cd "$BUNDLE_DIR" && sha256sum telemetry-bundle.json > telemetry-bundle.sha256)
# Deterministic tar: zeroed mtime, owner/group 0, ustar format (see README).
tar --mtime=@0 --owner=0 --group=0 --numeric-owner --format=ustar -C "$BUNDLE_DIR" -cf "$ROOT/tests/telemetry-bundle.tar" telemetry-bundle.json telemetry-bundle.sha256
echo "Wrote sample bundle to $ROOT/tests/telemetry-bundle.tar"

View File

@@ -0,0 +1,26 @@
{
"schemaVersion": "1.0.0",
"bundleId": "00000000-0000-0000-0000-000000000001",
"createdAt": "2025-12-01T00:00:00Z",
"profileHash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"collectorVersion": "otelcol/1.0.0",
"sealedMode": true,
"redactionManifest": "redaction-manifest.json",
"manifestHashAlgorithm": "sha256",
"timeAnchor": {
"type": "rfc3161",
"value": "dummy-token"
},
"artifacts": [
{
"path": "logs.ndjson",
"sha256": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"mediaType": "application/x-ndjson",
"size": 123
}
],
"dsseEnvelope": {
"hash": "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
"location": "bundle.dsse.json"
}
}

View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Validate the sample telemetry config against the published JSON schema.
# Exits 127 when python or the jsonschema module is unavailable.
#
# Paths below are repo-root-relative: run this script from the repository root
# (as the README's CI suggestion does).
#
# Fixes a syntax error in the previous version, which combined
# `if ! ...; then` with a second stand-alone `then` after the heredoc.
set -euo pipefail

if ! command -v python >/dev/null 2>&1; then
  echo "python not found" >&2
  exit 127
fi

if ! python -c 'import jsonschema' >/dev/null 2>&1; then
  echo "python jsonschema module not installed" >&2
  exit 127
fi

python - <<'PY'
import json, pathlib
from jsonschema import validate
root = pathlib.Path('ops/devops/telemetry/tests')
config = json.loads((root / 'config-valid.json').read_text())
schema = json.loads(pathlib.Path('docs/modules/telemetry/schemas/telemetry-config.schema.json').read_text())
validate(config, schema)
print('telemetry-config schema ok')
PY

View File

@@ -0,0 +1,26 @@
{
"schemaVersion": "1.0.0",
"bundleId": "00000000-0000-0000-0000-000000000001",
"createdAt": "2025-12-01T00:00:00Z",
"profileHash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"collectorVersion": "otelcol/1.0.0",
"sealedMode": true,
"redactionManifest": "redaction-manifest.json",
"manifestHashAlgorithm": "sha256",
"timeAnchor": {
"type": "rfc3161",
"value": "dummy-token"
},
"artifacts": [
{
"path": "logs.ndjson",
"sha256": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"mediaType": "application/x-ndjson",
"size": 123
}
],
"dsseEnvelope": {
"hash": "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
"location": "bundle.dsse.json"
}
}

View File

@@ -0,0 +1 @@
6e3fedbf183aece5dfa14a90ebce955e2887d36747c424e628dc2cc03bcb0ed3 telemetry-bundle.json

View File

@@ -0,0 +1 @@
6e3fedbf183aece5dfa14a90ebce955e2887d36747c424e628dc2cc03bcb0ed3 ops/devops/telemetry/tests/manifest-valid.json

Binary file not shown.

View File

@@ -0,0 +1,83 @@
#!/usr/bin/env python3
"""
Static validation for the telemetry storage stack configuration.
Checks the Prometheus, Tempo, and Loki configuration snippets to ensure:
- mutual TLS is enabled end-to-end
- tenant override files are referenced
- multitenancy flags are set
- retention/limit defaults exist for __default__ tenant entries
This script is intended to back `DEVOPS-OBS-50-002` and can run in CI
before publishing bundles or rolling out staging updates.
"""
from __future__ import annotations
import sys
from pathlib import Path
# Repository root, resolved three parent directories above this script
# (assumes the script lives at ops/devops/telemetry/ — TODO confirm).
REPO_ROOT = Path(__file__).resolve().parents[3]
# Storage backend configuration files checked by the validators below.
PROMETHEUS_PATH = REPO_ROOT / "deploy/telemetry/storage/prometheus.yaml"
TEMPO_PATH = REPO_ROOT / "deploy/telemetry/storage/tempo.yaml"
LOKI_PATH = REPO_ROOT / "deploy/telemetry/storage/loki.yaml"
# Per-tenant override files; both must define a `__default__` tenant entry.
TEMPO_OVERRIDES_PATH = REPO_ROOT / "deploy/telemetry/storage/tenants/tempo-overrides.yaml"
LOKI_OVERRIDES_PATH = REPO_ROOT / "deploy/telemetry/storage/tenants/loki-overrides.yaml"
def read(path: Path) -> str:
    """Return the UTF-8 text of ``path``.

    Raises:
        FileNotFoundError: when the file does not exist, with a message
            naming the missing configuration file.
    """
    if path.exists():
        return path.read_text(encoding="utf-8")
    raise FileNotFoundError(f"Required configuration file missing: {path}")
def assert_contains(haystack: str, needle: str, path: Path) -> None:
    """Raise ``AssertionError`` naming ``path`` when ``needle`` is absent
    from ``haystack``; return ``None`` otherwise."""
    if haystack.find(needle) == -1:
        raise AssertionError(f"{path} is missing required snippet: {needle!r}")
def validate_prometheus() -> None:
    """Ensure the Prometheus scrape config carries mTLS material and
    file-based bearer credentials.

    Raises AssertionError (missing snippet) or FileNotFoundError (missing
    file) via the shared helpers.
    """
    content = read(PROMETHEUS_PATH)
    required_snippets = (
        "tls_config:",
        "ca_file:",
        "cert_file:",
        "key_file:",
        "authorization:",
        "credentials_file:",
    )
    for snippet in required_snippets:
        assert_contains(content, snippet, PROMETHEUS_PATH)
def validate_tempo() -> None:
    """Ensure Tempo enforces multitenancy + client certs and that the
    tenant override file defines default rate/size limits.

    Raises AssertionError (missing snippet) or FileNotFoundError (missing
    file) via the shared helpers.
    """
    main_config = read(TEMPO_PATH)
    for snippet in (
        "multitenancy_enabled: true",
        "require_client_cert: true",
        "per_tenant_override_config",
    ):
        assert_contains(main_config, snippet, TEMPO_PATH)
    override_config = read(TEMPO_OVERRIDES_PATH)
    for snippet in (
        "__default__",
        "traces_per_second_limit",
        "max_bytes_per_trace",
    ):
        assert_contains(override_config, snippet, TEMPO_OVERRIDES_PATH)
def validate_loki() -> None:
    """Ensure Loki runs with auth enabled, references tenant overrides, and
    the override file sets default retention.

    Raises AssertionError (missing snippet) or FileNotFoundError (missing
    file) via the shared helpers.
    """
    main_config = read(LOKI_PATH)
    for snippet in ("auth_enabled: true", "per_tenant_override_config"):
        assert_contains(main_config, snippet, LOKI_PATH)
    override_config = read(LOKI_OVERRIDES_PATH)
    for snippet in ("__default__", "retention_period"):
        assert_contains(override_config, snippet, LOKI_OVERRIDES_PATH)
def main() -> int:
    """Run every storage validator; return 0 on success, 1 on the first
    failed check (message goes to stderr)."""
    validators = (validate_prometheus, validate_tempo, validate_loki)
    try:
        for validator in validators:
            validator()
    except (AssertionError, FileNotFoundError) as exc:
        print(f"[❌] telemetry storage validation failed: {exc}", file=sys.stderr)
        return 1
    print("[✓] telemetry storage configuration meets multi-tenant guard rails.")
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,76 @@
#!/usr/bin/env bash
set -euo pipefail
# Minimal offline verifier for telemetry bundles (v1).
# Exit codes:
#   0  success
#  21  checksum/manifest missing
#  22  checksum mismatch
#  23  schema validation failed
#  64  usage error (no bundle argument)
# Positional arg 1: path to the bundle tarball.
BUNDLE=${1:-}
# Optional override for the schema location (defaults to a path derived
# from this script's directory further below).
SCHEMA_PATH=${TELEMETRY_BUNDLE_SCHEMA:-}
if [[ -z "$BUNDLE" ]]; then
echo "Usage: $0 path/to/telemetry-bundle.tar" >&2
echo "Optional: set TELEMETRY_BUNDLE_SCHEMA=/abs/path/to/telemetry-bundle.schema.json" >&2
exit 64
fi
# Scratch directory for the extracted bundle; removed on any exit via trap.
WORKDIR=$(mktemp -d)
cleanup() { rm -rf "$WORKDIR"; }
trap cleanup EXIT
# Unpack the bundle and require both the manifest and its checksum list.
tar --extract --file "$BUNDLE" --directory "$WORKDIR"
MANIFEST="$WORKDIR/telemetry-bundle.json"
HASHES="$WORKDIR/telemetry-bundle.sha256"
if [[ ! -f "$MANIFEST" || ! -f "$HASHES" ]]; then
echo "Missing manifest or checksum file." >&2
exit 21
fi
# Verify checksums. Run from inside WORKDIR because the .sha256 file lists
# bundle-relative paths. The command sits in an `if !` condition, so
# `set -e` does not abort before we can emit the exit-22 diagnostic.
pushd "$WORKDIR" >/dev/null
if ! sha256sum --quiet --check telemetry-bundle.sha256; then
echo "Checksum mismatch." >&2
exit 22
fi
popd >/dev/null
# JSON schema validation (optional if jsonschema not present).
if command -v python >/dev/null 2>&1; then
  SCHEMA_FILE="$SCHEMA_PATH"
  if [[ -z "$SCHEMA_FILE" ]]; then
    # Auto-discover the schema relative to this script. `&& pwd` is
    # required: `cd` prints nothing on success, so the original
    # command substitution always produced an empty SCHEMA_DIR and the
    # validation step was silently skipped.
    SCHEMA_DIR="$(cd "$(dirname "$0")/../../docs/modules/telemetry/schemas" 2>/dev/null && pwd || echo "")"
    SCHEMA_FILE="$SCHEMA_DIR/telemetry-bundle.schema.json"
  fi
  if [[ -n "$SCHEMA_FILE" && -f "$SCHEMA_FILE" ]]; then
    # The validator must run inside the `if !` condition: under `set -e`,
    # inspecting `$?` after a failing command never executes — the script
    # would abort with python's status instead of printing the message and
    # exiting 23.
    if ! python - "$MANIFEST" "$SCHEMA_FILE" <<'PY'
import json, sys
from jsonschema import validate, Draft202012Validator
manifest_path = sys.argv[1]
schema_path = sys.argv[2]
with open(manifest_path, 'r', encoding='utf-8') as f:
    manifest = json.load(f)
with open(schema_path, 'r', encoding='utf-8') as f:
    schema = json.load(f)
# Reject a malformed schema before validating the manifest against it.
Draft202012Validator.check_schema(schema)
validate(manifest, schema)
PY
    then
      echo "Schema validation failed." >&2
      exit 23
    fi
  else
    echo "Schema file not found ($SCHEMA_FILE); skipping validation." >&2
  fi
else
  echo "jsonschema validation skipped (requires python + jsonschema)." >&2
fi
echo "Telemetry bundle verified." >&2
exit 0