Consolidate devops folders

This commit is contained in:
master
2026-01-25 23:27:41 +02:00
parent 6e687b523a
commit a50bbb38ef
334 changed files with 35079 additions and 5569 deletions

View File

@@ -1,6 +1,13 @@
version: "3.9"
# =============================================================================
# ADVISORY AI - LOCAL DEVELOPMENT STACK
# =============================================================================
# Local/offline deployment for AdvisoryAI WebService + Worker.
#
# Usage:
# docker compose -f docker-compose.advisoryai.yaml up -d
#
# For production, use compose/docker-compose.stella-ops.yml instead.
# =============================================================================
services:
advisoryai-web:
build:

View File

@@ -1,6 +1,6 @@
# StellaOps Authority Container Scaffold
This directory provides a distroless Dockerfile and `docker-compose` sample for bootstrapping the Authority service alongside MongoDB (required) and Redis (optional).
This directory provides a distroless Dockerfile and `docker-compose` sample for bootstrapping the Authority service alongside PostgreSQL (required) and Valkey (cache).
## Prerequisites
@@ -16,14 +16,14 @@ This directory provides a distroless Dockerfile and `docker-compose` sample for
docker compose -f ops/authority/docker-compose.authority.yaml up --build
```
`authority.yaml` is mounted read-only at `/etc/authority.yaml` inside the container. Plugin manifests are mounted to `/app/etc/authority.plugins`. Update the issuer URL plus any Mongo credentials in the compose file or via an `.env`.
`authority.yaml` is mounted read-only at `/etc/authority.yaml` inside the container. Plugin manifests are mounted to `/app/etc/authority.plugins`. Update the issuer URL plus any PostgreSQL credentials in the compose file or via an `.env`.
To run with pre-built images, replace the `build:` block in the compose file with an `image:` reference.
## Volumes
- `mongo-data` persists MongoDB state.
- `redis-data` optional Redis persistence (enable the service before use).
- `postgres-data` persists PostgreSQL state.
- `valkey-data` persists Valkey cache state.
- `authority-keys` writable volume for Authority signing keys.
## Environment overrides
@@ -33,6 +33,9 @@ Key environment variables (mirroring `StellaOpsAuthorityOptions`):
| Variable | Description |
| --- | --- |
| `STELLAOPS_AUTHORITY__ISSUER` | Public issuer URL advertised by Authority |
| `STELLAOPS_AUTHORITY__STORAGE__DRIVER` | Storage driver (postgres) |
| `STELLAOPS_AUTHORITY__STORAGE__POSTGRES__CONNECTIONSTRING` | PostgreSQL connection string |
| `STELLAOPS_AUTHORITY__CACHE__REDIS__CONNECTIONSTRING` | Valkey/Redis cache connection |
| `STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0` | Primary plugin binaries directory inside the container |
| `STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY` | Path to plugin manifest directory |
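A hedged sketch of overriding these for a local run: export the values before `docker compose up` (or place them in an `.env` next to the compose file), assuming the compose file is adjusted to interpolate `${...}` placeholders instead of hard-coding values. Variable names follow the table above; hosts and credentials are illustrative.
```bash
# Illustrative overrides; adjust issuer, hosts, and credentials to your deployment.
export STELLAOPS_AUTHORITY__ISSUER="https://authority.localtest.me"
export STELLAOPS_AUTHORITY__STORAGE__DRIVER="postgres"
export STELLAOPS_AUTHORITY__STORAGE__POSTGRES__CONNECTIONSTRING="Host=postgres;Port=5432;Database=authority;Username=stellaops;Password=change-me"
export STELLAOPS_AUTHORITY__CACHE__REDIS__CONNECTIONSTRING="valkey:6379"

docker compose -f ops/authority/docker-compose.authority.yaml up --build
```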

View File

@@ -1,4 +1,13 @@
version: "3.9"
# =============================================================================
# AUTHORITY - LOCAL DEVELOPMENT STACK
# =============================================================================
# OAuth2/OIDC identity provider development environment.
#
# Usage:
# docker compose -f docker-compose.authority.yaml up -d
#
# For production, use compose/docker-compose.stella-ops.yml instead.
# =============================================================================
services:
authority:
@@ -8,12 +17,19 @@ services:
image: stellaops-authority:dev
container_name: stellaops-authority
depends_on:
mongo:
condition: service_started
postgres:
condition: service_healthy
valkey:
condition: service_healthy
environment:
# Override issuer to match your deployment URL.
STELLAOPS_AUTHORITY__ISSUER: "https://authority.localtest.me"
# Point the Authority host at the Mongo instance defined below.
# Storage configuration (PostgreSQL)
STELLAOPS_AUTHORITY__STORAGE__DRIVER: "postgres"
STELLAOPS_AUTHORITY__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres;Port=5432;Database=authority;Username=stellaops;Password=stellaops"
# Cache configuration (Valkey)
STELLAOPS_AUTHORITY__CACHE__REDIS__CONNECTIONSTRING: "valkey:6379"
# Plugin configuration
STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins"
STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins"
volumes:
@@ -26,17 +42,22 @@ services:
- "8080:8080"
restart: unless-stopped
mongo:
image: mongo:7
container_name: stellaops-authority-mongo
command: ["mongod", "--bind_ip_all"]
postgres:
image: postgres:18.1-alpine
container_name: stellaops-authority-postgres
environment:
MONGO_INITDB_ROOT_USERNAME: stellaops
MONGO_INITDB_ROOT_PASSWORD: stellaops
POSTGRES_USER: stellaops
POSTGRES_PASSWORD: stellaops
POSTGRES_DB: authority
volumes:
- mongo-data:/data/db
- postgres-data:/var/lib/postgresql/data
ports:
- "27017:27017"
- "5432:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U stellaops -d authority"]
interval: 10s
timeout: 5s
retries: 5
restart: unless-stopped
valkey:
@@ -47,13 +68,14 @@ services:
- valkey-data:/data
ports:
- "6379:6379"
healthcheck:
test: ["CMD", "valkey-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
restart: unless-stopped
# Uncomment to enable if/when Authority consumes Valkey.
# deploy:
# replicas: 0
volumes:
mongo-data:
postgres-data:
valkey-data:
authority-keys:
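Once the stack is up, a quick health pass against the backing services (a sketch; service names and credentials follow the compose file above, run from the directory containing it):
```bash
# Postgres readiness - same command the compose healthcheck runs.
docker compose -f docker-compose.authority.yaml exec postgres pg_isready -U stellaops -d authority

# Valkey reachability - expect "PONG".
docker compose -f docker-compose.authority.yaml exec valkey valkey-cli ping
```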

View File

@@ -2,7 +2,6 @@
# CI helper for DEVOPS-CI-110-001
# - Warms NuGet cache from local sources
# - Ensures OpenSSL 1.1 compatibility if available
# - Runs targeted Concelier and Excititor test slices with TRX output
# - Writes artefacts under ops/devops/artifacts/ci-110/<timestamp>/
@@ -27,25 +26,6 @@ log() {
printf '[%s] %s\n' "$(date -u +%H:%M:%S)" "$*"
}
ensure_openssl11() {
if openssl version 2>/dev/null | grep -q "1\\.1."; then
log "OpenSSL 1.1 detected: $(openssl version)"
return
fi
if command -v apt-get >/dev/null 2>&1; then
log "OpenSSL 1.1 not found; attempting install via apt-get (libssl1.1)"
sudo DEBIAN_FRONTEND=noninteractive apt-get update -y >/dev/null || true
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libssl1.1 || true
if openssl version 2>/dev/null | grep -q "1\\.1."; then
log "OpenSSL 1.1 available after install: $(openssl version)"
return
fi
fi
log "OpenSSL 1.1 still unavailable. Provide it via LD_LIBRARY_PATH if required."
}
restore_solution() {
local sln="$1"
log "Restore $sln"
@@ -71,12 +51,11 @@ run_test_slice() {
main() {
log "Starting CI-110 runner; artefacts -> $ARTIFACT_ROOT"
ensure_openssl11
restore_solution "$ROOT/concelier-webservice.slnf"
restore_solution "$ROOT/src/Excititor/StellaOps.Excititor.sln"
# Concelier: lightweight health slice to validate runner + Mongo wiring
# Concelier: lightweight health slice to validate runner + PostgreSQL wiring
run_test_slice "$ROOT/src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/StellaOps.Concelier.WebService.Tests.csproj" \
"HealthAndReadyEndpointsRespond" \
"concelier-health"

View File

@@ -1,9 +1,9 @@
# Concelier CI Runner Harness (DEVOPS-CONCELIER-CI-24-101)
Purpose: provide a deterministic, offline-friendly harness that restores, builds, and runs Concelier WebService + Storage Mongo tests with warmed NuGet cache and TRX/binlog artefacts for downstream sprints (Concelier II/III).
Purpose: provide a deterministic, offline-friendly harness that restores, builds, and runs Concelier WebService + Storage PostgreSQL tests with warmed NuGet cache and TRX/binlog artefacts for downstream sprints (Concelier II/III).
Usage
- From repo root run: `ops/devops/concelier-ci-runner/run-concelier-ci.sh`
- From repo root run: `devops/services/concelier-ci-runner/run-concelier-ci.sh`
- Outputs land in `ops/devops/artifacts/concelier-ci/<UTC timestamp>/`:
- `build.binlog` (solution build)
- `tests/webservice.trx`, `tests/storage.trx` (VSTest results)
@@ -13,11 +13,11 @@ Usage
Environment
- Defaults: `DOTNET_CLI_TELEMETRY_OPTOUT=1`, `DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1`, `NUGET_PACKAGES=$REPO/.nuget/packages`.
- Uses `.nuget/packages` cache (can be overridden via `NUGET_SOURCES`).
- No external services required; Mongo2Go provides ephemeral Mongo for tests.
- No external services required; Testcontainers provides ephemeral PostgreSQL for tests.
What it does
1) `dotnet restore` + `dotnet build` on `concelier-webservice.slnf` with `/bl`.
3) Run WebService and Storage.Mongo test projects with TRX output and without rebuild (`--no-build`).
3) Run WebService and Storage.Postgres test projects with TRX output and without rebuild (`--no-build`).
4) Emit a concise `summary.json` listing artefacts and SHA256s for reproducibility.
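After a run, `summary.json` makes it easy to double-check the recorded artefacts; a small sketch, assuming `jq` and `sha256sum` are available and the commands run from the repo root (field names follow the generator script):
```bash
# Pick the most recent run directory and re-hash the recorded TRX files.
out="ops/devops/artifacts/concelier-ci/$(ls ops/devops/artifacts/concelier-ci | sort | tail -n1)"
jq -r '.tests[].trx' "$out/summary.json" | xargs sha256sum
```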
Notes

View File

@@ -2,7 +2,7 @@
set -euo pipefail
# Concelier CI runner harness (DEVOPS-CONCELIER-CI-24-101)
# Produces warmed-cache restore, build binlog, and TRX outputs for WebService + Storage Mongo tests.
# Produces warmed-cache restore, build binlog, and TRX outputs for WebService + Storage tests.
repo_root="$(cd "$(dirname "$0")/../../.." && pwd)"
ts="$(date -u +%Y%m%dT%H%M%SZ)"
@@ -44,9 +44,9 @@ dotnet test "$repo_root/src/Concelier/__Tests/StellaOps.Concelier.WebService.Tes
"${common_test_args[@]}" \
--logger "trx;LogFileName=$web_trx"
# Storage Mongo tests
# Storage PostgreSQL tests
storage_trx="storage.trx"
dotnet test "$repo_root/src/Concelier/__Tests/StellaOps.Concelier.Storage.Mongo.Tests/StellaOps.Concelier.Storage.Mongo.Tests.csproj" \
dotnet test "$repo_root/src/Concelier/__Tests/StellaOps.Concelier.Storage.Postgres.Tests/StellaOps.Concelier.Storage.Postgres.Tests.csproj" \
"${common_test_args[@]}" \
--logger "trx;LogFileName=$storage_trx"
@@ -58,7 +58,7 @@ summary="$out_dir/summary.json"
printf ' "build_binlog": "%s",\n' "${build_binlog#${repo_root}/}"
printf ' "tests": [\n'
printf ' {"project": "WebService", "trx": "%s"},\n' "${logs_dir#${repo_root}/}/$web_trx"
printf ' {"project": "Storage.Mongo", "trx": "%s"}\n' "${logs_dir#${repo_root}/}/$storage_trx"
printf ' {"project": "Storage.Postgres", "trx": "%s"}\n' "${logs_dir#${repo_root}/}/$storage_trx"
printf ' ],\n'
printf ' "nuget_packages": "%s",\n' "${NUGET_PACKAGES#${repo_root}/}"
printf ' "sources": [\n'

View File

@@ -1,21 +0,0 @@
version: '3.8'
services:
minio:
image: minio/minio:RELEASE.2024-10-08T09-56-18Z
command: server /data --console-address ":9001"
environment:
MINIO_ROOT_USER: exportci
MINIO_ROOT_PASSWORD: exportci123
ports:
- "9000:9000"
- "9001:9001"
volumes:
- minio-data:/data
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 5s
timeout: 3s
retries: 5
volumes:
minio-data:
driver: local

View File

@@ -1,23 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
MINIO_ENDPOINT=${MINIO_ENDPOINT:-http://localhost:9000}
MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-exportci}
MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-exportci123}
BUCKET=${BUCKET:-export-ci}
TMP=$(mktemp)
cleanup(){ rm -f "$TMP"; }
trap cleanup EXIT
cat > "$TMP" <<'DATA'
{"id":"exp-001","object":"s3://export-ci/sample-export.ndjson","status":"ready"}
DATA
export AWS_ACCESS_KEY_ID="$MINIO_ACCESS_KEY"
export AWS_SECRET_ACCESS_KEY="$MINIO_SECRET_KEY"
export AWS_EC2_METADATA_DISABLED=true
if ! aws --endpoint-url "$MINIO_ENDPOINT" s3 ls "s3://$BUCKET" >/dev/null 2>&1; then
aws --endpoint-url "$MINIO_ENDPOINT" s3 mb "s3://$BUCKET"
fi
aws --endpoint-url "$MINIO_ENDPOINT" s3 cp "$TMP" "s3://$BUCKET/sample-export.ndjson"
echo "Seeded $BUCKET/sample-export.ndjson"

View File

@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -euo pipefail
RUSTFS_ENDPOINT=${RUSTFS_ENDPOINT:-http://localhost:8080}
BUCKET=${BUCKET:-export-ci}
TMP=$(mktemp)
cleanup(){ rm -f "$TMP"; }
trap cleanup EXIT
cat > "$TMP" <<'DATA'
{"id":"exp-001","object":"s3://export-ci/sample-export.ndjson","status":"ready"}
DATA
# RustFS uses S3-compatible API
export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID:-exportci}"
export AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY:-exportci123}"
export AWS_EC2_METADATA_DISABLED=true
if ! aws --endpoint-url "$RUSTFS_ENDPOINT" s3 ls "s3://$BUCKET" >/dev/null 2>&1; then
aws --endpoint-url "$RUSTFS_ENDPOINT" s3 mb "s3://$BUCKET"
fi
aws --endpoint-url "$RUSTFS_ENDPOINT" s3 cp "$TMP" "s3://$BUCKET/sample-export.ndjson"
echo "Seeded $BUCKET/sample-export.ndjson"

View File

@@ -1,46 +1,39 @@
# Orchestrator Infra Bootstrap (DEVOPS-ORCH-32-001)
## Components
- Postgres 16 (state/config)
- Mongo 7 (job ledger history)
- NATS 2.10 JetStream (queue/bus)
- PostgreSQL 18.1 (state/config/job ledger)
- Valkey 9.0.1 (queue/bus/cache)
Compose file: `ops/devops/orchestrator/docker-compose.orchestrator.yml`
Compose file: `devops/compose/docker-compose.stella-ops.yml`
## Quick start (offline-friendly)
```bash
# bring up infra
COMPOSE_FILE=ops/devops/orchestrator/docker-compose.orchestrator.yml docker compose up -d
docker compose -f devops/compose/docker-compose.stella-ops.yml up -d stellaops-postgres stellaops-valkey
# smoke check and emit connection strings
scripts/orchestrator/smoke.sh
devops/tools/orchestrator-scripts/smoke.sh
cat out/orchestrator-smoke/readiness.txt
# synthetic probe (postgres/mongo/nats health)
scripts/orchestrator/probe.sh
# synthetic probe (postgres/valkey health)
devops/tools/orchestrator-scripts/probe.sh
cat out/orchestrator-probe/status.txt
# replay readiness (restart then smoke)
scripts/orchestrator/replay-smoke.sh
```
Connection strings
- Postgres: `postgres://orch:orchpass@localhost:55432/orchestrator`
- Mongo: `mongodb://localhost:57017`
- NATS: `nats://localhost:4222`
- Postgres: `postgres://stellaops:stellaops@localhost:5432/stellaops`
- Valkey: `valkey://localhost:6379`
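A quick connectivity check against those endpoints (a sketch; assumes `pg_isready` and `valkey-cli` are available on the host, otherwise swap in `docker compose ... exec` equivalents):
```bash
# Postgres: expect "accepting connections".
pg_isready -h localhost -p 5432 -U stellaops -d stellaops

# Valkey: expect "PONG".
valkey-cli -h localhost -p 6379 ping
```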
## Observability
- Alerts: `ops/devops/orchestrator/alerts.yaml`
- Grafana dashboard: `ops/devops/orchestrator/grafana/orchestrator-overview.json`
- Alerts: `devops/observability/alerting/`
- Grafana dashboard: `devops/observability/dashboards/`
- Metrics expected: `job_queue_depth`, `job_failures_total`, `lease_extensions_total`, `job_latency_seconds_bucket`.
- Runbook: `ops/devops/orchestrator/incident-response.md`
- Synthetic probes: `scripts/orchestrator/probe.sh` (writes `out/orchestrator-probe/status.txt`).
- Replay smoke: `scripts/orchestrator/replay-smoke.sh` (idempotent restart + smoke).
- Synthetic probes: `devops/tools/orchestrator-scripts/probe.sh` (writes `out/orchestrator-probe/status.txt`).
## CI hook (suggested)
Add a workflow step (or local cron) to run `scripts/orchestrator/smoke.sh` with `SKIP_UP=1` against existing infra and publish the `readiness.txt` artifact for traceability.
Add a workflow step (or local cron) to run `devops/tools/orchestrator-scripts/smoke.sh` with `SKIP_UP=1` against existing infra and publish the `readiness.txt` artifact for traceability.
## Notes
- Uses fixed ports for determinism; adjust via COMPOSE overrides if needed.
- Data volumes: `orch_pg_data`, `orch_mongo_data` (docker volumes).
- Data volumes: `stellaops-postgres`, `stellaops-valkey` (docker volumes).
- No external downloads beyond base images; pin images to specific tags above.

View File

@@ -1,4 +1,14 @@
version: "3.9"
# =============================================================================
# ORCHESTRATOR - LOCAL DEVELOPMENT INFRASTRUCTURE
# =============================================================================
# Infrastructure services for Orchestrator local development.
#
# Usage:
# docker compose -f docker-compose.orchestrator.yml up -d
#
# For production, use compose/docker-compose.stella-ops.yml instead.
# =============================================================================
services:
orchestrator-postgres:
image: postgres:18.1-alpine
@@ -17,28 +27,15 @@ services:
retries: 5
restart: unless-stopped
orchestrator-mongo:
image: mongo:7
command: ["mongod", "--quiet", "--storageEngine=wiredTiger"]
orchestrator-valkey:
image: valkey/valkey:9.0.1-alpine
ports:
- "57017:27017"
- "56379:6379"
command: ["valkey-server", "--appendonly", "yes"]
volumes:
- orch_mongo_data:/data/db
- orch_valkey_data:/data
healthcheck:
test: ["CMD", "mongosh", "--quiet", "--eval", "db.adminCommand('ping')"]
interval: 10s
timeout: 5s
retries: 5
restart: unless-stopped
orchestrator-nats:
image: nats:2.10-alpine
ports:
- "5422:4222"
- "5822:8222"
command: ["-js", "-m", "8222"]
healthcheck:
test: ["CMD", "nats", "--server", "localhost:4222", "ping"]
test: ["CMD", "valkey-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
@@ -46,5 +43,4 @@ services:
volumes:
orch_pg_data:
orch_mongo_data:
orch_valkey_data:

View File

@@ -5,7 +5,7 @@ This harness supports `DEVOPS-AIRGAP-57-002` by exercising services with the `se
## Workflow
1. Run `./run-sealed-ci.sh` from this directory (the script now boots the stack, applies the iptables guard, and captures artefacts automatically).
2. The harness:
- Launches `sealed-mode-compose.yml` with Authority/Signer/Attestor + Mongo.
- Launches `sealed-mode-compose.yml` with Authority/Signer/Attestor + PostgreSQL + Valkey.
- Snapshots iptables, injects a `STELLAOPS_SEALED` chain into `DOCKER-USER`/`OUTPUT`, and whitelists only loopback + RFC1918 ranges so container egress is denied.
- Repeatedly polls `/healthz` on `5088/6088/7088` to verify sealed-mode bindings stay healthy while egress is blocked.
- Executes `egress_probe.py`, which runs curl probes from inside the compose network to confirm off-cluster addresses are unreachable.
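A minimal sketch of the kind of checks the harness loops over (ports and the `/healthz` path follow the description above; the off-cluster address is illustrative, and the exact probes live in `run-sealed-ci.sh` / `egress_probe.py`):
```bash
# Sealed-mode endpoints should stay healthy while egress is blocked.
for port in 5088 6088 7088; do
  curl -fsS "http://localhost:${port}/healthz" >/dev/null \
    && echo "port ${port}: healthy" || echo "port ${port}: NOT healthy"
done

# Off-cluster egress should fail fast.
curl -fsS --max-time 5 https://example.com >/dev/null \
  && echo "egress unexpectedly allowed" || echo "egress blocked as expected"
```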

View File

@@ -16,9 +16,11 @@ plugins:
enabled: true
configFile: standard.yaml
storage:
connectionString: mongodb://sealedci:sealedci@mongo:27017/authority?authSource=admin
databaseName: authority
driver: postgres
connectionString: "Host=postgres;Port=5432;Database=authority;Username=sealedci;Password=sealedci-secret"
commandTimeout: 00:00:30
cache:
connectionString: "valkey:6379"
signing:
enabled: true
activeKeyId: sealed-ci

View File

@@ -1,4 +1,11 @@
version: '3.9'
# =============================================================================
# SEALED MODE CI - AIR-GAPPED TESTING ENVIRONMENT
# =============================================================================
# Sealed/air-gapped CI environment for testing offline functionality.
#
# Usage:
# docker compose -f sealed-mode-compose.yml up -d
# =============================================================================
x-release-labels: &release-labels
com.stellaops.profile: 'sealed-ci'
@@ -9,31 +16,57 @@ networks:
driver: bridge
volumes:
sealed-mongo-data:
sealed-postgres-data:
sealed-valkey-data:
services:
mongo:
image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49
command: ['mongod', '--bind_ip_all']
postgres:
image: docker.io/library/postgres@sha256:8e97b8526ed19304b144f7478bc9201646acf0723cdc6e4b19bc9eb34879a27e
restart: unless-stopped
environment:
MONGO_INITDB_ROOT_USERNAME: sealedci
MONGO_INITDB_ROOT_PASSWORD: sealedci-secret
POSTGRES_USER: sealedci
POSTGRES_PASSWORD: sealedci-secret
POSTGRES_DB: stellaops
volumes:
- sealed-mongo-data:/data/db
- sealed-postgres-data:/var/lib/postgresql/data
networks:
- sealed-ci
healthcheck:
test: ["CMD-SHELL", "pg_isready -U sealedci -d stellaops"]
interval: 10s
timeout: 5s
retries: 5
labels: *release-labels
valkey:
image: docker.io/valkey/valkey:9.0.1-alpine
restart: unless-stopped
command: ["valkey-server", "--appendonly", "yes"]
volumes:
- sealed-valkey-data:/data
networks:
- sealed-ci
healthcheck:
test: ["CMD", "valkey-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
labels: *release-labels
authority:
image: registry.stella-ops.org/stellaops/authority@sha256:a8e8faec44a579aa5714e58be835f25575710430b1ad2ccd1282a018cd9ffcdd
depends_on:
- mongo
postgres:
condition: service_healthy
valkey:
condition: service_healthy
restart: unless-stopped
environment:
ASPNETCORE_URLS: http://+:5088
STELLAOPS_AUTHORITY__ISSUER: http://authority.sealed-ci.local
STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: mongodb://sealedci:sealedci-secret@mongo:27017/authority?authSource=admin
STELLAOPS_AUTHORITY__STORAGE__DRIVER: postgres
STELLAOPS_AUTHORITY__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres;Port=5432;Database=authority;Username=sealedci;Password=sealedci-secret"
STELLAOPS_AUTHORITY__CACHE__REDIS__CONNECTIONSTRING: "valkey:6379"
STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: /app/plugins
STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: /app/plugins
STELLAOPS_AUTHORITY__SECURITY__SENDERCONSTRAINTS__DPOP__ENABLED: 'true'
@@ -58,7 +91,9 @@ services:
ASPNETCORE_URLS: http://+:6088
SIGNER__AUTHORITY__BASEURL: http://authority:5088
SIGNER__POE__INTROSPECTURL: http://authority:5088/device-code
SIGNER__STORAGE__MONGO__CONNECTIONSTRING: mongodb://sealedci:sealedci-secret@mongo:27017/signer?authSource=admin
SIGNER__STORAGE__DRIVER: postgres
SIGNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres;Port=5432;Database=signer;Username=sealedci;Password=sealedci-secret"
SIGNER__CACHE__REDIS__CONNECTIONSTRING: "valkey:6379"
SIGNER__SEALED__MODE: Enabled
ports:
- '6088:6088'
@@ -74,7 +109,9 @@ services:
environment:
ASPNETCORE_URLS: http://+:7088
ATTESTOR__SIGNER__BASEURL: http://signer:6088
ATTESTOR__MONGO__CONNECTIONSTRING: mongodb://sealedci:sealedci-secret@mongo:27017/attestor?authSource=admin
ATTESTOR__STORAGE__DRIVER: postgres
ATTESTOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres;Port=5432;Database=attestor;Username=sealedci;Password=sealedci-secret"
ATTESTOR__CACHE__REDIS__CONNECTIONSTRING: "valkey:6379"
ATTESTOR__SEALED__MODE: Enabled
ports:
- '7088:7088'
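Note that Authority, Signer, and Attestor point at `authority`/`signer`/`attestor` databases while the Postgres container only initialises `stellaops`. If the services do not create their databases on startup (an assumption worth checking), a one-off init along these lines keeps the stack self-contained:
```bash
# Create the per-service databases inside the sealed-mode Postgres container.
# "|| true" keeps this re-runnable when a database already exists.
for db in authority signer attestor; do
  docker compose -f sealed-mode-compose.yml exec -T postgres \
    psql -U sealedci -d stellaops -c "CREATE DATABASE ${db};" || true
done
```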

View File

@@ -1,31 +1,29 @@
# Signals CI/CD & Local Stack (DEVOPS-SIG-26-001)
Artifacts:
- Compose stack: `ops/devops/signals/docker-compose.signals.yml` (Signals API + Mongo + Valkey + artifact volume).
- Sample config: `ops/devops/signals/signals.yaml` (mounted into the container at `/app/signals.yaml` if desired).
- Dockerfile: `ops/devops/signals/Dockerfile` (multi-stage build on .NET 10 RC).
- Build/export helper: `scripts/signals/build.sh` (saves image tar to `out/signals/signals-image.tar`).
- Span sink stack: `ops/devops/signals/docker-compose.spansink.yml` + `otel-spansink.yaml` to collect OTLP traces (Excititor `/v1/vex/observations/**`) and write NDJSON to `spansink-data` volume. Run via `scripts/signals/run-spansink.sh`.
- Grafana dashboard stub: `ops/devops/signals/dashboards/excititor-vex-traces.json` (import into Tempo-enabled Grafana).
- Compose stack: `devops/compose/docker-compose.stella-ops.yml` (Signals API + PostgreSQL + Valkey + artifact volume).
- Sample config: `devops/services/signals-ops/signals.yaml` (mounted into the container at `/app/signals.yaml` if desired).
- Dockerfile: `devops/services/signals-ops/Dockerfile` (multi-stage build on .NET 10 RC).
- Build/export helper: `devops/tools/signals-scripts/build.sh` (saves image tar to `out/signals/signals-image.tar`).
- Span sink stack: `devops/services/signals-ops/docker-compose.spansink.yml` + `otel-spansink.yaml` to collect OTLP traces (Excititor `/v1/vex/observations/**`) and write NDJSON to `spansink-data` volume.
- Grafana dashboard stub: `devops/services/signals-ops/dashboards/excititor-vex-traces.json` (import into Tempo-enabled Grafana).
Quick start (offline-friendly):
```bash
# build image
scripts/signals/build.sh
devops/tools/signals-scripts/build.sh
# run stack
COMPOSE_FILE=ops/devops/signals/docker-compose.signals.yml docker compose up -d
docker compose -f devops/compose/docker-compose.stella-ops.yml up -d signals
# hit health
curl -s http://localhost:5088/health
# run span sink collector
scripts/signals/run-spansink.sh
```
Configuration (ENV or YAML):
- `Signals__Mongo__ConnectionString` default `mongodb://signals-mongo:27017/signals`
- `Signals__Cache__ConnectionString` default `signals-valkey:6379`
- `Signals__Storage__Driver` default `postgres`
- `Signals__Storage__ConnectionString` default `Host=stellaops-postgres;Port=5432;Database=signals;Username=stellaops;Password=stellaops`
- `Signals__Cache__ConnectionString` default `stellaops-valkey:6379`
- `Signals__Storage__RootPath` default `/data/artifacts`
- Authority disabled by default for local; enable with `Signals__Authority__Enabled=true` and issuer settings.
@@ -33,6 +31,6 @@ CI workflow:
- `.gitea/workflows/signals-ci.yml` restores, builds, tests, builds container, and uploads `signals-image.tar` artifact.
Dependencies:
- Mongo 7 (wiredTiger)
- Valkey 8 (cache, BSD-3 licensed Redis fork)
- PostgreSQL 18.1 (primary data store)
- Valkey 9.0.1 (cache, BSD-3 licensed Redis fork)
- Artifact volume `signals_artifacts` for callgraph blobs.
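A quick way to confirm the configured stores are reachable once the stack is up (a sketch; service names follow the compose quick start above, and the `signals` database is assumed to already be provisioned):
```bash
# Postgres: open a connection to the signals database and print connection info.
docker compose -f devops/compose/docker-compose.stella-ops.yml exec stellaops-postgres \
  psql -U stellaops -d signals -c '\conninfo'

# Valkey cache: expect "PONG".
docker compose -f devops/compose/docker-compose.stella-ops.yml exec stellaops-valkey \
  valkey-cli ping
```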

View File

@@ -1,4 +1,13 @@
version: "3.9"
# =============================================================================
# SIGNALS - LOCAL DEVELOPMENT STACK
# =============================================================================
# Signals API local development environment.
#
# Usage:
# docker compose -f docker-compose.signals.yml up -d
#
# For production, use compose/docker-compose.stella-ops.yml instead.
# =============================================================================
services:
signals-api:
@@ -8,8 +17,8 @@ services:
image: stellaops/signals:local
environment:
ASPNETCORE_URLS: "http://+:5088"
Signals__Mongo__ConnectionString: "mongodb://signals-mongo:27017/signals"
Signals__Mongo__Database: "signals"
Signals__Storage__Driver: "postgres"
Signals__Storage__Postgres__ConnectionString: "Host=signals-postgres;Port=5432;Database=signals;Username=signals;Password=signals"
Signals__Cache__ConnectionString: "signals-valkey:6379"
Signals__Storage__RootPath: "/data/artifacts"
Signals__Authority__Enabled: "false"
@@ -17,21 +26,26 @@ services:
ports:
- "5088:5088"
depends_on:
- signals-mongo
- signals-valkey
signals-postgres:
condition: service_healthy
signals-valkey:
condition: service_healthy
volumes:
- signals_artifacts:/data/artifacts
- ./signals.yaml:/app/signals.yaml:ro
signals-mongo:
image: mongo:7
command: ["mongod", "--quiet", "--storageEngine=wiredTiger"]
signals-postgres:
image: postgres:18.1-alpine
environment:
POSTGRES_USER: signals
POSTGRES_PASSWORD: signals
POSTGRES_DB: signals
ports:
- "57027:27017"
- "55433:5432"
volumes:
- signals_mongo:/data/db
- signals_postgres:/var/lib/postgresql/data
healthcheck:
test: ["CMD", "mongosh", "--quiet", "--eval", "db.adminCommand('ping')"]
test: ["CMD-SHELL", "pg_isready -U signals -d signals"]
interval: 10s
timeout: 5s
retries: 5
@@ -49,5 +63,4 @@ services:
volumes:
signals_artifacts:
signals_mongo:
signals_postgres:

View File

@@ -1,13 +1,13 @@
# Sample offline configuration for Signals
Signals:
Mongo:
ConnectionString: "mongodb://signals-mongo:27017/signals"
Database: "signals"
Storage:
Driver: "postgres"
ConnectionString: "Host=signals-postgres;Port=5432;Database=signals;Username=stellaops;Password=stellaops"
Cache:
ConnectionString: "signals-valkey:6379"
DefaultTtlSeconds: 600
Storage:
Artifacts:
RootPath: "/data/artifacts"
Authority:
Enabled: false

View File

@@ -9,10 +9,10 @@ service:
env:
ASPNETCORE_URLS: "http://+:5088"
Signals__Mongo__ConnectionString: "mongodb://signals-mongo:27017/signals"
Signals__Mongo__Database: "signals"
Signals__Storage__Driver: "postgres"
Signals__Storage__ConnectionString: "Host=signals-postgres;Port=5432;Database=signals;Username=stellaops;Password=stellaops"
Signals__Cache__ConnectionString: "signals-valkey:6379"
Signals__Storage__RootPath: "/data/artifacts"
Signals__Artifacts__RootPath: "/data/artifacts"
Signals__Authority__Enabled: "false"
Signals__OpenApi__Enabled: "true"
@@ -27,9 +27,9 @@ valkey:
host: signals-valkey
port: 6379
mongo:
postgres:
enabled: true
connectionString: "mongodb://signals-mongo:27017/signals"
connectionString: "Host=signals-postgres;Port=5432;Database=signals;Username=stellaops;Password=stellaops"
ingress:
enabled: false

View File

@@ -1,33 +1,62 @@
version: "3.9"
# =============================================================================
# SYMBOLS SERVER - LOCAL DEVELOPMENT STACK
# =============================================================================
# Symbols server local development environment with PostgreSQL and RustFS.
#
# Usage:
# docker compose -f docker-compose.symbols.yaml up -d
#
# For production, use compose/docker-compose.stella-ops.yml instead.
# =============================================================================
services:
mongo:
image: mongo:7.0
restart: unless-stopped
command: ["mongod", "--bind_ip_all"]
ports:
- "27017:27017"
minio:
image: minio/minio:RELEASE.2024-08-17T00-00-00Z
postgres:
image: postgres:18.1-alpine
restart: unless-stopped
environment:
MINIO_ROOT_USER: minio
MINIO_ROOT_PASSWORD: minio123
command: server /data --console-address :9001
POSTGRES_USER: symbols
POSTGRES_PASSWORD: ${SYMBOLS_DB_PASSWORD:-symbols_dev}
POSTGRES_DB: symbols
volumes:
- postgres-data:/var/lib/postgresql/data
ports:
- "9000:9000"
- "9001:9001"
- "5432:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U symbols -d symbols"]
interval: 10s
timeout: 5s
retries: 5
rustfs:
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
restart: unless-stopped
command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data"]
environment:
RUSTFS__LOG__LEVEL: info
RUSTFS__STORAGE__PATH: /data
volumes:
- rustfs-data:/data
ports:
- "9000:8080"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
symbols:
image: ghcr.io/stella-ops/symbols-server:edge
depends_on:
- mongo
- minio
postgres:
condition: service_healthy
rustfs:
condition: service_healthy
environment:
Mongo__ConnectionString: mongodb://mongo:27017/symbols
Storage__Driver: postgres
Storage__Postgres__ConnectionString: "Host=postgres;Port=5432;Database=symbols;Username=symbols;Password=${SYMBOLS_DB_PASSWORD:-symbols_dev}"
Storage__Provider: S3
Storage__S3__Endpoint: http://minio:9000
Storage__S3__Endpoint: http://rustfs:8080
Storage__S3__Bucket: symbols
Storage__S3__AccessKeyId: minio
Storage__S3__SecretAccessKey: minio123
Storage__S3__UsePathStyle: "true"
Logging__Console__FormatterName: json
ports:
@@ -38,6 +67,11 @@ services:
timeout: 5s
retries: 6
start_period: 10s
volumes:
postgres-data:
rustfs-data:
networks:
default:
name: symbols-ci
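Because the symbols server uses the S3 driver against RustFS, the `symbols` bucket may need to exist before uploads succeed (whether RustFS auto-creates buckets or enforces these static credentials is an assumption here). A hedged bootstrap sketch using the AWS CLI against the host-published port (9000 maps to RustFS's 8080 above; credentials mirror the compose environment):
```bash
export AWS_ACCESS_KEY_ID="minio"
export AWS_SECRET_ACCESS_KEY="minio123"
export AWS_EC2_METADATA_DISABLED=true

# Create the bucket the symbols service expects, then list buckets to confirm.
aws --endpoint-url http://localhost:9000 s3 mb s3://symbols || true
aws --endpoint-url http://localhost:9000 s3 ls
```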

View File

@@ -3,16 +3,14 @@ image:
repository: ghcr.io/stella-ops/symbols-server
tag: edge
mongodb:
postgres:
enabled: true
connectionString: "mongodb://mongo:27017/symbols"
connectionString: "Host=postgres;Port=5432;Database=symbols;Username=stellaops;Password=stellaops"
minio:
rustfs:
enabled: true
endpoint: "http://minio:9000"
endpoint: "http://rustfs:8080"
bucket: "symbols"
accessKey: "minio"
secretKey: "minio123"
ingress:
enabled: false