fix merge conflicts

This commit is contained in:
master
2026-01-26 00:12:22 +02:00
parent 727e945e21
commit 03153765be
203 changed files with 0 additions and 25553 deletions

View File

@@ -1,164 +0,0 @@
# Deploy
Deployment infrastructure for StellaOps: clean, consolidated configurations for Docker Compose, Helm, and air-gapped installs.
## Infrastructure Stack
| Component | Technology | Version |
|-----------|------------|---------|
| Database | PostgreSQL | 18.1 |
| Messaging/Cache | Valkey | 9.0.1 |
| Object Storage | RustFS | latest |
| Transparency Log | Rekor | v2 (tiles) |
## Directory Structure
```
deploy/
├── compose/ # Docker Compose configurations
│ ├── docker-compose.stella-ops.yml # Main stack
│ ├── docker-compose.telemetry.yml # Observability (OTEL, Prometheus, Tempo, Loki)
│ ├── docker-compose.testing.yml # CI/testing infrastructure
│ ├── docker-compose.compliance-*.yml # Regional crypto overlays
│ ├── env/ # Environment templates
│ └── scripts/ # Compose lifecycle scripts
├── helm/ # Kubernetes Helm charts
│ └── stellaops/ # Main chart with env-specific values
│ ├── values-dev.yaml
│ ├── values-stage.yaml
│ ├── values-prod.yaml
│ └── values-airgap.yaml
├── docker/ # Container build infrastructure
│ ├── Dockerfile.hardened.template # Multi-stage hardened template
│ ├── Dockerfile.console # Angular UI
│ ├── build-all.sh # Build matrix
│ └── services-matrix.env # Service build args
├── database/ # PostgreSQL infrastructure
│ ├── migrations/ # Schema migrations
│ ├── postgres/ # CloudNativePG configs
│ ├── postgres-partitioning/ # Table partitioning
│ └── postgres-validation/ # RLS validation
├── scripts/ # Operational scripts
│ ├── bootstrap-trust.sh # TrustMonger initialization
│ ├── rotate-rekor-key.sh # Key rotation
│ ├── test-local.sh # Local testing
│ └── lib/ # Shared script libraries
├── offline/ # Air-gap deployment
│ ├── airgap/ # Bundle creation tools
│ ├── kit/ # Installation kit
│ └── templates/ # Offline config templates
├── telemetry/ # Observability (consolidated)
│ ├── alerts/ # Prometheus/Alertmanager rules
│ ├── dashboards/ # Grafana dashboards
│ ├── collectors/ # OTEL collector configs
│ └── storage/ # Prometheus/Loki/Tempo configs
├── secrets/ # Secret management templates
│ └── *.example # Example secret structures
├── releases/ # Release manifests
│ └── *.yaml # Version pinning per channel
└── tools/ # Curated operational tools
├── ci/ # Build/CI tools (nuget-prime, determinism)
├── feeds/ # Feed management (concelier, vex)
├── security/ # Security (attest, cosign, crypto)
└── validation/ # Validation scripts
```
## Quick Start
### Local Development (Docker Compose)
```bash
# Start full stack
docker compose -f deploy/compose/docker-compose.stella-ops.yml up -d
# Start with telemetry
docker compose -f deploy/compose/docker-compose.stella-ops.yml \
-f deploy/compose/docker-compose.telemetry.yml up -d
# Regional compliance overlay (e.g., China SM2/SM3/SM4)
docker compose -f deploy/compose/docker-compose.stella-ops.yml \
-f deploy/compose/docker-compose.compliance-china.yml up -d
```
### Kubernetes (Helm)
```bash
# Install to dev environment
helm install stellaops deploy/helm/stellaops \
-f deploy/helm/stellaops/values-dev.yaml \
-n stellaops --create-namespace
# Install to production
helm install stellaops deploy/helm/stellaops \
-f deploy/helm/stellaops/values-prod.yaml \
-n stellaops --create-namespace
```
### Air-Gapped Installation
```bash
# Create offline bundle
python deploy/offline/airgap/build_bootstrap_pack.py --version 2026.04
# Import on air-gapped system
deploy/offline/airgap/import-bundle.sh stellaops-2026.04-bundle.tar.gz
```
## Compose Profiles
| File | Purpose | Services |
|------|---------|----------|
| `stella-ops.yml` | Main stack | PostgreSQL, Valkey, RustFS, Rekor, all StellaOps services |
| `telemetry.yml` | Observability | OTEL Collector, Prometheus, Tempo, Loki |
| `testing.yml` | CI/Testing | postgres-test, valkey-test, mock-registry |
| `compliance-china.yml` | China crypto | SM2/SM3/SM4 overlays |
| `compliance-russia.yml` | Russia crypto | GOST R 34.10 overlays |
| `compliance-eu.yml` | EU crypto | eIDAS overlays |
| `dev.yml` | Development | Minimal stack with hot-reload |
## Connection Strings
```bash
# PostgreSQL
Host=stellaops-postgres;Port=5432;Database=stellaops;Username=stellaops;Password=<secret>
# Valkey
stellaops-valkey:6379
# RustFS (S3-compatible)
http://stellaops-rustfs:8080
```
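A quick way to sanity-check these endpoints from the `stellaops` Docker network (a sketch: the client images and the RustFS `/health` path are assumptions, and real credentials should come from your secrets):
```bash
# PostgreSQL reachable?
docker run --rm --network stellaops postgres:18.1-alpine \
  pg_isready -h stellaops-postgres -p 5432 -U stellaops

# Valkey responding?
docker run --rm --network stellaops valkey/valkey:9.0.1 \
  valkey-cli -h stellaops-valkey -p 6379 ping

# RustFS healthy? (health path assumed from the compose healthchecks)
docker run --rm --network stellaops curlimages/curl \
  curl -fsS http://stellaops-rustfs:8080/health
```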
## Migration from devops/
This `deploy/` directory is the consolidated replacement for the scattered `devops/` directory.
Content has been reorganized:
| Old Location | New Location |
|--------------|--------------|
| `devops/compose/` | `deploy/compose/` |
| `devops/helm/` | `deploy/helm/` |
| `devops/docker/` | `deploy/docker/` |
| `devops/database/` | `deploy/database/` |
| `devops/scripts/` | `deploy/scripts/` |
| `devops/offline/` | `deploy/offline/` |
| `devops/observability/` + `devops/telemetry/` | `deploy/telemetry/` |
| `devops/secrets/` | `deploy/secrets/` |
| `devops/releases/` | `deploy/releases/` |
The following `devops/` content was archived, removed, or relocated:
- `devops/services/` - Scattered service configs (use compose overlays or helm values)
- `devops/tools/` - Move operational tools to `tools/` at repo root
- `devops/artifacts/` - CI artifacts (transient, should not be committed)
- `devops/.nuget/` - Package cache (restore during build)
- `devops/docs/` - Move to `docs/operations/`
- `devops/gitlab/` - Legacy CI templates (repo uses Gitea)

View File

@@ -1,459 +0,0 @@
# Stella Ops Docker Compose Profiles
Consolidated Docker Compose configuration for the StellaOps platform. All profiles use immutable image digests from `deploy/releases/*.yaml` and are validated via `docker compose config` in CI.
## Quick Reference
| I want to... | Command |
|--------------|---------|
| Run the full platform | `docker compose -f docker-compose.stella-ops.yml up -d` |
| Add observability | `docker compose -f docker-compose.stella-ops.yml -f docker-compose.telemetry.yml up -d` |
| Run CI/testing infrastructure | `docker compose -f docker-compose.testing.yml --profile ci up -d` |
| Deploy with China compliance | See [China Compliance](#china-compliance-sm2sm3sm4) |
| Deploy with Russia compliance | See [Russia Compliance](#russia-compliance-gost) |
| Deploy with EU compliance | See [EU Compliance](#eu-compliance-eidas) |
---
## File Structure
### Core Stack Files
| File | Purpose |
|------|---------|
| `docker-compose.stella-ops.yml` | **Main stack**: PostgreSQL 18.1, Valkey 9.0.1, RustFS, Rekor v2, all StellaOps services |
| `docker-compose.telemetry.yml` | **Observability**: OpenTelemetry collector, Prometheus, Tempo, Loki |
| `docker-compose.testing.yml` | **CI/Testing**: Test databases, mock services, Gitea for integration tests |
| `docker-compose.dev.yml` | **Minimal dev infrastructure**: PostgreSQL, Valkey, RustFS only |
### Specialized Infrastructure
| File | Purpose |
|------|---------|
| `docker-compose.bsim.yml` | **BSim analysis**: PostgreSQL for Ghidra binary similarity corpus |
| `docker-compose.corpus.yml` | **Function corpus**: PostgreSQL for function behavior database |
| `docker-compose.sealed-ci.yml` | **Air-gapped CI**: Sealed testing environment with authority, signer, attestor |
| `docker-compose.telemetry-offline.yml` | **Offline observability**: Air-gapped Loki, Promtail, OTEL collector, Tempo, Prometheus |
### Regional Compliance Overlays
| File | Purpose | Jurisdiction |
|------|---------|--------------|
| `docker-compose.compliance-china.yml` | SM2/SM3/SM4 ShangMi crypto configuration | China (OSCCA) |
| `docker-compose.compliance-russia.yml` | GOST R 34.10-2012 crypto configuration | Russia (FSB) |
| `docker-compose.compliance-eu.yml` | eIDAS qualified trust services configuration | EU |
### Crypto Provider Overlays
| File | Purpose | Use Case |
|------|---------|----------|
| `docker-compose.crypto-sim.yml` | Universal crypto simulation | Testing without licensed crypto |
| `docker-compose.cryptopro.yml` | CryptoPro CSP (real GOST) | Production Russia deployments |
| `docker-compose.sm-remote.yml` | SM Remote service (real SM2) | Production China deployments |
### Additional Overlays
| File | Purpose | Use Case |
|------|---------|----------|
| `docker-compose.gpu.yaml` | NVIDIA GPU acceleration | Advisory AI inference with GPU |
| `docker-compose.cas.yaml` | Content Addressable Storage | Dedicated CAS with retention policies |
| `docker-compose.tile-proxy.yml` | Rekor tile caching proxy | Air-gapped Sigstore deployments |
### Supporting Files
| Path | Purpose |
|------|---------|
| `env/*.env.example` | Environment variable templates per profile |
| `scripts/backup.sh` | Create deterministic volume snapshots |
| `scripts/reset.sh` | Stop stack and remove volumes (with confirmation) |
---
## Usage Patterns
### Basic Development
```bash
# Copy environment template
cp env/stellaops.env.example .env
# Validate configuration
docker compose -f docker-compose.stella-ops.yml config
# Start the platform
docker compose -f docker-compose.stella-ops.yml up -d
# View logs
docker compose -f docker-compose.stella-ops.yml logs -f scanner-web
```
### With Observability
```bash
# Generate TLS certificates for telemetry
./ops/devops/telemetry/generate_dev_tls.sh
# Start platform with telemetry
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.telemetry.yml up -d
```
### CI/Testing Infrastructure
```bash
# Start CI infrastructure only (different ports to avoid conflicts)
docker compose -f docker-compose.testing.yml --profile ci up -d
# Start mock services for integration testing
docker compose -f docker-compose.testing.yml --profile mock up -d
# Start Gitea for SCM integration tests
docker compose -f docker-compose.testing.yml --profile gitea up -d
# Start everything
docker compose -f docker-compose.testing.yml --profile all up -d
```
**Test Infrastructure Ports:**
| Service | Port | Purpose |
|---------|------|---------|
| postgres-test | 5433 | PostgreSQL 18 for tests |
| valkey-test | 6380 | Valkey for cache/queue tests |
| rustfs-test | 8180 | S3-compatible storage |
| mock-registry | 5001 | Container registry mock |
| gitea | 3000 | Git hosting for SCM tests |
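A quick smoke check once the CI infrastructure is up (a sketch; it assumes the client tools are installed on the host, with credentials taken from `env/*.env.example`):
```bash
pg_isready -h localhost -p 5433                  # postgres-test
redis-cli -p 6380 ping                           # valkey-test speaks the Redis protocol
curl -fsS http://localhost:5001/v2/_catalog      # mock-registry (Docker Registry v2 API assumed)
curl -fsS http://localhost:3000/api/healthz      # gitea health endpoint
```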
---
## Regional Compliance Deployments
### China Compliance (SM2/SM3/SM4)
**For Testing (simulation):**
```bash
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.compliance-china.yml \
-f docker-compose.crypto-sim.yml up -d
```
**For Production (real SM crypto):**
```bash
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.compliance-china.yml \
-f docker-compose.sm-remote.yml up -d
```
**With OSCCA-certified HSM:**
```bash
# Set HSM connection details in environment
export SM_REMOTE_HSM_URL="https://sm-hsm.example.com:8900"
export SM_REMOTE_HSM_API_KEY="your-api-key"
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.compliance-china.yml \
-f docker-compose.sm-remote.yml up -d
```
**Algorithms:**
- SM2: Public key cryptography (GM/T 0003-2012)
- SM3: Hash function, 256-bit (GM/T 0004-2012)
- SM4: Block cipher, 128-bit (GM/T 0002-2012)
---
### Russia Compliance (GOST)
**For Testing (simulation):**
```bash
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.compliance-russia.yml \
-f docker-compose.crypto-sim.yml up -d
```
**For Production (CryptoPro CSP):**
```bash
# CryptoPro requires EULA acceptance
CRYPTOPRO_ACCEPT_EULA=1 docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.compliance-russia.yml \
-f docker-compose.cryptopro.yml up -d
```
**Requirements for CryptoPro:**
- CryptoPro CSP license files in `opt/cryptopro/downloads/`
- `CRYPTOPRO_ACCEPT_EULA=1` environment variable
- Valid CryptoPro container images
**Algorithms:**
- GOST R 34.10-2012: Digital signature (256/512-bit)
- GOST R 34.11-2012: Hash function (Streebog, 256/512-bit)
- GOST R 34.12-2015: Block cipher (Kuznyechik, Magma)
---
### EU Compliance (eIDAS)
**For Testing (simulation):**
```bash
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.compliance-eu.yml \
-f docker-compose.crypto-sim.yml up -d
```
**For Production:**
EU eIDAS deployments typically integrate with external Qualified Trust Service Providers (QTSPs) rather than hosting crypto locally. Configure your QTSP integration in the application settings.
```bash
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.compliance-eu.yml up -d
```
**Standards:**
- ETSI TS 119 312 compliant algorithms
- Qualified electronic signatures
- QTSP integration for qualified trust services
---
## Crypto Simulation Details
The `docker-compose.crypto-sim.yml` overlay provides a unified simulation service for all sovereign crypto profiles:
| Algorithm ID | Simulation | Use Case |
|--------------|------------|----------|
| `SM2`, `sm.sim` | HMAC-SHA256 | China testing |
| `GOST12-256`, `GOST12-512` | HMAC-SHA256 | Russia testing |
| `ru.magma.sim`, `ru.kuznyechik.sim` | HMAC-SHA256 | Russia testing |
| `DILITHIUM3`, `FALCON512`, `pq.sim` | HMAC-SHA256 | Post-quantum testing |
| `fips.sim`, `eidas.sim`, `kcmvp.sim` | ECDSA P-256 | FIPS/EU/Korea testing |
**Important:** Simulation is for testing only. Uses deterministic HMAC or static ECDSA keys—not suitable for production or compliance certification.
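Before exercising signing flows against a compliance overlay, you can confirm the simulator is up (host port `18090` and the `/keys` endpoint are the defaults from `docker-compose.crypto-sim.yml`):
```bash
# Is the sim-crypto service running and listing its simulated keys?
docker compose -f docker-compose.stella-ops.yml \
  -f docker-compose.compliance-china.yml \
  -f docker-compose.crypto-sim.yml ps sim-crypto

curl -fsS http://localhost:18090/keys
```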
---
## Configuration Reference
### Infrastructure Services
| Service | Default Port | Purpose |
|---------|--------------|---------|
| PostgreSQL | 5432 | Primary database |
| Valkey | 6379 | Cache, queues, events |
| RustFS | 8080 | S3-compatible artifact storage |
| Rekor v2 | (internal) | Sigstore transparency log |
### Application Services
| Service | Default Port | Purpose |
|---------|--------------|---------|
| Authority | 8440 | OAuth2/OIDC identity provider |
| Signer | 8441 | Cryptographic signing |
| Attestor | 8442 | SLSA attestation |
| Scanner Web | 8444 | SBOM/vulnerability scanning API |
| Concelier | 8445 | Advisory aggregation |
| Notify Web | 8446 | Notification service |
| Issuer Directory | 8447 | CSAF publisher registry |
| Advisory AI Web | 8448 | AI-powered advisory analysis |
| Web UI | 8443 | Angular frontend |
### Environment Variables
Key variables (see `env/*.env.example` for complete list):
```bash
# Database
POSTGRES_USER=stellaops
POSTGRES_PASSWORD=<secret>
POSTGRES_DB=stellaops_platform
# Authority
AUTHORITY_ISSUER=https://authority.example.com
# Scanner
SCANNER_EVENTS_ENABLED=false
SCANNER_OFFLINEKIT_ENABLED=false
# Crypto (for compliance overlays)
STELLAOPS_CRYPTO_PROFILE=default # or: china, russia, eu
STELLAOPS_CRYPTO_ENABLE_SIM=0 # set to 1 for simulation
# CryptoPro (Russia only)
CRYPTOPRO_ACCEPT_EULA=0 # must be 1 to use CryptoPro
# SM Remote (China only)
SM_SOFT_ALLOWED=1 # software-only SM2
SM_REMOTE_HSM_URL= # optional: OSCCA-certified HSM
```
---
## Networking
All profiles use a shared `stellaops` Docker network. Production deployments can attach a `frontdoor` network for reverse proxy integration:
```bash
# Create external network for load balancer
docker network create stellaops_frontdoor
# Set in environment
export FRONTDOOR_NETWORK=stellaops_frontdoor
```
Only externally-reachable services (Authority, Signer, Attestor, Concelier, Scanner Web, Notify Web, UI) attach to the frontdoor network. Infrastructure services (PostgreSQL, Valkey, RustFS) remain on the private network.
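As a sketch (the overlay file name is illustrative and `scanner-web` stands in for any externally-reachable service), an extra override can join a service to the frontdoor network while keeping it on the private `stellaops` network:
```bash
cat > docker-compose.frontdoor.yml <<'EOF'
networks:
  frontdoor:
    external: true
    name: ${FRONTDOOR_NETWORK:-stellaops_frontdoor}
services:
  scanner-web:
    networks:
      - stellaops
      - frontdoor
EOF

# Validate the merged configuration before starting
docker compose -f docker-compose.stella-ops.yml -f docker-compose.frontdoor.yml config
```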
---
## Sigstore Tools
Enable Sigstore CLI tools (rekor-cli, cosign) with the `sigstore` profile:
```bash
docker compose -f docker-compose.stella-ops.yml --profile sigstore up -d
```
---
## GPU Support for Advisory AI
GPU is disabled by default. To enable NVIDIA GPU inference:
```bash
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.gpu.yaml up -d
```
**Requirements:**
- NVIDIA GPU with CUDA support
- nvidia-container-toolkit installed
- Docker configured with nvidia runtime
---
## Content Addressable Storage (CAS)
The CAS overlay provides dedicated RustFS instances with retention policies for different artifact types:
```bash
# Standalone CAS infrastructure
docker compose -f docker-compose.cas.yaml up -d
# Combined with main stack
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.cas.yaml up -d
```
**CAS Services:**
| Service | Port | Purpose |
|---------|------|---------|
| rustfs-cas | 8180 | Runtime facts, signals, replay artifacts |
| rustfs-evidence | 8181 | Merkle roots, hash chains, evidence bundles (immutable) |
| rustfs-attestation | 8182 | DSSE envelopes, in-toto attestations (immutable) |
**Retention Policies (configurable via `env/cas.env.example`):**
- Vulnerability DB: 7 days
- SBOM artifacts: 365 days
- Scan results: 90 days
- Evidence bundles: Indefinite (immutable)
- Attestations: Indefinite (immutable)
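Retention is enforced by the overlay itself; what a deployment typically pins up front are the bind-mount paths and API keys, which are environment-interpolated in `docker-compose.cas.yaml` (a sketch with illustrative values):
```bash
export CAS_DATA_PATH=/srv/stellaops/cas
export CAS_EVIDENCE_PATH=/srv/stellaops/evidence
export CAS_ATTESTATION_PATH=/srv/stellaops/attestations
export RUSTFS_CAS_API_KEY="$(openssl rand -hex 32)"
export RUSTFS_EVIDENCE_API_KEY="$(openssl rand -hex 32)"
export RUSTFS_ATTESTATION_API_KEY="$(openssl rand -hex 32)"

docker compose -f docker-compose.stella-ops.yml -f docker-compose.cas.yaml up -d
```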
---
## Tile Proxy (Air-Gapped Sigstore)
For air-gapped deployments, the tile proxy caches Rekor transparency log tiles locally from the public Sigstore instance:
```bash
docker compose -f docker-compose.stella-ops.yml \
-f docker-compose.tile-proxy.yml up -d
```
**Tile Proxy vs Rekor v2:**
- Use `--profile sigstore` when running your own Rekor transparency log locally
- Use `docker-compose.tile-proxy.yml` when caching tiles from public Sigstore (rekor.sigstore.dev)
**Configuration:**
| Variable | Default | Purpose |
|----------|---------|---------|
| `REKOR_SERVER_URL` | `https://rekor.sigstore.dev` | Upstream Rekor to proxy |
| `TILE_PROXY_SYNC_ENABLED` | `true` | Enable periodic tile sync |
| `TILE_PROXY_SYNC_SCHEDULE` | `0 */6 * * *` | Sync every 6 hours |
| `TILE_PROXY_CACHE_MAX_SIZE_GB` | `10` | Local cache size limit |
The proxy syncs tiles on schedule and serves them to internal services for offline verification.
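For example, a site that prefers a nightly sync window and a larger local cache might set (variable names as in the table above):
```bash
export REKOR_SERVER_URL="https://rekor.sigstore.dev"
export TILE_PROXY_SYNC_ENABLED=true
export TILE_PROXY_SYNC_SCHEDULE="0 2 * * *"    # nightly instead of every 6 hours
export TILE_PROXY_CACHE_MAX_SIZE_GB=50

docker compose -f docker-compose.stella-ops.yml \
  -f docker-compose.tile-proxy.yml up -d
```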
---
## Maintenance
### Backup
```bash
./scripts/backup.sh # Creates timestamped tar.gz of volumes
```
### Reset
```bash
./scripts/reset.sh # Stops stack, removes volumes (requires confirmation)
```
### Validate Configuration
```bash
docker compose -f docker-compose.stella-ops.yml config
```
### Update to New Release
1. Import new manifest to `deploy/releases/`
2. Update image digests in compose files
3. Run `docker compose config` to validate
4. Run `deploy/tools/validate-profiles.sh` for audit
---
## Troubleshooting
### Port Conflicts
Override ports in your `.env` file:
```bash
POSTGRES_PORT=5433
VALKEY_PORT=6380
SCANNER_WEB_PORT=8544
```
### Service Dependencies
Services declare `depends_on` with health checks. If a service fails to start, check its dependencies:
```bash
docker compose -f docker-compose.stella-ops.yml ps
docker compose -f docker-compose.stella-ops.yml logs postgres
docker compose -f docker-compose.stella-ops.yml logs valkey
```
### Crypto Provider Issues
For crypto simulation issues:
```bash
# Check sim-crypto service
docker compose logs sim-crypto
curl http://localhost:18090/keys
```
For CryptoPro issues:
```bash
# Verify EULA acceptance
echo $CRYPTOPRO_ACCEPT_EULA # must be 1
# Check CryptoPro service
docker compose logs cryptopro-csp
```
---
## Related Documentation
- [Deployment Upgrade Runbook](../../docs/operations/devops/runbooks/deployment-upgrade.md)
- [Local CI Guide](../../docs/technical/testing/LOCAL_CI_GUIDE.md)
- [Crypto Profile Configuration](../../docs/security/crypto-profile-configuration.md)
- [Regional Deployments](../../docs/operations/regional-deployments.md)

View File

@@ -1,73 +0,0 @@
# =============================================================================
# BSIM - BINARY SIMILARITY ANALYSIS
# =============================================================================
# BSim PostgreSQL Database and Ghidra Headless Services for binary analysis.
#
# Usage:
# docker compose -f docker-compose.bsim.yml up -d
#
# Environment:
# BSIM_DB_PASSWORD - PostgreSQL password for BSim database
# =============================================================================
services:
bsim-postgres:
image: postgres:18.1-alpine
container_name: stellaops-bsim-db
environment:
POSTGRES_DB: bsim_corpus
POSTGRES_USER: bsim_user
POSTGRES_PASSWORD: ${BSIM_DB_PASSWORD:-stellaops_bsim_dev}
POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
volumes:
- bsim-data:/var/lib/postgresql/data
- ../docker/ghidra/scripts/init-bsim.sql:/docker-entrypoint-initdb.d/10-init-bsim.sql:ro
ports:
- "${BSIM_DB_PORT:-5433}:5432"
networks:
- stellaops-bsim
healthcheck:
test: ["CMD-SHELL", "pg_isready -U bsim_user -d bsim_corpus"]
interval: 10s
timeout: 5s
retries: 5
restart: unless-stopped
ghidra-headless:
build:
context: ../docker/ghidra
dockerfile: Dockerfile.headless
image: stellaops/ghidra-headless:11.2
container_name: stellaops-ghidra
depends_on:
bsim-postgres:
condition: service_healthy
environment:
BSIM_DB_URL: "postgresql://bsim-postgres:5432/bsim_corpus"
BSIM_DB_USER: bsim_user
BSIM_DB_PASSWORD: ${BSIM_DB_PASSWORD:-stellaops_bsim_dev}
JAVA_HOME: /opt/java/openjdk
MAXMEM: 4G
volumes:
- ghidra-projects:/projects
- ghidra-scripts:/scripts
- ghidra-output:/output
networks:
- stellaops-bsim
deploy:
resources:
limits:
cpus: '4'
memory: 8G
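    # No long-lived daemon: `tail -f /dev/null` only keeps the container alive so analyses can be launched on demand (e.g. via `docker compose exec`)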
entrypoint: ["tail", "-f", "/dev/null"]
restart: unless-stopped
volumes:
bsim-data:
ghidra-projects:
ghidra-scripts:
ghidra-output:
networks:
stellaops-bsim:
driver: bridge

View File

@@ -1,212 +0,0 @@
# Content Addressable Storage (CAS) Infrastructure
# Uses RustFS for S3-compatible immutable object storage
# Aligned with best-in-class vulnerability scanner retention policies
#
# Usage (standalone):
# docker compose -f docker-compose.cas.yaml up -d
#
# Usage (with main stack):
# docker compose -f docker-compose.stella-ops.yml -f docker-compose.cas.yaml up -d
x-release-labels: &release-labels
com.stellaops.release.version: "2025.10.0-edge"
com.stellaops.release.channel: "edge"
com.stellaops.profile: "cas"
x-cas-config: &cas-config
# Retention policies (aligned with Trivy/Grype/Anchore Enterprise)
# - vulnerability-db: 7 days (matches Trivy default)
# - sbom-artifacts: 365 days (audit compliance)
# - scan-results: 90 days (SOC2/ISO27001 typical)
# - evidence-bundles: indefinite (immutable, content-addressed)
# - attestations: indefinite (in-toto/DSSE signed)
CAS__RETENTION__VULNERABILITY_DB_DAYS: "7"
CAS__RETENTION__SBOM_ARTIFACTS_DAYS: "365"
CAS__RETENTION__SCAN_RESULTS_DAYS: "90"
CAS__RETENTION__EVIDENCE_BUNDLES_DAYS: "0" # 0 = indefinite
CAS__RETENTION__ATTESTATIONS_DAYS: "0" # 0 = indefinite
CAS__RETENTION__TEMP_ARTIFACTS_DAYS: "1"
networks:
cas:
driver: bridge
volumes:
rustfs-cas-data:
driver: local
driver_opts:
type: none
o: bind
device: ${CAS_DATA_PATH:-/var/lib/stellaops/cas}
rustfs-evidence-data:
driver: local
driver_opts:
type: none
o: bind
device: ${CAS_EVIDENCE_PATH:-/var/lib/stellaops/evidence}
rustfs-attestation-data:
driver: local
driver_opts:
type: none
o: bind
device: ${CAS_ATTESTATION_PATH:-/var/lib/stellaops/attestations}
services:
# Primary CAS storage - runtime facts, signals, replay artifacts
rustfs-cas:
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data"]
restart: unless-stopped
environment:
RUSTFS__LOG__LEVEL: "${RUSTFS_LOG_LEVEL:-info}"
RUSTFS__STORAGE__PATH: /data
RUSTFS__STORAGE__DEDUP: "true"
RUSTFS__STORAGE__COMPRESSION: "${RUSTFS_COMPRESSION:-zstd}"
RUSTFS__STORAGE__COMPRESSION_LEVEL: "${RUSTFS_COMPRESSION_LEVEL:-3}"
# Bucket lifecycle (retention enforcement)
RUSTFS__LIFECYCLE__ENABLED: "true"
RUSTFS__LIFECYCLE__SCAN_INTERVAL_HOURS: "24"
RUSTFS__LIFECYCLE__DEFAULT_RETENTION_DAYS: "90"
# Access control
RUSTFS__AUTH__ENABLED: "${RUSTFS_AUTH_ENABLED:-true}"
RUSTFS__AUTH__API_KEY: "${RUSTFS_CAS_API_KEY:-cas-api-key-change-me}"
RUSTFS__AUTH__READONLY_KEY: "${RUSTFS_CAS_READONLY_KEY:-cas-readonly-key-change-me}"
# Service account configuration
RUSTFS__ACCOUNTS__SCANNER__KEY: "${RUSTFS_SCANNER_KEY:-scanner-svc-key}"
RUSTFS__ACCOUNTS__SCANNER__BUCKETS: "scanner-artifacts,surface-cache,runtime-facts"
RUSTFS__ACCOUNTS__SCANNER__PERMISSIONS: "read,write"
RUSTFS__ACCOUNTS__SIGNALS__KEY: "${RUSTFS_SIGNALS_KEY:-signals-svc-key}"
RUSTFS__ACCOUNTS__SIGNALS__BUCKETS: "runtime-facts,signals-data,provenance-feed"
RUSTFS__ACCOUNTS__SIGNALS__PERMISSIONS: "read,write"
RUSTFS__ACCOUNTS__REPLAY__KEY: "${RUSTFS_REPLAY_KEY:-replay-svc-key}"
RUSTFS__ACCOUNTS__REPLAY__BUCKETS: "replay-bundles,inputs-lock"
RUSTFS__ACCOUNTS__REPLAY__PERMISSIONS: "read,write"
RUSTFS__ACCOUNTS__READONLY__KEY: "${RUSTFS_READONLY_KEY:-readonly-svc-key}"
RUSTFS__ACCOUNTS__READONLY__BUCKETS: "*"
RUSTFS__ACCOUNTS__READONLY__PERMISSIONS: "read"
<<: *cas-config
volumes:
- rustfs-cas-data:/data
ports:
- "${RUSTFS_CAS_PORT:-8180}:8080"
networks:
- cas
labels: *release-labels
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
# Evidence storage - Merkle roots, hash chains, evidence bundles (immutable)
rustfs-evidence:
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data", "--immutable"]
restart: unless-stopped
environment:
RUSTFS__LOG__LEVEL: "${RUSTFS_LOG_LEVEL:-info}"
RUSTFS__STORAGE__PATH: /data
RUSTFS__STORAGE__DEDUP: "true"
RUSTFS__STORAGE__COMPRESSION: "${RUSTFS_COMPRESSION:-zstd}"
RUSTFS__STORAGE__IMMUTABLE: "true" # Write-once, never delete
# Access control
RUSTFS__AUTH__ENABLED: "true"
RUSTFS__AUTH__API_KEY: "${RUSTFS_EVIDENCE_API_KEY:-evidence-api-key-change-me}"
RUSTFS__AUTH__READONLY_KEY: "${RUSTFS_EVIDENCE_READONLY_KEY:-evidence-readonly-key-change-me}"
# Service accounts
RUSTFS__ACCOUNTS__LEDGER__KEY: "${RUSTFS_LEDGER_KEY:-ledger-svc-key}"
RUSTFS__ACCOUNTS__LEDGER__BUCKETS: "evidence-bundles,merkle-roots,hash-chains"
RUSTFS__ACCOUNTS__LEDGER__PERMISSIONS: "read,write"
RUSTFS__ACCOUNTS__EXPORTER__KEY: "${RUSTFS_EXPORTER_KEY:-exporter-svc-key}"
RUSTFS__ACCOUNTS__EXPORTER__BUCKETS: "evidence-bundles"
RUSTFS__ACCOUNTS__EXPORTER__PERMISSIONS: "read"
volumes:
- rustfs-evidence-data:/data
ports:
- "${RUSTFS_EVIDENCE_PORT:-8181}:8080"
networks:
- cas
labels: *release-labels
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
# Attestation storage - DSSE envelopes, in-toto attestations (immutable)
rustfs-attestation:
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data", "--immutable"]
restart: unless-stopped
environment:
RUSTFS__LOG__LEVEL: "${RUSTFS_LOG_LEVEL:-info}"
RUSTFS__STORAGE__PATH: /data
RUSTFS__STORAGE__DEDUP: "true"
RUSTFS__STORAGE__COMPRESSION: "${RUSTFS_COMPRESSION:-zstd}"
RUSTFS__STORAGE__IMMUTABLE: "true" # Write-once, never delete
# Access control
RUSTFS__AUTH__ENABLED: "true"
RUSTFS__AUTH__API_KEY: "${RUSTFS_ATTESTATION_API_KEY:-attestation-api-key-change-me}"
RUSTFS__AUTH__READONLY_KEY: "${RUSTFS_ATTESTATION_READONLY_KEY:-attestation-readonly-key-change-me}"
# Service accounts
RUSTFS__ACCOUNTS__ATTESTOR__KEY: "${RUSTFS_ATTESTOR_KEY:-attestor-svc-key}"
RUSTFS__ACCOUNTS__ATTESTOR__BUCKETS: "attestations,dsse-envelopes,rekor-receipts"
RUSTFS__ACCOUNTS__ATTESTOR__PERMISSIONS: "read,write"
RUSTFS__ACCOUNTS__VERIFIER__KEY: "${RUSTFS_VERIFIER_KEY:-verifier-svc-key}"
RUSTFS__ACCOUNTS__VERIFIER__BUCKETS: "attestations,dsse-envelopes,rekor-receipts"
RUSTFS__ACCOUNTS__VERIFIER__PERMISSIONS: "read"
volumes:
- rustfs-attestation-data:/data
ports:
- "${RUSTFS_ATTESTATION_PORT:-8182}:8080"
networks:
- cas
labels: *release-labels
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
rekor-cli:
image: ghcr.io/sigstore/rekor-cli:v1.4.3
entrypoint: ["rekor-cli"]
command: ["version"]
profiles: ["sigstore"]
networks:
- cas
labels: *release-labels
cosign:
image: ghcr.io/sigstore/cosign:v3.0.4
entrypoint: ["cosign"]
command: ["version"]
profiles: ["sigstore"]
networks:
- cas
labels: *release-labels
# Lifecycle manager - enforces retention policies
cas-lifecycle:
image: registry.stella-ops.org/stellaops/cas-lifecycle:2025.10.0-edge
restart: unless-stopped
depends_on:
rustfs-cas:
condition: service_healthy
environment:
LIFECYCLE__CAS__ENDPOINT: "http://rustfs-cas:8080"
LIFECYCLE__CAS__API_KEY: "${RUSTFS_CAS_API_KEY:-cas-api-key-change-me}"
LIFECYCLE__SCHEDULE__CRON: "${LIFECYCLE_CRON:-0 3 * * *}" # 3 AM daily
LIFECYCLE__POLICIES__VULNERABILITY_DB: "7d"
LIFECYCLE__POLICIES__SBOM_ARTIFACTS: "365d"
LIFECYCLE__POLICIES__SCAN_RESULTS: "90d"
LIFECYCLE__POLICIES__TEMP_ARTIFACTS: "1d"
LIFECYCLE__TELEMETRY__ENABLED: "${LIFECYCLE_TELEMETRY:-true}"
LIFECYCLE__TELEMETRY__OTLP_ENDPOINT: "${OTLP_ENDPOINT:-}"
networks:
- cas
labels: *release-labels

View File

@@ -1,197 +0,0 @@
# =============================================================================
# STELLA OPS - COMPLIANCE OVERLAY: CHINA
# =============================================================================
# SM2/SM3/SM4 ShangMi (Commercial Cipher) crypto overlay.
# This file extends docker-compose.stella-ops.yml with China-specific crypto.
#
# Usage:
#   docker compose -f deploy/compose/docker-compose.stella-ops.yml \
#     -f deploy/compose/docker-compose.compliance-china.yml up -d
#
# Cryptography:
# - SM2: Elliptic curve cryptography (signature, key exchange)
# - SM3: Hash function (256-bit digest)
# - SM4: Block cipher (128-bit)
#
# =============================================================================
x-crypto-env: &crypto-env
STELLAOPS_CRYPTO_PROFILE: "china"
STELLAOPS_CRYPTO_CONFIG_PATH: "/app/etc/appsettings.crypto.yaml"
STELLAOPS_CRYPTO_MANIFEST_PATH: "/app/etc/crypto-plugins-manifest.json"
x-crypto-volumes: &crypto-volumes
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
services:
# ---------------------------------------------------------------------------
# Authority - China crypto overlay
# ---------------------------------------------------------------------------
authority:
image: registry.stella-ops.org/stellaops/authority:china
environment:
<<: *crypto-env
volumes:
- ../../etc/authority:/app/etc/authority:ro
- ../../etc/certificates/trust-roots:/etc/ssl/certs/stellaops:ro
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Signer - China crypto overlay
# ---------------------------------------------------------------------------
signer:
image: registry.stella-ops.org/stellaops/signer:china
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Attestor - China crypto overlay
# ---------------------------------------------------------------------------
attestor:
image: registry.stella-ops.org/stellaops/attestor:china
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Concelier - China crypto overlay
# ---------------------------------------------------------------------------
concelier:
image: registry.stella-ops.org/stellaops/concelier:china
environment:
<<: *crypto-env
volumes:
- concelier-jobs:/var/lib/concelier/jobs
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Scanner Web - China crypto overlay
# ---------------------------------------------------------------------------
scanner-web:
image: registry.stella-ops.org/stellaops/scanner-web:china
environment:
<<: *crypto-env
volumes:
- ../../etc/scanner:/app/etc/scanner:ro
- ../../etc/certificates/trust-roots:/etc/ssl/certs/stellaops:ro
- scanner-surface-cache:/var/lib/stellaops/surface
- ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro
- ${SCANNER_OFFLINEKIT_TRUSTROOTS_HOST_PATH:-./offline/trust-roots}:${SCANNER_OFFLINEKIT_TRUSTROOTDIRECTORY:-/etc/stellaops/trust-roots}:ro
- ${SCANNER_OFFLINEKIT_REKOR_SNAPSHOT_HOST_PATH:-./offline/rekor-snapshot}:${SCANNER_OFFLINEKIT_REKORSNAPSHOTDIRECTORY:-/var/lib/stellaops/rekor-snapshot}:ro
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Scanner Worker - China crypto overlay
# ---------------------------------------------------------------------------
scanner-worker:
image: registry.stella-ops.org/stellaops/scanner-worker:china
environment:
<<: *crypto-env
volumes:
- scanner-surface-cache:/var/lib/stellaops/surface
- ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Scheduler Worker - China crypto overlay
# ---------------------------------------------------------------------------
scheduler-worker:
image: registry.stella-ops.org/stellaops/scheduler-worker:china
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Notify Web - China crypto overlay
# ---------------------------------------------------------------------------
notify-web:
image: registry.stella-ops.org/stellaops/notify-web:china
environment:
<<: *crypto-env
volumes:
- ../../etc/notify:/app/etc/notify:ro
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Excititor - China crypto overlay
# ---------------------------------------------------------------------------
excititor:
image: registry.stella-ops.org/stellaops/excititor:china
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Advisory AI Web - China crypto overlay
# ---------------------------------------------------------------------------
advisory-ai-web:
image: registry.stella-ops.org/stellaops/advisory-ai-web:china
environment:
<<: *crypto-env
volumes:
- ../../etc/llm-providers:/app/etc/llm-providers:ro
- advisory-ai-queue:/var/lib/advisory-ai/queue
- advisory-ai-plans:/var/lib/advisory-ai/plans
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Advisory AI Worker - China crypto overlay
# ---------------------------------------------------------------------------
advisory-ai-worker:
image: registry.stella-ops.org/stellaops/advisory-ai-worker:china
environment:
<<: *crypto-env
volumes:
- ../../etc/llm-providers:/app/etc/llm-providers:ro
- advisory-ai-queue:/var/lib/advisory-ai/queue
- advisory-ai-plans:/var/lib/advisory-ai/plans
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
- ../../etc/appsettings.crypto.china.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "china"
# ---------------------------------------------------------------------------
# Web UI - China crypto overlay
# ---------------------------------------------------------------------------
web-ui:
image: registry.stella-ops.org/stellaops/web-ui:china
labels:
com.stellaops.crypto.profile: "china"

View File

@@ -1,209 +0,0 @@
# =============================================================================
# STELLA OPS - COMPLIANCE OVERLAY: EU
# =============================================================================
# eIDAS qualified trust services crypto overlay.
# This file extends docker-compose.stella-ops.yml with EU-specific crypto.
#
# Usage:
#   docker compose -f deploy/compose/docker-compose.stella-ops.yml \
#     -f deploy/compose/docker-compose.compliance-eu.yml up -d
#
# Cryptography:
# - eIDAS-compliant qualified electronic signatures
# - ETSI TS 119 312 compliant algorithms
# - Qualified Trust Service Provider (QTSP) integration
#
# =============================================================================
x-crypto-env: &crypto-env
STELLAOPS_CRYPTO_PROFILE: "eu"
STELLAOPS_CRYPTO_CONFIG_PATH: "/app/etc/appsettings.crypto.yaml"
STELLAOPS_CRYPTO_MANIFEST_PATH: "/app/etc/crypto-plugins-manifest.json"
x-crypto-volumes: &crypto-volumes
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
services:
# ---------------------------------------------------------------------------
# Authority - EU crypto overlay
# ---------------------------------------------------------------------------
authority:
image: registry.stella-ops.org/stellaops/authority:eu
environment:
<<: *crypto-env
volumes:
- ../../etc/authority:/app/etc/authority:ro
- ../../etc/certificates/trust-roots:/etc/ssl/certs/stellaops:ro
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Signer - EU crypto overlay
# ---------------------------------------------------------------------------
signer:
image: registry.stella-ops.org/stellaops/signer:eu
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Attestor - EU crypto overlay
# ---------------------------------------------------------------------------
attestor:
image: registry.stella-ops.org/stellaops/attestor:eu
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Concelier - EU crypto overlay
# ---------------------------------------------------------------------------
concelier:
image: registry.stella-ops.org/stellaops/concelier:eu
environment:
<<: *crypto-env
volumes:
- concelier-jobs:/var/lib/concelier/jobs
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Scanner Web - EU crypto overlay
# ---------------------------------------------------------------------------
scanner-web:
image: registry.stella-ops.org/stellaops/scanner-web:eu
environment:
<<: *crypto-env
volumes:
- ../../etc/scanner:/app/etc/scanner:ro
- ../../etc/certificates/trust-roots:/etc/ssl/certs/stellaops:ro
- scanner-surface-cache:/var/lib/stellaops/surface
- ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro
- ${SCANNER_OFFLINEKIT_TRUSTROOTS_HOST_PATH:-./offline/trust-roots}:${SCANNER_OFFLINEKIT_TRUSTROOTDIRECTORY:-/etc/stellaops/trust-roots}:ro
- ${SCANNER_OFFLINEKIT_REKOR_SNAPSHOT_HOST_PATH:-./offline/rekor-snapshot}:${SCANNER_OFFLINEKIT_REKORSNAPSHOTDIRECTORY:-/var/lib/stellaops/rekor-snapshot}:ro
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Scanner Worker - EU crypto overlay
# ---------------------------------------------------------------------------
scanner-worker:
image: registry.stella-ops.org/stellaops/scanner-worker:eu
environment:
<<: *crypto-env
volumes:
- scanner-surface-cache:/var/lib/stellaops/surface
- ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Scheduler Worker - EU crypto overlay
# ---------------------------------------------------------------------------
scheduler-worker:
image: registry.stella-ops.org/stellaops/scheduler-worker:eu
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Notify Web - EU crypto overlay
# ---------------------------------------------------------------------------
notify-web:
image: registry.stella-ops.org/stellaops/notify-web:eu
environment:
<<: *crypto-env
volumes:
- ../../etc/notify:/app/etc/notify:ro
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Excititor - EU crypto overlay
# ---------------------------------------------------------------------------
excititor:
image: registry.stella-ops.org/stellaops/excititor:eu
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Advisory AI Web - EU crypto overlay
# ---------------------------------------------------------------------------
advisory-ai-web:
image: registry.stella-ops.org/stellaops/advisory-ai-web:eu
environment:
<<: *crypto-env
volumes:
- ../../etc/llm-providers:/app/etc/llm-providers:ro
- advisory-ai-queue:/var/lib/advisory-ai/queue
- advisory-ai-plans:/var/lib/advisory-ai/plans
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Advisory AI Worker - EU crypto overlay
# ---------------------------------------------------------------------------
advisory-ai-worker:
image: registry.stella-ops.org/stellaops/advisory-ai-worker:eu
environment:
<<: *crypto-env
volumes:
- ../../etc/llm-providers:/app/etc/llm-providers:ro
- advisory-ai-queue:/var/lib/advisory-ai/queue
- advisory-ai-plans:/var/lib/advisory-ai/plans
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
- ../../etc/appsettings.crypto.eu.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"
# ---------------------------------------------------------------------------
# Web UI - EU crypto overlay
# ---------------------------------------------------------------------------
web-ui:
image: registry.stella-ops.org/stellaops/web-ui:eu
labels:
com.stellaops.crypto.profile: "eu"
com.stellaops.compliance: "eidas"

View File

@@ -1,216 +0,0 @@
# =============================================================================
# STELLA OPS - COMPLIANCE OVERLAY: RUSSIA
# =============================================================================
# GOST R 34.10-2012, GOST R 34.11-2012 (Streebog) crypto overlay.
# This file extends docker-compose.stella-ops.yml with Russia-specific crypto.
#
# Usage:
#   docker compose -f deploy/compose/docker-compose.stella-ops.yml \
#     -f deploy/compose/docker-compose.compliance-russia.yml up -d
#
# With CryptoPro CSP:
#   CRYPTOPRO_ACCEPT_EULA=1 docker compose -f deploy/compose/docker-compose.stella-ops.yml \
#     -f deploy/compose/docker-compose.compliance-russia.yml \
#     -f deploy/compose/docker-compose.cryptopro.yml up -d
#
# Cryptography:
# - GOST R 34.10-2012: Digital signature
# - GOST R 34.11-2012: Hash function (Streebog, 256/512-bit)
# - GOST R 34.12-2015: Block cipher (Kuznyechik)
#
# Providers: openssl.gost, pkcs11.gost, cryptopro.gost
#
# =============================================================================
x-crypto-env: &crypto-env
STELLAOPS_CRYPTO_PROFILE: "russia"
STELLAOPS_CRYPTO_CONFIG_PATH: "/app/etc/appsettings.crypto.yaml"
STELLAOPS_CRYPTO_MANIFEST_PATH: "/app/etc/crypto-plugins-manifest.json"
STELLAOPS_CRYPTO_PROVIDERS: "openssl.gost,pkcs11.gost,cryptopro.gost"
x-crypto-volumes: &crypto-volumes
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
services:
# ---------------------------------------------------------------------------
# Authority - Russia crypto overlay
# ---------------------------------------------------------------------------
authority:
image: registry.stella-ops.org/stellaops/authority:russia
environment:
<<: *crypto-env
volumes:
- ../../etc/authority:/app/etc/authority:ro
- ../../etc/certificates/trust-roots:/etc/ssl/certs/stellaops:ro
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Signer - Russia crypto overlay
# ---------------------------------------------------------------------------
signer:
image: registry.stella-ops.org/stellaops/signer:russia
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Attestor - Russia crypto overlay
# ---------------------------------------------------------------------------
attestor:
image: registry.stella-ops.org/stellaops/attestor:russia
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Concelier - Russia crypto overlay
# ---------------------------------------------------------------------------
concelier:
image: registry.stella-ops.org/stellaops/concelier:russia
environment:
<<: *crypto-env
volumes:
- concelier-jobs:/var/lib/concelier/jobs
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Scanner Web - Russia crypto overlay
# ---------------------------------------------------------------------------
scanner-web:
image: registry.stella-ops.org/stellaops/scanner-web:russia
environment:
<<: *crypto-env
volumes:
- ../../etc/scanner:/app/etc/scanner:ro
- ../../etc/certificates/trust-roots:/etc/ssl/certs/stellaops:ro
- scanner-surface-cache:/var/lib/stellaops/surface
- ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro
- ${SCANNER_OFFLINEKIT_TRUSTROOTS_HOST_PATH:-./offline/trust-roots}:${SCANNER_OFFLINEKIT_TRUSTROOTDIRECTORY:-/etc/stellaops/trust-roots}:ro
- ${SCANNER_OFFLINEKIT_REKOR_SNAPSHOT_HOST_PATH:-./offline/rekor-snapshot}:${SCANNER_OFFLINEKIT_REKORSNAPSHOTDIRECTORY:-/var/lib/stellaops/rekor-snapshot}:ro
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Scanner Worker - Russia crypto overlay
# ---------------------------------------------------------------------------
scanner-worker:
image: registry.stella-ops.org/stellaops/scanner-worker:russia
environment:
<<: *crypto-env
volumes:
- scanner-surface-cache:/var/lib/stellaops/surface
- ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Scheduler Worker - Russia crypto overlay
# ---------------------------------------------------------------------------
scheduler-worker:
image: registry.stella-ops.org/stellaops/scheduler-worker:russia
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Notify Web - Russia crypto overlay
# ---------------------------------------------------------------------------
notify-web:
image: registry.stella-ops.org/stellaops/notify-web:russia
environment:
<<: *crypto-env
volumes:
- ../../etc/notify:/app/etc/notify:ro
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Excititor - Russia crypto overlay
# ---------------------------------------------------------------------------
excititor:
image: registry.stella-ops.org/stellaops/excititor:russia
environment:
<<: *crypto-env
volumes:
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Advisory AI Web - Russia crypto overlay
# ---------------------------------------------------------------------------
advisory-ai-web:
image: registry.stella-ops.org/stellaops/advisory-ai-web:russia
environment:
<<: *crypto-env
volumes:
- ../../etc/llm-providers:/app/etc/llm-providers:ro
- advisory-ai-queue:/var/lib/advisory-ai/queue
- advisory-ai-plans:/var/lib/advisory-ai/plans
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Advisory AI Worker - Russia crypto overlay
# ---------------------------------------------------------------------------
advisory-ai-worker:
image: registry.stella-ops.org/stellaops/advisory-ai-worker:russia
environment:
<<: *crypto-env
volumes:
- ../../etc/llm-providers:/app/etc/llm-providers:ro
- advisory-ai-queue:/var/lib/advisory-ai/queue
- advisory-ai-plans:/var/lib/advisory-ai/plans
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
- ../../etc/appsettings.crypto.russia.yaml:/app/etc/appsettings.crypto.yaml:ro
- ../../etc/crypto-plugins-manifest.json:/app/etc/crypto-plugins-manifest.json:ro
labels:
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.provider: "openssl.gost,pkcs11.gost,cryptopro.gost"
# ---------------------------------------------------------------------------
# Web UI - Russia crypto overlay
# ---------------------------------------------------------------------------
web-ui:
image: registry.stella-ops.org/stellaops/web-ui:russia
labels:
com.stellaops.crypto.profile: "russia"

View File

@@ -1,42 +0,0 @@
# =============================================================================
# CORPUS - FUNCTION BEHAVIOR DATABASE
# =============================================================================
# PostgreSQL database for function behavior corpus analysis.
#
# Usage:
# docker compose -f docker-compose.corpus.yml up -d
#
# Environment:
# CORPUS_DB_PASSWORD - PostgreSQL password for corpus database
# =============================================================================
services:
corpus-postgres:
image: postgres:18.1-alpine
container_name: stellaops-corpus-db
environment:
POSTGRES_DB: stellaops_corpus
POSTGRES_USER: corpus_user
POSTGRES_PASSWORD: ${CORPUS_DB_PASSWORD:-stellaops_corpus_dev}
POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
volumes:
- corpus-data:/var/lib/postgresql/data
- ../../docs/db/schemas/corpus.sql:/docker-entrypoint-initdb.d/10-corpus-schema.sql:ro
- ../docker/corpus/scripts/init-test-data.sql:/docker-entrypoint-initdb.d/20-test-data.sql:ro
ports:
- "${CORPUS_DB_PORT:-5435}:5432"
networks:
- stellaops-corpus
healthcheck:
test: ["CMD-SHELL", "pg_isready -U corpus_user -d stellaops_corpus"]
interval: 10s
timeout: 5s
retries: 5
restart: unless-stopped
volumes:
corpus-data:
networks:
stellaops-corpus:
driver: bridge

View File

@@ -1,119 +0,0 @@
# =============================================================================
# STELLA OPS - CRYPTO SIMULATION OVERLAY
# =============================================================================
# Universal crypto simulation service for testing sovereign crypto without
# licensed hardware or certified modules.
#
# This overlay provides the sim-crypto-service which simulates:
# - GOST R 34.10-2012 (Russia): GOST12-256, GOST12-512, ru.magma.sim, ru.kuznyechik.sim
# - SM2/SM3/SM4 (China): SM2, sm.sim, sm2.sim
# - Post-Quantum: DILITHIUM3, FALCON512, pq.sim
# - FIPS/eIDAS/KCMVP: fips.sim, eidas.sim, kcmvp.sim, world.sim
#
# Usage with China compliance:
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-china.yml \
# -f docker-compose.crypto-sim.yml up -d
#
# Usage with Russia compliance:
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-russia.yml \
# -f docker-compose.crypto-sim.yml up -d
#
# Usage with EU compliance:
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-eu.yml \
# -f docker-compose.crypto-sim.yml up -d
#
# IMPORTANT: This is for TESTING/DEVELOPMENT ONLY.
# - Uses deterministic HMAC-SHA256 for SM/GOST/PQ (not real algorithms)
# - Uses static ECDSA P-256 key for FIPS/eIDAS/KCMVP
# - NOT suitable for production or compliance certification
#
# =============================================================================
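# Quick smoke test once the overlay is running (illustrative; assumes the
# default SIM_CRYPTO_PORT of 18090 published below):
#   curl -f http://localhost:18090/keys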
x-crypto-sim-labels: &crypto-sim-labels
com.stellaops.component: "crypto-sim"
com.stellaops.profile: "simulation"
com.stellaops.production: "false"
x-sim-crypto-env: &sim-crypto-env
STELLAOPS_CRYPTO_ENABLE_SIM: "1"
STELLAOPS_CRYPTO_SIM_URL: "http://sim-crypto:8080"
networks:
stellaops:
external: true
name: stellaops
services:
# ---------------------------------------------------------------------------
# Sim Crypto Service - Universal sovereign crypto simulator
# ---------------------------------------------------------------------------
sim-crypto:
build:
context: ../services/crypto/sim-crypto-service
dockerfile: Dockerfile
image: registry.stella-ops.org/stellaops/sim-crypto:dev
container_name: stellaops-sim-crypto
restart: unless-stopped
environment:
ASPNETCORE_URLS: "http://0.0.0.0:8080"
ASPNETCORE_ENVIRONMENT: "Development"
ports:
- "${SIM_CRYPTO_PORT:-18090}:8080"
networks:
- stellaops
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/keys"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
labels: *crypto-sim-labels
# ---------------------------------------------------------------------------
# Override services to use sim-crypto
# ---------------------------------------------------------------------------
# Authority - Enable sim crypto
authority:
environment:
<<: *sim-crypto-env
labels:
com.stellaops.crypto.simulator: "enabled"
# Signer - Enable sim crypto
signer:
environment:
<<: *sim-crypto-env
labels:
com.stellaops.crypto.simulator: "enabled"
# Attestor - Enable sim crypto
attestor:
environment:
<<: *sim-crypto-env
labels:
com.stellaops.crypto.simulator: "enabled"
# Scanner Web - Enable sim crypto
scanner-web:
environment:
<<: *sim-crypto-env
labels:
com.stellaops.crypto.simulator: "enabled"
# Scanner Worker - Enable sim crypto
scanner-worker:
environment:
<<: *sim-crypto-env
labels:
com.stellaops.crypto.simulator: "enabled"
# Excititor - Enable sim crypto
excititor:
environment:
<<: *sim-crypto-env
labels:
com.stellaops.crypto.simulator: "enabled"

View File

@@ -1,149 +0,0 @@
# =============================================================================
# STELLA OPS - CRYPTOPRO CSP OVERLAY (Russia)
# =============================================================================
# Licensed CryptoPro CSP provider overlay for docker-compose.compliance-russia.yml.
# Adds a real CryptoPro CSP service for certified GOST R 34.10-2012 operations.
#
# IMPORTANT: Requires EULA acceptance before use.
#
# Usage (MUST be combined with stella-ops AND compliance-russia):
# CRYPTOPRO_ACCEPT_EULA=1 docker compose \
# -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-russia.yml \
# -f docker-compose.cryptopro.yml up -d
#
# For development/testing without a CryptoPro license, use crypto-sim.yml instead:
# docker compose \
# -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-russia.yml \
# -f docker-compose.crypto-sim.yml up -d
#
# Requirements:
# - CryptoPro CSP license files in opt/cryptopro/downloads/
# - CRYPTOPRO_ACCEPT_EULA=1 environment variable
# - CryptoPro container images with GOST engine
#
# GOST Algorithms Provided:
# - GOST R 34.10-2012: Digital signature (256/512-bit)
# - GOST R 34.11-2012: Hash function (Streebog, 256/512-bit)
# - GOST R 34.12-2015: Block cipher (Kuznyechik, Magma)
#
# =============================================================================
x-cryptopro-labels: &cryptopro-labels
com.stellaops.component: "cryptopro-csp"
com.stellaops.crypto.provider: "cryptopro"
com.stellaops.crypto.profile: "russia"
com.stellaops.crypto.certified: "true"
x-cryptopro-env: &cryptopro-env
STELLAOPS_CRYPTO_PROVIDERS: "cryptopro.gost"
STELLAOPS_CRYPTO_CRYPTOPRO_URL: "http://cryptopro-csp:8080"
STELLAOPS_CRYPTO_CRYPTOPRO_ENABLED: "true"
networks:
stellaops:
external: true
name: stellaops
services:
# ---------------------------------------------------------------------------
# CryptoPro CSP - Certified GOST cryptography provider
# ---------------------------------------------------------------------------
cryptopro-csp:
build:
context: ../..
dockerfile: devops/services/cryptopro/linux-csp-service/Dockerfile
args:
CRYPTOPRO_ACCEPT_EULA: "${CRYPTOPRO_ACCEPT_EULA:-0}"
image: registry.stella-ops.org/stellaops/cryptopro-csp:2025.10.0
container_name: stellaops-cryptopro-csp
restart: unless-stopped
environment:
ASPNETCORE_URLS: "http://0.0.0.0:8080"
CRYPTOPRO_ACCEPT_EULA: "${CRYPTOPRO_ACCEPT_EULA:-0}"
# GOST algorithm configuration
CRYPTOPRO_GOST_SIGNATURE_ALGORITHM: "GOST R 34.10-2012"
CRYPTOPRO_GOST_HASH_ALGORITHM: "GOST R 34.11-2012"
# Container and key store settings
CRYPTOPRO_CONTAINER_NAME: "${CRYPTOPRO_CONTAINER_NAME:-stellaops-signing}"
CRYPTOPRO_USE_MACHINE_STORE: "${CRYPTOPRO_USE_MACHINE_STORE:-true}"
CRYPTOPRO_PROVIDER_TYPE: "${CRYPTOPRO_PROVIDER_TYPE:-80}"
volumes:
- ../../opt/cryptopro/downloads:/opt/cryptopro/downloads:ro
- ../../etc/cryptopro:/app/etc/cryptopro:ro
# Optional: Mount key containers
- cryptopro-keys:/var/opt/cprocsp/keys
ports:
- "${CRYPTOPRO_PORT:-18080}:8080"
networks:
- stellaops
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
labels: *cryptopro-labels
# ---------------------------------------------------------------------------
# Override services to use CryptoPro
# ---------------------------------------------------------------------------
# Authority - Use CryptoPro for GOST signatures
authority:
environment:
<<: *cryptopro-env
depends_on:
- cryptopro-csp
labels:
com.stellaops.crypto.provider: "cryptopro"
# Signer - Use CryptoPro for GOST signatures
signer:
environment:
<<: *cryptopro-env
depends_on:
- cryptopro-csp
labels:
com.stellaops.crypto.provider: "cryptopro"
# Attestor - Use CryptoPro for GOST signatures
attestor:
environment:
<<: *cryptopro-env
depends_on:
- cryptopro-csp
labels:
com.stellaops.crypto.provider: "cryptopro"
# Scanner Web - Use CryptoPro for verification
scanner-web:
environment:
<<: *cryptopro-env
depends_on:
- cryptopro-csp
labels:
com.stellaops.crypto.provider: "cryptopro"
# Scanner Worker - Use CryptoPro for verification
scanner-worker:
environment:
<<: *cryptopro-env
depends_on:
- cryptopro-csp
labels:
com.stellaops.crypto.provider: "cryptopro"
# Excititor - Use CryptoPro for VEX signing
excititor:
environment:
<<: *cryptopro-env
depends_on:
- cryptopro-csp
labels:
com.stellaops.crypto.provider: "cryptopro"
volumes:
cryptopro-keys:
name: stellaops-cryptopro-keys

View File

@@ -1,73 +0,0 @@
# =============================================================================
# DEVELOPMENT STACK - MINIMAL LOCAL DEVELOPMENT
# =============================================================================
# Minimal infrastructure for local development. Use this when you only need
# the core infrastructure without all application services.
#
# For full platform, use docker-compose.stella-ops.yml instead.
#
# Usage:
# docker compose -f docker-compose.dev.yml up -d
#
# This provides:
# - PostgreSQL 18.1 on port 5432
# - Valkey 9.0.1 on port 6379
# - RustFS on port 8080
# =============================================================================
services:
postgres:
image: postgres:18.1-alpine
container_name: stellaops-dev-postgres
restart: unless-stopped
environment:
POSTGRES_USER: ${POSTGRES_USER:-stellaops}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-stellaops}
POSTGRES_DB: ${POSTGRES_DB:-stellaops_dev}
volumes:
- postgres-data:/var/lib/postgresql/data
ports:
- "${POSTGRES_PORT:-5432}:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-stellaops}"]
interval: 10s
timeout: 5s
retries: 5
valkey:
image: valkey/valkey:9.0.1-alpine
container_name: stellaops-dev-valkey
restart: unless-stopped
command: ["valkey-server", "--appendonly", "yes"]
volumes:
- valkey-data:/data
ports:
- "${VALKEY_PORT:-6379}:6379"
healthcheck:
test: ["CMD", "valkey-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
rustfs:
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
container_name: stellaops-dev-rustfs
restart: unless-stopped
command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data"]
environment:
RUSTFS__LOG__LEVEL: info
RUSTFS__STORAGE__PATH: /data
volumes:
- rustfs-data:/data
ports:
- "${RUSTFS_PORT:-8080}:8080"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
volumes:
postgres-data:
valkey-data:
rustfs-data:

View File

@@ -1,40 +0,0 @@
# =============================================================================
# STELLA OPS GPU OVERLAY
# =============================================================================
# Enables NVIDIA GPU acceleration for Advisory AI inference services.
#
# Prerequisites:
# - NVIDIA GPU with CUDA support
# - nvidia-container-toolkit installed
# - Docker configured with nvidia runtime
#
# Usage:
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.gpu.yaml up -d
#
# =============================================================================
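# To confirm the host GPU is reachable from containers before applying this
# overlay (illustrative; the CUDA image tag is only an example):
#   docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi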
services:
advisory-ai-worker:
deploy:
resources:
reservations:
devices:
- capabilities: [gpu]
driver: nvidia
count: 1
environment:
ADVISORY_AI_INFERENCE_GPU: "true"
runtime: nvidia
advisory-ai-web:
deploy:
resources:
reservations:
devices:
- capabilities: [gpu]
driver: nvidia
count: 1
environment:
ADVISORY_AI_INFERENCE_GPU: "true"
runtime: nvidia

View File

@@ -1,121 +0,0 @@
# =============================================================================
# SEALED CI - AIR-GAPPED TESTING ENVIRONMENT
# =============================================================================
# Sealed/air-gapped CI environment for testing offline functionality.
# All services run in an isolated network with no external egress.
#
# Usage:
# docker compose -f docker-compose.sealed-ci.yml up -d
# =============================================================================
x-release-labels: &release-labels
com.stellaops.profile: 'sealed-ci'
com.stellaops.airgap.mode: 'sealed'
networks:
sealed-ci:
driver: bridge
volumes:
sealed-postgres-data:
sealed-valkey-data:
services:
postgres:
image: docker.io/library/postgres@sha256:8e97b8526ed19304b144f7478bc9201646acf0723cdc6e4b19bc9eb34879a27e
restart: unless-stopped
environment:
POSTGRES_USER: sealedci
POSTGRES_PASSWORD: sealedci-secret
POSTGRES_DB: stellaops
volumes:
- sealed-postgres-data:/var/lib/postgresql/data
networks:
- sealed-ci
healthcheck:
test: ["CMD-SHELL", "pg_isready -U sealedci -d stellaops"]
interval: 10s
timeout: 5s
retries: 5
labels: *release-labels
valkey:
image: docker.io/valkey/valkey:9.0.1-alpine
restart: unless-stopped
command: ["valkey-server", "--appendonly", "yes"]
volumes:
- sealed-valkey-data:/data
networks:
- sealed-ci
healthcheck:
test: ["CMD", "valkey-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
labels: *release-labels
authority:
image: registry.stella-ops.org/stellaops/authority@sha256:a8e8faec44a579aa5714e58be835f25575710430b1ad2ccd1282a018cd9ffcdd
depends_on:
postgres:
condition: service_healthy
valkey:
condition: service_healthy
restart: unless-stopped
environment:
ASPNETCORE_URLS: http://+:5088
STELLAOPS_AUTHORITY__ISSUER: http://authority.sealed-ci.local
STELLAOPS_AUTHORITY__STORAGE__DRIVER: postgres
STELLAOPS_AUTHORITY__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres;Port=5432;Database=authority;Username=sealedci;Password=sealedci-secret"
STELLAOPS_AUTHORITY__CACHE__REDIS__CONNECTIONSTRING: "valkey:6379"
STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: /app/plugins
STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: /app/plugins
STELLAOPS_AUTHORITY__SECURITY__SENDERCONSTRAINTS__DPOP__ENABLED: 'true'
STELLAOPS_AUTHORITY__SECURITY__SENDERCONSTRAINTS__MTLS__ENABLED: 'true'
STELLAOPS_AUTHORITY__AIRGAP__EGRESS__MODE: Sealed
volumes:
- ../services/sealed-mode-ci/authority.harness.yaml:/etc/authority.yaml:ro
- ../services/sealed-mode-ci/plugins:/app/plugins:ro
- ../../certificates:/certificates:ro
ports:
- '5088:5088'
networks:
- sealed-ci
labels: *release-labels
signer:
image: registry.stella-ops.org/stellaops/signer@sha256:8bfef9a75783883d49fc18e3566553934e970b00ee090abee9cb110d2d5c3298
depends_on:
- authority
restart: unless-stopped
environment:
ASPNETCORE_URLS: http://+:6088
SIGNER__AUTHORITY__BASEURL: http://authority:5088
SIGNER__POE__INTROSPECTURL: http://authority:5088/device-code
SIGNER__STORAGE__DRIVER: postgres
SIGNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres;Port=5432;Database=signer;Username=sealedci;Password=sealedci-secret"
SIGNER__CACHE__REDIS__CONNECTIONSTRING: "valkey:6379"
SIGNER__SEALED__MODE: Enabled
ports:
- '6088:6088'
networks:
- sealed-ci
labels: *release-labels
attestor:
image: registry.stella-ops.org/stellaops/attestor@sha256:5cc417948c029da01dccf36e4645d961a3f6d8de7e62fe98d845f07cd2282114
depends_on:
- signer
restart: unless-stopped
environment:
ASPNETCORE_URLS: http://+:7088
ATTESTOR__SIGNER__BASEURL: http://signer:6088
ATTESTOR__STORAGE__DRIVER: postgres
ATTESTOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres;Port=5432;Database=attestor;Username=sealedci;Password=sealedci-secret"
ATTESTOR__CACHE__REDIS__CONNECTIONSTRING: "valkey:6379"
ATTESTOR__SEALED__MODE: Enabled
ports:
- '7088:7088'
networks:
- sealed-ci
labels: *release-labels

View File

@@ -1,153 +0,0 @@
# =============================================================================
# STELLA OPS - SM REMOTE OVERLAY (China)
# =============================================================================
# SM Remote service overlay for docker-compose.compliance-china.yml.
# Provides SM2/SM3/SM4 (ShangMi) cryptographic operations via a software provider
# or via integration with OSCCA-certified hardware security modules.
#
# Usage (MUST be combined with stella-ops AND compliance-china):
# docker compose \
# -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-china.yml \
# -f docker-compose.sm-remote.yml up -d
#
# For development/testing without SM hardware, use crypto-sim.yml instead:
# docker compose \
# -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-china.yml \
# -f docker-compose.crypto-sim.yml up -d
#
# SM Algorithms Provided:
# - SM2: Public key cryptography (ECDSA-like, 256-bit curve) - GM/T 0003-2012
# - SM3: Cryptographic hash function (256-bit output) - GM/T 0004-2012
# - SM4: Block cipher (128-bit key/block, AES-like) - GM/T 0002-2012
# - SM9: Identity-based cryptography - GM/T 0044-2016
#
# Providers:
# - cn.sm.soft: Software-only implementation using BouncyCastle
# - cn.sm.remote.http: Remote HSM integration via HTTP API
#
# OSCCA Compliance:
# - All cryptographic operations use SM algorithms exclusively
# - Hardware Security Modules should be OSCCA-certified
# - Certificates comply with GM/T 0015 (Certificate Profile)
#
# =============================================================================
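# Quick status check once the overlay is running (illustrative; assumes the
# default SM_REMOTE_PORT of 56080 published below):
#   curl -f http://localhost:56080/status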
x-sm-remote-labels: &sm-remote-labels
com.stellaops.component: "sm-remote"
com.stellaops.crypto.provider: "sm"
com.stellaops.crypto.profile: "china"
com.stellaops.crypto.jurisdiction: "china"
x-sm-remote-env: &sm-remote-env
STELLAOPS_CRYPTO_PROVIDERS: "cn.sm.soft,cn.sm.remote.http"
STELLAOPS_CRYPTO_SM_REMOTE_URL: "http://sm-remote:56080"
STELLAOPS_CRYPTO_SM_ENABLED: "true"
SM_SOFT_ALLOWED: "1"
networks:
stellaops:
external: true
name: stellaops
services:
# ---------------------------------------------------------------------------
# SM Remote Service - ShangMi cryptography provider
# ---------------------------------------------------------------------------
sm-remote:
build:
context: ../..
dockerfile: devops/services/sm-remote/Dockerfile
image: registry.stella-ops.org/stellaops/sm-remote:2025.10.0
container_name: stellaops-sm-remote
restart: unless-stopped
environment:
ASPNETCORE_URLS: "http://0.0.0.0:56080"
ASPNETCORE_ENVIRONMENT: "Production"
# Enable software-only SM2 provider (for testing/development)
SM_SOFT_ALLOWED: "${SM_SOFT_ALLOWED:-1}"
# Optional: Remote HSM configuration (for production with OSCCA-certified HSM)
SM_REMOTE_HSM_URL: "${SM_REMOTE_HSM_URL:-}"
SM_REMOTE_HSM_API_KEY: "${SM_REMOTE_HSM_API_KEY:-}"
SM_REMOTE_HSM_TIMEOUT: "${SM_REMOTE_HSM_TIMEOUT:-30000}"
# Optional: Client certificate authentication for HSM
SM_REMOTE_CLIENT_CERT_PATH: "${SM_REMOTE_CLIENT_CERT_PATH:-}"
SM_REMOTE_CLIENT_CERT_PASSWORD: "${SM_REMOTE_CLIENT_CERT_PASSWORD:-}"
volumes:
- ../../etc/sm-remote:/app/etc/sm-remote:ro
# Optional: Mount SM key containers
- sm-remote-keys:/var/lib/stellaops/sm-keys
ports:
- "${SM_REMOTE_PORT:-56080}:56080"
networks:
- stellaops
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:56080/status"]
interval: 30s
timeout: 10s
retries: 3
start_period: 15s
labels: *sm-remote-labels
# ---------------------------------------------------------------------------
# Override services to use SM Remote
# ---------------------------------------------------------------------------
# Authority - Use SM Remote for SM2 signatures
authority:
environment:
<<: *sm-remote-env
depends_on:
- sm-remote
labels:
com.stellaops.crypto.provider: "sm"
# Signer - Use SM Remote for SM2 signatures
signer:
environment:
<<: *sm-remote-env
depends_on:
- sm-remote
labels:
com.stellaops.crypto.provider: "sm"
# Attestor - Use SM Remote for SM2 signatures
attestor:
environment:
<<: *sm-remote-env
depends_on:
- sm-remote
labels:
com.stellaops.crypto.provider: "sm"
# Scanner Web - Use SM Remote for verification
scanner-web:
environment:
<<: *sm-remote-env
depends_on:
- sm-remote
labels:
com.stellaops.crypto.provider: "sm"
# Scanner Worker - Use SM Remote for verification
scanner-worker:
environment:
<<: *sm-remote-env
depends_on:
- sm-remote
labels:
com.stellaops.crypto.provider: "sm"
# Excititor - Use SM Remote for VEX signing
excititor:
environment:
<<: *sm-remote-env
depends_on:
- sm-remote
labels:
com.stellaops.crypto.provider: "sm"
volumes:
sm-remote-keys:
name: stellaops-sm-remote-keys

View File

@@ -1,90 +0,0 @@
# =============================================================================
# TELEMETRY OFFLINE - AIR-GAPPED OBSERVABILITY
# =============================================================================
# Offline-compatible telemetry stack for air-gapped deployments.
# Does not require external connectivity.
#
# Usage:
# docker compose -f docker-compose.telemetry-offline.yml up -d
#
# For online deployments, use docker-compose.telemetry.yml instead.
# =============================================================================
services:
loki:
image: grafana/loki:3.0.1
container_name: stellaops-loki-offline
command: ["-config.file=/etc/loki/local-config.yaml"]
volumes:
- loki-data:/loki
- ../offline/airgap/observability/loki-config.yaml:/etc/loki/local-config.yaml:ro
ports:
- "${LOKI_PORT:-3100}:3100"
networks:
- sealed
restart: unless-stopped
promtail:
image: grafana/promtail:3.0.1
container_name: stellaops-promtail-offline
command: ["-config.file=/etc/promtail/config.yml"]
volumes:
- promtail-data:/var/log
- ../offline/airgap/promtail-config.yaml:/etc/promtail/config.yml:ro
networks:
- sealed
restart: unless-stopped
otel-collector:
image: otel/opentelemetry-collector-contrib:0.97.0
container_name: stellaops-otel-offline
command: ["--config=/etc/otel/config.yaml"]
volumes:
- ../offline/airgap/otel-offline.yaml:/etc/otel/config.yaml:ro
- otel-data:/var/otel
ports:
- "${OTEL_GRPC_PORT:-4317}:4317"
- "${OTEL_HTTP_PORT:-4318}:4318"
networks:
- sealed
restart: unless-stopped
tempo:
image: grafana/tempo:2.4.1
container_name: stellaops-tempo-offline
command: ["-config.file=/etc/tempo/config.yaml"]
volumes:
- tempo-data:/var/tempo
- ../offline/airgap/observability/tempo-config.yaml:/etc/tempo/config.yaml:ro
ports:
- "${TEMPO_PORT:-3200}:3200"
networks:
- sealed
restart: unless-stopped
prometheus:
image: prom/prometheus:v2.51.0
container_name: stellaops-prometheus-offline
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=15d'
volumes:
- prometheus-data:/prometheus
- ../offline/airgap/observability/prometheus.yml:/etc/prometheus/prometheus.yml:ro
ports:
- "${PROMETHEUS_PORT:-9090}:9090"
networks:
- sealed
restart: unless-stopped
networks:
sealed:
driver: bridge
volumes:
loki-data:
promtail-data:
otel-data:
tempo-data:
prometheus-data:

View File

@@ -1,144 +0,0 @@
# =============================================================================
# STELLA OPS - TELEMETRY STACK
# =============================================================================
# All-in-one observability: OpenTelemetry Collector, Prometheus, Tempo, Loki
#
# Usage:
# docker compose -f devops/compose/docker-compose.telemetry.yml up -d
#
# With main stack:
# docker compose -f devops/compose/docker-compose.stella-ops.yml \
# -f devops/compose/docker-compose.telemetry.yml up -d
#
# =============================================================================
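# The collector below expects mTLS material under ../telemetry/certs
# (ca.crt, collector.crt/.key, client.crt/.key). A throwaway dev chain can be
# generated roughly as follows (illustrative only; real deployments also need
# subjectAltName entries and proper key management):
#   openssl req -x509 -newkey rsa:2048 -nodes -keyout ca.key -out ca.crt -days 30 -subj "/CN=stellaops-dev-ca"
#   openssl req -newkey rsa:2048 -nodes -keyout collector.key -out collector.csr -subj "/CN=otel-collector"
#   openssl x509 -req -in collector.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out collector.crt -days 30
#   openssl req -newkey rsa:2048 -nodes -keyout client.key -out client.csr -subj "/CN=telemetry-client"
#   openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt -days 30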
x-telemetry-labels: &telemetry-labels
com.stellaops.component: "telemetry"
com.stellaops.profile: "observability"
networks:
stellaops-telemetry:
driver: bridge
name: stellaops-telemetry
stellaops:
external: true
name: stellaops
volumes:
prometheus-data:
tempo-data:
loki-data:
services:
# ---------------------------------------------------------------------------
# OpenTelemetry Collector - Unified telemetry ingestion
# ---------------------------------------------------------------------------
otel-collector:
image: otel/opentelemetry-collector:0.105.0
container_name: stellaops-otel-collector
restart: unless-stopped
command:
- "--config=/etc/otel-collector/config.yaml"
environment:
STELLAOPS_OTEL_TLS_CERT: /etc/otel-collector/tls/collector.crt
STELLAOPS_OTEL_TLS_KEY: /etc/otel-collector/tls/collector.key
STELLAOPS_OTEL_TLS_CA: /etc/otel-collector/tls/ca.crt
STELLAOPS_OTEL_PROMETHEUS_ENDPOINT: 0.0.0.0:9464
STELLAOPS_OTEL_REQUIRE_CLIENT_CERT: "true"
STELLAOPS_TENANT_ID: ${STELLAOPS_TENANT_ID:-default}
STELLAOPS_TEMPO_ENDPOINT: http://tempo:3200
STELLAOPS_TEMPO_TLS_CERT_FILE: /etc/otel-collector/tls/client.crt
STELLAOPS_TEMPO_TLS_KEY_FILE: /etc/otel-collector/tls/client.key
STELLAOPS_TEMPO_TLS_CA_FILE: /etc/otel-collector/tls/ca.crt
STELLAOPS_LOKI_ENDPOINT: http://loki:3100/loki/api/v1/push
STELLAOPS_LOKI_TLS_CERT_FILE: /etc/otel-collector/tls/client.crt
STELLAOPS_LOKI_TLS_KEY_FILE: /etc/otel-collector/tls/client.key
STELLAOPS_LOKI_TLS_CA_FILE: /etc/otel-collector/tls/ca.crt
volumes:
- ../telemetry/otel-collector-config.yaml:/etc/otel-collector/config.yaml:ro
- ../telemetry/certs:/etc/otel-collector/tls:ro
ports:
- "${OTEL_GRPC_PORT:-4317}:4317" # OTLP gRPC
- "${OTEL_HTTP_PORT:-4318}:4318" # OTLP HTTP
- "${OTEL_PROMETHEUS_PORT:-9464}:9464" # Prometheus exporter
- "${OTEL_HEALTH_PORT:-13133}:13133" # Health check
- "${OTEL_PPROF_PORT:-1777}:1777" # pprof
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:13133/healthz"]
interval: 30s
start_period: 15s
timeout: 5s
retries: 3
networks:
- stellaops-telemetry
- stellaops
labels: *telemetry-labels
# ---------------------------------------------------------------------------
# Prometheus - Metrics storage
# ---------------------------------------------------------------------------
prometheus:
image: prom/prometheus:v2.53.0
container_name: stellaops-prometheus
restart: unless-stopped
command:
- "--config.file=/etc/prometheus/prometheus.yaml"
- "--storage.tsdb.path=/prometheus"
- "--storage.tsdb.retention.time=${PROMETHEUS_RETENTION:-15d}"
- "--web.enable-lifecycle"
volumes:
- ../telemetry/storage/prometheus.yaml:/etc/prometheus/prometheus.yaml:ro
- prometheus-data:/prometheus
- ../telemetry/certs:/etc/telemetry/tls:ro
- ../telemetry/storage/auth:/etc/telemetry/auth:ro
environment:
PROMETHEUS_COLLECTOR_TARGET: otel-collector:9464
ports:
- "${PROMETHEUS_PORT:-9090}:9090"
depends_on:
- otel-collector
networks:
- stellaops-telemetry
labels: *telemetry-labels
# ---------------------------------------------------------------------------
# Tempo - Distributed tracing backend
# ---------------------------------------------------------------------------
tempo:
image: grafana/tempo:2.5.0
container_name: stellaops-tempo
restart: unless-stopped
command:
- "-config.file=/etc/tempo/tempo.yaml"
volumes:
- ../telemetry/storage/tempo.yaml:/etc/tempo/tempo.yaml:ro
- ../telemetry/storage/tenants/tempo-overrides.yaml:/etc/telemetry/tenants/tempo-overrides.yaml:ro
- ../telemetry/certs:/etc/telemetry/tls:ro
- tempo-data:/var/tempo
environment:
TEMPO_ZONE: docker
ports:
- "${TEMPO_PORT:-3200}:3200"
networks:
- stellaops-telemetry
labels: *telemetry-labels
# ---------------------------------------------------------------------------
# Loki - Log aggregation
# ---------------------------------------------------------------------------
loki:
image: grafana/loki:3.1.0
container_name: stellaops-loki
restart: unless-stopped
command:
- "-config.file=/etc/loki/loki.yaml"
volumes:
- ../telemetry/storage/loki.yaml:/etc/loki/loki.yaml:ro
- ../telemetry/storage/tenants/loki-overrides.yaml:/etc/telemetry/tenants/loki-overrides.yaml:ro
- ../telemetry/certs:/etc/telemetry/tls:ro
- loki-data:/var/loki
ports:
- "${LOKI_PORT:-3100}:3100"
networks:
- stellaops-telemetry
labels: *telemetry-labels

View File

@@ -1,327 +0,0 @@
# =============================================================================
# STELLA OPS - TESTING STACK
# =============================================================================
# Consolidated CI, mock services, and Gitea for integration testing.
# Uses different ports to avoid conflicts with development/production services.
#
# Usage:
# docker compose -f devops/compose/docker-compose.testing.yml up -d
#
# CI infrastructure only:
# docker compose -f devops/compose/docker-compose.testing.yml --profile ci up -d
#
# Mock services only:
# docker compose -f devops/compose/docker-compose.testing.yml --profile mock up -d
#
# Gitea only:
# docker compose -f devops/compose/docker-compose.testing.yml --profile gitea up -d
#
# =============================================================================
x-testing-labels: &testing-labels
com.stellaops.profile: "testing"
com.stellaops.environment: "ci"
networks:
testing-net:
driver: bridge
name: stellaops-testing
volumes:
# CI volumes
ci-postgres-data:
name: stellaops-ci-postgres
ci-valkey-data:
name: stellaops-ci-valkey
ci-rustfs-data:
name: stellaops-ci-rustfs
# Gitea volumes
gitea-data:
gitea-config:
services:
# ===========================================================================
# CI INFRASTRUCTURE (different ports to avoid conflicts)
# ===========================================================================
# ---------------------------------------------------------------------------
# PostgreSQL 18.1 - Test database (port 5433)
# ---------------------------------------------------------------------------
postgres-test:
image: postgres:18.1-alpine
container_name: stellaops-postgres-test
profiles: ["ci", "all"]
environment:
POSTGRES_USER: stellaops_ci
POSTGRES_PASSWORD: ci_test_password
POSTGRES_DB: stellaops_test
POSTGRES_INITDB_ARGS: "--data-checksums"
ports:
- "${TEST_POSTGRES_PORT:-5433}:5432"
volumes:
- ci-postgres-data:/var/lib/postgresql/data
networks:
- testing-net
healthcheck:
test: ["CMD-SHELL", "pg_isready -U stellaops_ci -d stellaops_test"]
interval: 5s
timeout: 5s
retries: 10
start_period: 10s
restart: unless-stopped
labels: *testing-labels
# ---------------------------------------------------------------------------
# Valkey 9.0.1 - Test cache/queue (port 6380)
# ---------------------------------------------------------------------------
valkey-test:
image: valkey/valkey:9.0.1-alpine
container_name: stellaops-valkey-test
profiles: ["ci", "all"]
command: ["valkey-server", "--appendonly", "yes", "--maxmemory", "256mb", "--maxmemory-policy", "allkeys-lru"]
ports:
- "${TEST_VALKEY_PORT:-6380}:6379"
volumes:
- ci-valkey-data:/data
networks:
- testing-net
healthcheck:
test: ["CMD", "valkey-cli", "ping"]
interval: 5s
timeout: 5s
retries: 5
restart: unless-stopped
labels: *testing-labels
# ---------------------------------------------------------------------------
# RustFS - Test artifact storage (port 8180)
# ---------------------------------------------------------------------------
rustfs-test:
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
container_name: stellaops-rustfs-test
profiles: ["ci", "all"]
command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data"]
environment:
RUSTFS__LOG__LEVEL: info
RUSTFS__STORAGE__PATH: /data
ports:
- "${TEST_RUSTFS_PORT:-8180}:8080"
volumes:
- ci-rustfs-data:/data
networks:
- testing-net
restart: unless-stopped
labels: *testing-labels
# ---------------------------------------------------------------------------
# Mock Container Registry (port 5001)
# ---------------------------------------------------------------------------
mock-registry:
image: registry:2
container_name: stellaops-registry-test
profiles: ["ci", "all"]
ports:
- "${TEST_REGISTRY_PORT:-5001}:5000"
environment:
REGISTRY_STORAGE_DELETE_ENABLED: "true"
networks:
- testing-net
restart: unless-stopped
labels: *testing-labels
# ---------------------------------------------------------------------------
# Sigstore CLI tools (on-demand)
# ---------------------------------------------------------------------------
rekor-cli:
image: ghcr.io/sigstore/rekor-cli:v1.4.3
entrypoint: ["rekor-cli"]
command: ["version"]
profiles: ["sigstore"]
networks:
- testing-net
labels: *testing-labels
cosign:
image: ghcr.io/sigstore/cosign:v3.0.4
entrypoint: ["cosign"]
command: ["version"]
profiles: ["sigstore"]
networks:
- testing-net
labels: *testing-labels
# ===========================================================================
# MOCK SERVICES (for extended integration testing)
# ===========================================================================
# ---------------------------------------------------------------------------
# Orchestrator mock
# ---------------------------------------------------------------------------
orchestrator:
image: registry.stella-ops.org/stellaops/orchestrator@sha256:97f12856ce870bafd3328bda86833bcccbf56d255941d804966b5557f6610119
container_name: stellaops-orchestrator-mock
profiles: ["mock", "all"]
command: ["dotnet", "StellaOps.Orchestrator.WebService.dll"]
depends_on:
- postgres-test
- valkey-test
environment:
ORCHESTRATOR__STORAGE__DRIVER: "postgres"
ORCHESTRATOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres-test;Port=5432;Database=stellaops_test;Username=stellaops_ci;Password=ci_test_password"
ORCHESTRATOR__QUEUE__DRIVER: "valkey"
ORCHESTRATOR__QUEUE__VALKEY__URL: "valkey-test:6379"
networks:
- testing-net
labels: *testing-labels
# ---------------------------------------------------------------------------
# Policy Registry mock
# ---------------------------------------------------------------------------
policy-registry:
image: registry.stella-ops.org/stellaops/policy-registry@sha256:c6cad8055e9827ebcbebb6ad4d6866dce4b83a0a49b0a8a6500b736a5cb26fa7
container_name: stellaops-policy-registry-mock
profiles: ["mock", "all"]
command: ["dotnet", "StellaOps.Policy.Engine.dll"]
depends_on:
- postgres-test
environment:
POLICY__STORAGE__DRIVER: "postgres"
POLICY__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres-test;Port=5432;Database=stellaops_test;Username=stellaops_ci;Password=ci_test_password"
networks:
- testing-net
labels: *testing-labels
# ---------------------------------------------------------------------------
# VEX Lens mock
# ---------------------------------------------------------------------------
vex-lens:
image: registry.stella-ops.org/stellaops/vex-lens@sha256:b44e63ecfeebc345a70c073c1ce5ace709c58be0ffaad0e2862758aeee3092fb
container_name: stellaops-vex-lens-mock
profiles: ["mock", "all"]
command: ["dotnet", "StellaOps.VexLens.dll"]
depends_on:
- postgres-test
environment:
VEXLENS__STORAGE__DRIVER: "postgres"
VEXLENS__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres-test;Port=5432;Database=stellaops_test;Username=stellaops_ci;Password=ci_test_password"
networks:
- testing-net
labels: *testing-labels
# ---------------------------------------------------------------------------
# Findings Ledger mock
# ---------------------------------------------------------------------------
findings-ledger:
image: registry.stella-ops.org/stellaops/findings-ledger@sha256:71d4c361ba8b2f8b69d652597bc3f2efc8a64f93fab854ce25272a88506df49c
container_name: stellaops-findings-ledger-mock
profiles: ["mock", "all"]
command: ["dotnet", "StellaOps.Findings.Ledger.WebService.dll"]
depends_on:
- postgres-test
environment:
FINDINGSLEDGER__STORAGE__DRIVER: "postgres"
FINDINGSLEDGER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres-test;Port=5432;Database=stellaops_test;Username=stellaops_ci;Password=ci_test_password"
networks:
- testing-net
labels: *testing-labels
# ---------------------------------------------------------------------------
# Vuln Explorer API mock
# ---------------------------------------------------------------------------
vuln-explorer-api:
image: registry.stella-ops.org/stellaops/vuln-explorer-api@sha256:7fc7e43a05cbeb0106ce7d4d634612e83de6fdc119aaab754a71c1d60b82841d
container_name: stellaops-vuln-explorer-mock
profiles: ["mock", "all"]
command: ["dotnet", "StellaOps.VulnExplorer.Api.dll"]
depends_on:
- findings-ledger
networks:
- testing-net
labels: *testing-labels
# ---------------------------------------------------------------------------
# Packs Registry mock
# ---------------------------------------------------------------------------
packs-registry:
image: registry.stella-ops.org/stellaops/packs-registry@sha256:1f5e9416c4dc608594ad6fad87c24d72134427f899c192b494e22b268499c791
container_name: stellaops-packs-registry-mock
profiles: ["mock", "all"]
command: ["dotnet", "StellaOps.PacksRegistry.dll"]
depends_on:
- postgres-test
environment:
PACKSREGISTRY__STORAGE__DRIVER: "postgres"
PACKSREGISTRY__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres-test;Port=5432;Database=stellaops_test;Username=stellaops_ci;Password=ci_test_password"
networks:
- testing-net
labels: *testing-labels
# ---------------------------------------------------------------------------
# Task Runner mock
# ---------------------------------------------------------------------------
task-runner:
image: registry.stella-ops.org/stellaops/task-runner@sha256:eb5ad992b49a41554f41516be1a6afcfa6522faf2111c08ff2b3664ad2fc954b
container_name: stellaops-task-runner-mock
profiles: ["mock", "all"]
command: ["dotnet", "StellaOps.TaskRunner.WebService.dll"]
depends_on:
- packs-registry
- postgres-test
environment:
TASKRUNNER__STORAGE__DRIVER: "postgres"
TASKRUNNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=postgres-test;Port=5432;Database=stellaops_test;Username=stellaops_ci;Password=ci_test_password"
networks:
- testing-net
labels: *testing-labels
# ===========================================================================
# GITEA (SCM integration testing)
# ===========================================================================
# ---------------------------------------------------------------------------
# Gitea - Git hosting with package registry
# ---------------------------------------------------------------------------
gitea:
image: gitea/gitea:1.21
container_name: stellaops-gitea-test
profiles: ["gitea", "all"]
environment:
- USER_UID=1000
- USER_GID=1000
# Enable package registry
- GITEA__packages__ENABLED=true
- GITEA__packages__CHUNKED_UPLOAD_PATH=/data/tmp/package-upload
# Enable NuGet
- GITEA__packages__NUGET_ENABLED=true
# Enable Container registry
- GITEA__packages__CONTAINER_ENABLED=true
# Database (SQLite for simplicity)
- GITEA__database__DB_TYPE=sqlite3
- GITEA__database__PATH=/data/gitea/gitea.db
# Server config
- GITEA__server__ROOT_URL=http://localhost:${TEST_GITEA_PORT:-3000}/
- GITEA__server__HTTP_PORT=3000
# Disable metrics/telemetry
- GITEA__metrics__ENABLED=false
# Session config
- GITEA__session__PROVIDER=memory
# Cache config
- GITEA__cache__ADAPTER=memory
# Log level
- GITEA__log__LEVEL=Warn
volumes:
- gitea-data:/data
- gitea-config:/etc/gitea
ports:
- "${TEST_GITEA_PORT:-3000}:3000"
- "${TEST_GITEA_SSH_PORT:-3022}:22"
networks:
- testing-net
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/api/healthz"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
labels: *testing-labels

View File

@@ -1,80 +0,0 @@
# =============================================================================
# STELLA OPS TILE PROXY OVERLAY
# =============================================================================
# Rekor tile-caching proxy for air-gapped and offline deployments.
# Locally caches tiles fetched from an upstream Rekor (public Sigstore or a private log).
#
# Use Cases:
# - Air-gapped deployments with periodic sync
# - Reduce latency by caching frequently-accessed tiles
# - Offline verification when upstream is unavailable
#
# Note: This is an ALTERNATIVE to running your own rekor-v2 instance.
# Use tile-proxy when you want to cache from public Sigstore.
# Use rekor-v2 (--profile sigstore) when running your own transparency log.
#
# Usage:
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.tile-proxy.yml up -d
#
# =============================================================================
x-release-labels: &release-labels
com.stellaops.release.version: "2025.10.0"
com.stellaops.release.channel: "stable"
com.stellaops.component: "tile-proxy"
volumes:
tile-cache:
driver: local
tuf-cache:
driver: local
services:
tile-proxy:
build:
context: ../..
dockerfile: src/Attestor/StellaOps.Attestor.TileProxy/Dockerfile
image: registry.stella-ops.org/stellaops/tile-proxy:2025.10.0
container_name: stellaops-tile-proxy
restart: unless-stopped
ports:
- "${TILE_PROXY_PORT:-8090}:8080"
volumes:
- tile-cache:/var/cache/stellaops/tiles
- tuf-cache:/var/cache/stellaops/tuf
environment:
# Upstream Rekor configuration
TILE_PROXY__UPSTREAMURL: "${REKOR_SERVER_URL:-https://rekor.sigstore.dev}"
TILE_PROXY__ORIGIN: "${REKOR_ORIGIN:-rekor.sigstore.dev - 1985497715}"
# TUF configuration (optional - for checkpoint signature validation)
TILE_PROXY__TUF__ENABLED: "${TILE_PROXY_TUF_ENABLED:-false}"
TILE_PROXY__TUF__URL: "${TILE_PROXY_TUF_ROOT_URL:-}"
TILE_PROXY__TUF__VALIDATECHECKPOINTSIGNATURE: "${TILE_PROXY_TUF_VALIDATE_CHECKPOINT:-true}"
# Cache configuration
TILE_PROXY__CACHE__BASEPATH: /var/cache/stellaops/tiles
TILE_PROXY__CACHE__MAXSIZEGB: "${TILE_PROXY_CACHE_MAX_SIZE_GB:-10}"
TILE_PROXY__CACHE__CHECKPOINTTTLMINUTES: "${TILE_PROXY_CHECKPOINT_TTL_MINUTES:-5}"
# Sync job configuration (for air-gapped pre-fetching)
TILE_PROXY__SYNC__ENABLED: "${TILE_PROXY_SYNC_ENABLED:-true}"
TILE_PROXY__SYNC__SCHEDULE: "${TILE_PROXY_SYNC_SCHEDULE:-0 */6 * * *}"
TILE_PROXY__SYNC__DEPTH: "${TILE_PROXY_SYNC_DEPTH:-10000}"
# Request handling
TILE_PROXY__REQUEST__COALESCINGENABLED: "${TILE_PROXY_COALESCING_ENABLED:-true}"
TILE_PROXY__REQUEST__TIMEOUTSECONDS: "${TILE_PROXY_REQUEST_TIMEOUT_SECONDS:-30}"
# Logging
Serilog__MinimumLevel__Default: "${TILE_PROXY_LOG_LEVEL:-Information}"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/_admin/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 5s
networks:
- stellaops
labels: *release-labels

View File

@@ -1,118 +0,0 @@
# CAS (Content Addressable Storage) Environment Configuration
# Copy to .env and customize for your deployment
#
# Aligned with best-in-class vulnerability scanner retention policies:
# - Trivy: 7 days vulnerability DB
# - Grype: 5 days DB, configurable
# - Anchore Enterprise: 90-365 days typical
# - Snyk Enterprise: 365 days
# =============================================================================
# DATA PATHS (ensure directories exist with proper permissions)
# =============================================================================
CAS_DATA_PATH=/var/lib/stellaops/cas
CAS_EVIDENCE_PATH=/var/lib/stellaops/evidence
CAS_ATTESTATION_PATH=/var/lib/stellaops/attestations
# =============================================================================
# RUSTFS CONFIGURATION
# =============================================================================
RUSTFS_LOG_LEVEL=info
RUSTFS_COMPRESSION=zstd
RUSTFS_COMPRESSION_LEVEL=3
# =============================================================================
# PORTS
# =============================================================================
RUSTFS_CAS_PORT=8180
RUSTFS_EVIDENCE_PORT=8181
RUSTFS_ATTESTATION_PORT=8182
# =============================================================================
# ACCESS CONTROL - API KEYS
# IMPORTANT: Change these in production!
# =============================================================================
# CAS Storage (mutable, lifecycle-managed)
RUSTFS_CAS_API_KEY=cas-api-key-CHANGE-IN-PRODUCTION
RUSTFS_CAS_READONLY_KEY=cas-readonly-key-CHANGE-IN-PRODUCTION
# Evidence Storage (immutable)
RUSTFS_EVIDENCE_API_KEY=evidence-api-key-CHANGE-IN-PRODUCTION
RUSTFS_EVIDENCE_READONLY_KEY=evidence-readonly-key-CHANGE-IN-PRODUCTION
# Attestation Storage (immutable)
RUSTFS_ATTESTATION_API_KEY=attestation-api-key-CHANGE-IN-PRODUCTION
RUSTFS_ATTESTATION_READONLY_KEY=attestation-readonly-key-CHANGE-IN-PRODUCTION
# =============================================================================
# SERVICE ACCOUNT KEYS
# Each service has its own key for fine-grained access control
# IMPORTANT: Generate unique keys per environment!
# =============================================================================
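# One way to generate a unique key per service (illustrative; any sufficiently
# random secret works):
#   openssl rand -hex 32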
# Scanner service - access to scanner artifacts, surface cache, runtime facts
RUSTFS_SCANNER_KEY=scanner-svc-key-GENERATE-UNIQUE
# Bucket access: scanner-artifacts (rw), surface-cache (rw), runtime-facts (rw)
# Signals service - access to runtime facts, signals data, provenance feed
RUSTFS_SIGNALS_KEY=signals-svc-key-GENERATE-UNIQUE
# Bucket access: runtime-facts (rw), signals-data (rw), provenance-feed (rw)
# Replay service - access to replay bundles, inputs lock files
RUSTFS_REPLAY_KEY=replay-svc-key-GENERATE-UNIQUE
# Bucket access: replay-bundles (rw), inputs-lock (rw)
# Ledger service - access to evidence bundles, merkle roots, hash chains
RUSTFS_LEDGER_KEY=ledger-svc-key-GENERATE-UNIQUE
# Bucket access: evidence-bundles (rw), merkle-roots (rw), hash-chains (rw)
# Exporter service - read-only access to evidence bundles
RUSTFS_EXPORTER_KEY=exporter-svc-key-GENERATE-UNIQUE
# Bucket access: evidence-bundles (r)
# Attestor service - access to attestations, DSSE envelopes, Rekor receipts
RUSTFS_ATTESTOR_KEY=attestor-svc-key-GENERATE-UNIQUE
# Bucket access: attestations (rw), dsse-envelopes (rw), rekor-receipts (rw)
# Verifier service - read-only access to attestations
RUSTFS_VERIFIER_KEY=verifier-svc-key-GENERATE-UNIQUE
# Bucket access: attestations (r), dsse-envelopes (r), rekor-receipts (r)
# Global read-only key (for debugging/auditing)
RUSTFS_READONLY_KEY=readonly-global-key-GENERATE-UNIQUE
# Bucket access: * (r)
# =============================================================================
# LIFECYCLE MANAGEMENT
# =============================================================================
# Cron schedule for retention policy enforcement (default: 3 AM daily)
LIFECYCLE_CRON=0 3 * * *
LIFECYCLE_TELEMETRY=true
# =============================================================================
# RETENTION POLICIES (days, 0 = indefinite)
# Aligned with enterprise vulnerability scanner best practices
# =============================================================================
# Vulnerability DB: 7 days (matches Trivy default, Grype uses 5)
CAS_RETENTION_VULNERABILITY_DB_DAYS=7
# SBOM artifacts: 365 days (audit compliance - SOC2, ISO27001, FedRAMP)
CAS_RETENTION_SBOM_ARTIFACTS_DAYS=365
# Scan results: 90 days (common compliance window)
CAS_RETENTION_SCAN_RESULTS_DAYS=90
# Evidence bundles: indefinite (content-addressed, immutable, audit trail)
CAS_RETENTION_EVIDENCE_BUNDLES_DAYS=0
# Attestations: indefinite (signed, immutable, verifiable)
CAS_RETENTION_ATTESTATIONS_DAYS=0
# Temporary artifacts: 1 day (work-in-progress, intermediate files)
CAS_RETENTION_TEMP_ARTIFACTS_DAYS=1
# =============================================================================
# TELEMETRY (optional)
# =============================================================================
OTLP_ENDPOINT=

View File

@@ -1,48 +0,0 @@
# =============================================================================
# STELLA OPS CHINA COMPLIANCE ENVIRONMENT
# =============================================================================
# Environment template for China (SM2/SM3/SM4) compliance deployments.
#
# Usage with simulation:
# cp env/compliance-china.env.example .env
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-china.yml \
# -f docker-compose.crypto-sim.yml up -d
#
# Usage with SM Remote (production):
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-china.yml \
# -f docker-compose.sm-remote.yml up -d
#
# =============================================================================
# Crypto profile
STELLAOPS_CRYPTO_PROFILE=china
# =============================================================================
# SM REMOTE SERVICE CONFIGURATION
# =============================================================================
SM_REMOTE_PORT=56080
# Software-only SM2 provider (for testing/development)
SM_SOFT_ALLOWED=1
# OSCCA-certified HSM configuration (for production)
# Set these when using a certified hardware security module
SM_REMOTE_HSM_URL=
SM_REMOTE_HSM_API_KEY=
SM_REMOTE_HSM_TIMEOUT=30000
# Client certificate authentication for HSM (optional)
SM_REMOTE_CLIENT_CERT_PATH=
SM_REMOTE_CLIENT_CERT_PASSWORD=
# =============================================================================
# CRYPTO SIMULATION (for testing only)
# =============================================================================
# Enable simulation mode
STELLAOPS_CRYPTO_ENABLE_SIM=1
STELLAOPS_CRYPTO_SIM_URL=http://sim-crypto:8080
SIM_CRYPTO_PORT=18090

View File

@@ -1,40 +0,0 @@
# =============================================================================
# STELLA OPS EU COMPLIANCE ENVIRONMENT
# =============================================================================
# Environment template for EU (eIDAS) compliance deployments.
#
# Usage with simulation:
# cp env/compliance-eu.env.example .env
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-eu.yml \
# -f docker-compose.crypto-sim.yml up -d
#
# Usage for production:
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-eu.yml up -d
#
# Note: EU eIDAS deployments typically integrate with external Qualified Trust
# Service Providers (QTSPs) rather than hosting crypto locally.
#
# =============================================================================
# Crypto profile
STELLAOPS_CRYPTO_PROFILE=eu
# =============================================================================
# eIDAS / QTSP CONFIGURATION
# =============================================================================
# Qualified Trust Service Provider integration (configure in application settings)
# EIDAS_QTSP_URL=https://qtsp.example.eu
# EIDAS_QTSP_CLIENT_ID=
# EIDAS_QTSP_CLIENT_SECRET=
# =============================================================================
# CRYPTO SIMULATION (for testing only)
# =============================================================================
# Enable simulation mode
STELLAOPS_CRYPTO_ENABLE_SIM=1
STELLAOPS_CRYPTO_SIM_URL=http://sim-crypto:8080
SIM_CRYPTO_PORT=18090

View File

@@ -1,51 +0,0 @@
# =============================================================================
# STELLA OPS RUSSIA COMPLIANCE ENVIRONMENT
# =============================================================================
# Environment template for Russia (GOST R 34.10-2012) compliance deployments.
#
# Usage with simulation:
# cp env/compliance-russia.env.example .env
# docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-russia.yml \
# -f docker-compose.crypto-sim.yml up -d
#
# Usage with CryptoPro CSP (production):
# CRYPTOPRO_ACCEPT_EULA=1 docker compose -f docker-compose.stella-ops.yml \
# -f docker-compose.compliance-russia.yml \
# -f docker-compose.cryptopro.yml up -d
#
# =============================================================================
# Crypto profile
STELLAOPS_CRYPTO_PROFILE=russia
# =============================================================================
# CRYPTOPRO CSP CONFIGURATION
# =============================================================================
CRYPTOPRO_PORT=18080
# IMPORTANT: Set to 1 to accept CryptoPro EULA (required for production)
CRYPTOPRO_ACCEPT_EULA=0
# CryptoPro container settings
CRYPTOPRO_CONTAINER_NAME=stellaops-signing
CRYPTOPRO_USE_MACHINE_STORE=true
CRYPTOPRO_PROVIDER_TYPE=80
# =============================================================================
# GOST ALGORITHM CONFIGURATION
# =============================================================================
# Default GOST algorithms
CRYPTOPRO_GOST_SIGNATURE_ALGORITHM=GOST R 34.10-2012
CRYPTOPRO_GOST_HASH_ALGORITHM=GOST R 34.11-2012
# =============================================================================
# CRYPTO SIMULATION (for testing only)
# =============================================================================
# Enable simulation mode
STELLAOPS_CRYPTO_ENABLE_SIM=1
STELLAOPS_CRYPTO_SIM_URL=http://sim-crypto:8080
SIM_CRYPTO_PORT=18090

View File

@@ -1,171 +0,0 @@
# =============================================================================
# STELLA OPS ENVIRONMENT CONFIGURATION
# =============================================================================
# Main environment template for docker-compose.stella-ops.yml
# Copy to .env and customize for your deployment.
#
# Usage:
# cp env/stellaops.env.example .env
# docker compose -f docker-compose.stella-ops.yml up -d
#
# =============================================================================
# =============================================================================
# INFRASTRUCTURE
# =============================================================================
# PostgreSQL Database
POSTGRES_USER=stellaops
POSTGRES_PASSWORD=REPLACE_WITH_STRONG_PASSWORD
POSTGRES_DB=stellaops_platform
POSTGRES_PORT=5432
# Valkey (Redis-compatible cache and messaging)
VALKEY_PORT=6379
# RustFS Object Storage
RUSTFS_HTTP_PORT=8080
# =============================================================================
# CORE SERVICES
# =============================================================================
# Authority (OAuth2/OIDC)
AUTHORITY_ISSUER=https://authority.example.com
AUTHORITY_PORT=8440
AUTHORITY_OFFLINE_CACHE_TOLERANCE=00:30:00
# Signer
SIGNER_POE_INTROSPECT_URL=https://licensing.example.com/introspect
SIGNER_PORT=8441
# Attestor
ATTESTOR_PORT=8442
# Issuer Directory
ISSUER_DIRECTORY_PORT=8447
ISSUER_DIRECTORY_SEED_CSAF=true
# Concelier
CONCELIER_PORT=8445
# Notify
NOTIFY_WEB_PORT=8446
# Web UI
UI_PORT=8443
# =============================================================================
# SCANNER CONFIGURATION
# =============================================================================
SCANNER_WEB_PORT=8444
# Queue configuration (Valkey only - NATS removed)
SCANNER__QUEUE__BROKER=valkey://valkey:6379
# Event streaming
SCANNER_EVENTS_ENABLED=false
SCANNER_EVENTS_DRIVER=valkey
SCANNER_EVENTS_DSN=valkey:6379
SCANNER_EVENTS_STREAM=stella.events
SCANNER_EVENTS_PUBLISH_TIMEOUT_SECONDS=5
SCANNER_EVENTS_MAX_STREAM_LENGTH=10000
# Surface cache configuration
SCANNER_SURFACE_FS_ENDPOINT=http://rustfs:8080
SCANNER_SURFACE_FS_BUCKET=surface-cache
SCANNER_SURFACE_CACHE_ROOT=/var/lib/stellaops/surface
SCANNER_SURFACE_CACHE_QUOTA_MB=4096
SCANNER_SURFACE_PREFETCH_ENABLED=false
SCANNER_SURFACE_TENANT=default
SCANNER_SURFACE_FEATURES=
SCANNER_SURFACE_SECRETS_PROVIDER=file
SCANNER_SURFACE_SECRETS_NAMESPACE=
SCANNER_SURFACE_SECRETS_ROOT=/etc/stellaops/secrets
SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER=
SCANNER_SURFACE_SECRETS_ALLOW_INLINE=false
SURFACE_SECRETS_HOST_PATH=./offline/surface-secrets
# Offline Kit configuration
SCANNER_OFFLINEKIT_ENABLED=false
SCANNER_OFFLINEKIT_REQUIREDSSE=true
SCANNER_OFFLINEKIT_REKOROFFLINEMODE=true
SCANNER_OFFLINEKIT_TRUSTROOTDIRECTORY=/etc/stellaops/trust-roots
SCANNER_OFFLINEKIT_REKORSNAPSHOTDIRECTORY=/var/lib/stellaops/rekor-snapshot
SCANNER_OFFLINEKIT_TRUSTROOTS_HOST_PATH=./offline/trust-roots
SCANNER_OFFLINEKIT_REKOR_SNAPSHOT_HOST_PATH=./offline/rekor-snapshot
# =============================================================================
# SCHEDULER CONFIGURATION
# =============================================================================
# Queue configuration (Valkey only - NATS removed)
SCHEDULER__QUEUE__KIND=Valkey
SCHEDULER__QUEUE__VALKEY__URL=valkey:6379
SCHEDULER_SCANNER_BASEADDRESS=http://scanner-web:8444
# =============================================================================
# REKOR / SIGSTORE CONFIGURATION
# =============================================================================
# Rekor server URL (default: public Sigstore, use http://rekor-v2:3000 for local)
REKOR_SERVER_URL=https://rekor.sigstore.dev
REKOR_VERSION=V2
REKOR_TILE_BASE_URL=
REKOR_LOG_ID=c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d
REKOR_TILES_IMAGE=ghcr.io/sigstore/rekor-tiles:latest
# =============================================================================
# ADVISORY AI CONFIGURATION
# =============================================================================
ADVISORY_AI_WEB_PORT=8448
ADVISORY_AI_SBOM_BASEADDRESS=http://scanner-web:8444
ADVISORY_AI_INFERENCE_MODE=Local
ADVISORY_AI_REMOTE_BASEADDRESS=
ADVISORY_AI_REMOTE_APIKEY=
# =============================================================================
# CRYPTO CONFIGURATION
# =============================================================================
# Crypto profile: default, china, russia, eu
STELLAOPS_CRYPTO_PROFILE=default
# Enable crypto simulation (for testing)
STELLAOPS_CRYPTO_ENABLE_SIM=0
STELLAOPS_CRYPTO_SIM_URL=http://sim-crypto:8080
# CryptoPro (Russia only) - requires EULA acceptance
CRYPTOPRO_PORT=18080
CRYPTOPRO_ACCEPT_EULA=0
CRYPTOPRO_CONTAINER_NAME=stellaops-signing
CRYPTOPRO_USE_MACHINE_STORE=true
CRYPTOPRO_PROVIDER_TYPE=80
# SM Remote (China only)
SM_REMOTE_PORT=56080
SM_SOFT_ALLOWED=1
SM_REMOTE_HSM_URL=
SM_REMOTE_HSM_API_KEY=
SM_REMOTE_HSM_TIMEOUT=30000
# =============================================================================
# NETWORKING
# =============================================================================
# External reverse proxy network (Traefik, Envoy, etc.)
FRONTDOOR_NETWORK=stellaops_frontdoor
# =============================================================================
# TELEMETRY (optional)
# =============================================================================
OTEL_GRPC_PORT=4317
OTEL_HTTP_PORT=4318
OTEL_PROMETHEUS_PORT=9464
PROMETHEUS_PORT=9090
TEMPO_PORT=3200
LOKI_PORT=3100
PROMETHEUS_RETENTION=15d

View File

@@ -1,45 +0,0 @@
# =============================================================================
# STELLA OPS TESTING ENVIRONMENT CONFIGURATION
# =============================================================================
# Environment template for docker-compose.testing.yml
# Uses different ports to avoid conflicts with development/production.
#
# Usage:
# cp env/testing.env.example .env
# docker compose -f docker-compose.testing.yml --profile ci up -d
#
# =============================================================================
# =============================================================================
# CI INFRASTRUCTURE (different ports to avoid conflicts)
# =============================================================================
# PostgreSQL Test Database (port 5433)
TEST_POSTGRES_PORT=5433
TEST_POSTGRES_USER=stellaops_ci
TEST_POSTGRES_PASSWORD=ci_test_password
TEST_POSTGRES_DB=stellaops_test
# Valkey Test (port 6380)
TEST_VALKEY_PORT=6380
# RustFS Test (port 8180)
TEST_RUSTFS_PORT=8180
# Mock Registry (port 5001)
TEST_REGISTRY_PORT=5001
# =============================================================================
# GITEA CONFIGURATION
# =============================================================================
TEST_GITEA_PORT=3000
TEST_GITEA_SSH_PORT=3022
# =============================================================================
# SIGSTORE TOOLS
# =============================================================================
# Rekor CLI and Cosign versions (for sigstore profile)
REKOR_CLI_VERSION=v1.4.3
COSIGN_VERSION=v3.0.4

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
echo "StellaOps Compose Backup"
echo "This will create a tar.gz of PostgreSQL, RustFS (object-store), and Valkey data volumes."
read -rp "Proceed? [y/N] " ans
[[ ${ans:-N} =~ ^[Yy]$ ]] || { echo "Aborted."; exit 1; }
TS=$(date -u +%Y%m%dT%H%M%SZ)
OUT_DIR=${BACKUP_DIR:-backups}
mkdir -p "$OUT_DIR"
docker compose ps >/dev/null
echo "Pausing worker containers for consistency..."
docker compose pause scanner-worker scheduler-worker taskrunner-worker || true
echo "Backing up volumes..."
docker run --rm \
-v stellaops-postgres:/data/postgres:ro \
-v stellaops-rustfs:/data/rustfs:ro \
-v stellaops-valkey:/data/valkey:ro \
-v "$PWD/$OUT_DIR":/out \
alpine sh -c "cd / && tar czf /out/stellaops-backup-$TS.tar.gz data"
docker compose unpause scanner-worker scheduler-worker taskrunner-worker || true
echo "Backup written to $OUT_DIR/stellaops-backup-$TS.tar.gz"

View File

@@ -1,25 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
COMPOSE_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
ENV_FILE="${1:-$COMPOSE_DIR/env/dev.env.example}"
USE_MOCK="${USE_MOCK:-0}"
FILES=(-f "$COMPOSE_DIR/docker-compose.dev.yaml")
ENV_FILES=(--env-file "$ENV_FILE")
if [[ "$USE_MOCK" == "1" ]]; then
FILES+=(-f "$COMPOSE_DIR/docker-compose.mock.yaml")
ENV_FILES+=(--env-file "$COMPOSE_DIR/env/mock.env.example")
fi
echo "Validating compose config..."
docker compose "${ENV_FILES[@]}" "${FILES[@]}" config > /tmp/compose-validated.yaml
echo "Config written to /tmp/compose-validated.yaml"
echo "Starting stack..."
docker compose "${ENV_FILES[@]}" "${FILES[@]}" up -d
echo "Stack started. To stop: docker compose ${ENV_FILES[*]} ${FILES[*]} down"

View File

@@ -1,15 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
echo "WARNING: This will stop the stack and wipe PostgreSQL, RustFS, and Valkey volumes."
read -rp "Type 'RESET' to continue: " ans
[[ ${ans:-} == "RESET" ]] || { echo "Aborted."; exit 1; }
docker compose down
for vol in stellaops-postgres stellaops-rustfs stellaops-valkey; do
echo "Removing volume $vol"
docker volume rm "$vol" || true
done
echo "Reset complete. Re-run compose with your env file to recreate volumes."

View File

@@ -1,69 +0,0 @@
-- -----------------------------------------------------------------------------
-- 005_timestamp_evidence.sql
-- Sprint: SPRINT_20260119_009 Evidence Storage for Timestamps
-- Task: EVT-002 - PostgreSQL Schema Extension
-- Description: Schema for storing timestamp and revocation evidence.
-- -----------------------------------------------------------------------------
-- Ensure the evidence schema exists
CREATE SCHEMA IF NOT EXISTS evidence;
-- Timestamp evidence storage
CREATE TABLE IF NOT EXISTS evidence.timestamp_tokens (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
artifact_digest TEXT NOT NULL,
digest_algorithm TEXT NOT NULL,
tst_blob BYTEA NOT NULL,
generation_time TIMESTAMPTZ NOT NULL,
tsa_name TEXT NOT NULL,
tsa_policy_oid TEXT NOT NULL,
serial_number TEXT NOT NULL,
tsa_chain_pem TEXT NOT NULL,
ocsp_response BYTEA,
crl_snapshot BYTEA,
captured_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
provider_name TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT uq_timestamp_artifact_time UNIQUE (artifact_digest, generation_time)
);
-- Indexes for timestamp queries
CREATE INDEX IF NOT EXISTS idx_timestamp_artifact ON evidence.timestamp_tokens(artifact_digest);
CREATE INDEX IF NOT EXISTS idx_timestamp_generation ON evidence.timestamp_tokens(generation_time);
CREATE INDEX IF NOT EXISTS idx_timestamp_provider ON evidence.timestamp_tokens(provider_name);
CREATE INDEX IF NOT EXISTS idx_timestamp_created ON evidence.timestamp_tokens(created_at);
-- Revocation evidence storage
CREATE TABLE IF NOT EXISTS evidence.revocation_snapshots (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
certificate_fingerprint TEXT NOT NULL,
source TEXT NOT NULL CHECK (source IN ('Ocsp', 'Crl', 'None')),
raw_response BYTEA NOT NULL,
response_time TIMESTAMPTZ NOT NULL,
valid_until TIMESTAMPTZ NOT NULL,
status TEXT NOT NULL CHECK (status IN ('Good', 'Revoked', 'Unknown')),
revocation_time TIMESTAMPTZ,
reason TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes for revocation queries
CREATE INDEX IF NOT EXISTS idx_revocation_cert ON evidence.revocation_snapshots(certificate_fingerprint);
CREATE INDEX IF NOT EXISTS idx_revocation_valid ON evidence.revocation_snapshots(valid_until);
CREATE INDEX IF NOT EXISTS idx_revocation_status ON evidence.revocation_snapshots(status);
CREATE INDEX IF NOT EXISTS idx_revocation_created ON evidence.revocation_snapshots(created_at);
-- Comments
COMMENT ON TABLE evidence.timestamp_tokens IS 'RFC-3161 TimeStampToken evidence for long-term validation';
COMMENT ON TABLE evidence.revocation_snapshots IS 'OCSP/CRL certificate revocation evidence snapshots';
COMMENT ON COLUMN evidence.timestamp_tokens.artifact_digest IS 'SHA-256 digest of the timestamped artifact';
COMMENT ON COLUMN evidence.timestamp_tokens.tst_blob IS 'Raw DER-encoded RFC 3161 TimeStampToken';
COMMENT ON COLUMN evidence.timestamp_tokens.tsa_chain_pem IS 'PEM-encoded TSA certificate chain for LTV';
COMMENT ON COLUMN evidence.timestamp_tokens.ocsp_response IS 'Stapled OCSP response at signing time';
COMMENT ON COLUMN evidence.timestamp_tokens.crl_snapshot IS 'CRL snapshot at signing time (fallback for OCSP)';
COMMENT ON COLUMN evidence.revocation_snapshots.certificate_fingerprint IS 'SHA-256 fingerprint of the certificate';
COMMENT ON COLUMN evidence.revocation_snapshots.raw_response IS 'Raw OCSP response or CRL bytes';
COMMENT ON COLUMN evidence.revocation_snapshots.response_time IS 'thisUpdate from the response';
COMMENT ON COLUMN evidence.revocation_snapshots.valid_until IS 'nextUpdate from the response';
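The two queries below are a minimal usage sketch for the evidence tables above; the digest and fingerprint literals are hypothetical placeholders, not values from this repository.

```sql
-- Newest RFC 3161 token recorded for an artifact.
SELECT tsa_name, generation_time, serial_number, provider_name
FROM evidence.timestamp_tokens
WHERE artifact_digest = 'sha256:0123abcd'          -- hypothetical digest
ORDER BY generation_time DESC
LIMIT 1;

-- Freshest still-valid revocation evidence for a certificate.
SELECT source, status, response_time, valid_until
FROM evidence.revocation_snapshots
WHERE certificate_fingerprint = 'sha256:feedbeef'  -- hypothetical fingerprint
  AND valid_until > NOW()
ORDER BY response_time DESC
LIMIT 1;
```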

View File

@@ -1,21 +0,0 @@
-- -----------------------------------------------------------------------------
-- 005_timestamp_evidence_rollback.sql
-- Sprint: SPRINT_20260119_009 Evidence Storage for Timestamps
-- Task: EVT-002 - PostgreSQL Schema Extension
-- Description: Rollback migration for timestamp and revocation evidence.
-- -----------------------------------------------------------------------------
-- Drop indexes first
DROP INDEX IF EXISTS evidence.idx_timestamp_artifact;
DROP INDEX IF EXISTS evidence.idx_timestamp_generation;
DROP INDEX IF EXISTS evidence.idx_timestamp_provider;
DROP INDEX IF EXISTS evidence.idx_timestamp_created;
DROP INDEX IF EXISTS evidence.idx_revocation_cert;
DROP INDEX IF EXISTS evidence.idx_revocation_valid;
DROP INDEX IF EXISTS evidence.idx_revocation_status;
DROP INDEX IF EXISTS evidence.idx_revocation_created;
-- Drop tables
DROP TABLE IF EXISTS evidence.revocation_snapshots;
DROP TABLE IF EXISTS evidence.timestamp_tokens;

View File

@@ -1,120 +0,0 @@
-- Validation harness schema for tracking validation runs and match results
-- Migration: 005_validation_harness.sql
-- Validation runs table
CREATE TABLE IF NOT EXISTS groundtruth.validation_runs (
run_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name TEXT NOT NULL,
description TEXT,
status TEXT NOT NULL DEFAULT 'pending',
-- Configuration (stored as JSONB)
config JSONB NOT NULL,
-- Timestamps
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
started_at TIMESTAMPTZ,
completed_at TIMESTAMPTZ,
-- Metrics (populated after completion)
total_pairs INT,
total_functions INT,
true_positives INT,
false_positives INT,
true_negatives INT,
false_negatives INT,
match_rate DOUBLE PRECISION,
precision_score DOUBLE PRECISION,
recall_score DOUBLE PRECISION,
f1_score DOUBLE PRECISION,
average_match_score DOUBLE PRECISION,
-- Mismatch counts by bucket (JSONB map)
mismatch_counts JSONB,
-- Metadata
corpus_snapshot_id TEXT,
matcher_version TEXT,
error_message TEXT,
tags TEXT[] DEFAULT '{}',
-- Constraints
CONSTRAINT valid_status CHECK (status IN ('pending', 'running', 'completed', 'failed', 'cancelled'))
);
-- Indexes for validation runs
CREATE INDEX IF NOT EXISTS idx_validation_runs_status ON groundtruth.validation_runs(status);
CREATE INDEX IF NOT EXISTS idx_validation_runs_created_at ON groundtruth.validation_runs(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_validation_runs_tags ON groundtruth.validation_runs USING GIN (tags);
-- Match results table
CREATE TABLE IF NOT EXISTS groundtruth.match_results (
result_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
run_id UUID NOT NULL REFERENCES groundtruth.validation_runs(run_id) ON DELETE CASCADE,
security_pair_id UUID NOT NULL,
-- Source function
source_name TEXT NOT NULL,
source_demangled_name TEXT,
source_address BIGINT NOT NULL,
source_size BIGINT,
source_build_id TEXT NOT NULL,
source_binary_name TEXT NOT NULL,
-- Expected target
expected_name TEXT NOT NULL,
expected_demangled_name TEXT,
expected_address BIGINT NOT NULL,
expected_size BIGINT,
expected_build_id TEXT NOT NULL,
expected_binary_name TEXT NOT NULL,
-- Actual matched target (nullable if no match found)
actual_name TEXT,
actual_demangled_name TEXT,
actual_address BIGINT,
actual_size BIGINT,
actual_build_id TEXT,
actual_binary_name TEXT,
-- Outcome
outcome TEXT NOT NULL,
match_score DOUBLE PRECISION,
confidence TEXT,
-- Mismatch analysis
inferred_cause TEXT,
mismatch_detail JSONB,
-- Performance
match_duration_ms DOUBLE PRECISION,
-- Constraints
CONSTRAINT valid_outcome CHECK (outcome IN ('true_positive', 'false_positive', 'true_negative', 'false_negative'))
);
-- Indexes for match results
CREATE INDEX IF NOT EXISTS idx_match_results_run_id ON groundtruth.match_results(run_id);
CREATE INDEX IF NOT EXISTS idx_match_results_security_pair_id ON groundtruth.match_results(security_pair_id);
CREATE INDEX IF NOT EXISTS idx_match_results_outcome ON groundtruth.match_results(outcome);
CREATE INDEX IF NOT EXISTS idx_match_results_inferred_cause ON groundtruth.match_results(inferred_cause) WHERE inferred_cause IS NOT NULL;
-- View for run summaries
CREATE OR REPLACE VIEW groundtruth.validation_run_summaries AS
SELECT
run_id AS id,
name,
status,
created_at,
completed_at,
match_rate,
f1_score,
total_pairs AS pair_count,
total_functions AS function_count,
tags
FROM groundtruth.validation_runs;
-- Comments
COMMENT ON TABLE groundtruth.validation_runs IS 'Validation harness runs with aggregate metrics';
COMMENT ON TABLE groundtruth.match_results IS 'Per-function match results from validation runs';
COMMENT ON VIEW groundtruth.validation_run_summaries IS 'Summary view for listing validation runs';
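As a usage sketch, the query below aggregates mismatch causes for a single run using the tables above; the run_id literal is a hypothetical placeholder.

```sql
-- False positives/negatives per inferred cause for one validation run.
SELECT inferred_cause,
       COUNT(*) FILTER (WHERE outcome = 'false_positive') AS false_positives,
       COUNT(*) FILTER (WHERE outcome = 'false_negative') AS false_negatives,
       AVG(match_score)                                   AS avg_match_score
FROM groundtruth.match_results
WHERE run_id = '00000000-0000-0000-0000-000000000000'
  AND inferred_cause IS NOT NULL
GROUP BY inferred_cause
ORDER BY false_negatives DESC;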

View File

@@ -1,27 +0,0 @@
-- -----------------------------------------------------------------------------
-- 006_timestamp_supersession.sql
-- Sprint: SPRINT_20260119_009 Evidence Storage for Timestamps
-- Task: EVT-005 - Re-Timestamping Support
-- Description: Schema extension for timestamp supersession chain.
-- -----------------------------------------------------------------------------
-- Add supersession column for re-timestamping chain
ALTER TABLE evidence.timestamp_tokens
ADD COLUMN IF NOT EXISTS supersedes_id UUID REFERENCES evidence.timestamp_tokens(id);
-- Index for finding superseding timestamps
CREATE INDEX IF NOT EXISTS idx_timestamp_supersedes ON evidence.timestamp_tokens(supersedes_id);
-- Index for finding timestamps by expiry (for re-timestamp scheduling)
-- Note: We need to track TSA certificate expiry separately - for now use generation_time + typical cert lifetime
CREATE INDEX IF NOT EXISTS idx_timestamp_for_retimestamp
ON evidence.timestamp_tokens(generation_time)
WHERE supersedes_id IS NULL; -- First-generation tokens only; superseding tokens in a chain are reached via supersedes_id
-- Comments
COMMENT ON COLUMN evidence.timestamp_tokens.supersedes_id IS 'ID of the timestamp this supersedes (for re-timestamping chain)';
-- Rollback script (execute separately if needed):
-- ALTER TABLE evidence.timestamp_tokens DROP COLUMN IF EXISTS supersedes_id;
-- DROP INDEX IF EXISTS evidence.idx_timestamp_supersedes;
-- DROP INDEX IF EXISTS evidence.idx_timestamp_for_retimestamp;
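A minimal sketch of how the supersession chain can be traversed with a recursive CTE; the starting token id is a hypothetical placeholder.

```sql
-- Walk a re-timestamping chain forward from an original token to its
-- most recent superseding token.
WITH RECURSIVE chain AS (
    SELECT id, supersedes_id, generation_time, tsa_name, 1 AS depth
    FROM evidence.timestamp_tokens
    WHERE id = '00000000-0000-0000-0000-000000000000'   -- hypothetical original token
    UNION ALL
    SELECT t.id, t.supersedes_id, t.generation_time, t.tsa_name, c.depth + 1
    FROM evidence.timestamp_tokens t
    JOIN chain c ON t.supersedes_id = c.id
)
SELECT id, generation_time, tsa_name, depth
FROM chain
ORDER BY depth;
```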

View File

@@ -1,108 +0,0 @@
-- OpsMemory and AdvisoryAI PostgreSQL Schema Migration
-- Version: 20260108
-- Author: StellaOps Agent
-- Sprint: SPRINT_20260107_006_004 (OpsMemory), SPRINT_20260107_006_003 (AdvisoryAI)
-- ============================================================================
-- OpsMemory Schema
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS opsmemory;
-- Decision records table
CREATE TABLE IF NOT EXISTS opsmemory.decisions (
memory_id TEXT PRIMARY KEY,
tenant_id TEXT NOT NULL,
recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Situation context
cve_id TEXT,
component_purl TEXT,
severity TEXT,
reachability TEXT,
epss_score DECIMAL(5, 4),
cvss_score DECIMAL(3, 1),
context_tags TEXT[],
similarity_vector DOUBLE PRECISION[],
-- Decision details
action TEXT NOT NULL,
rationale TEXT,
decided_by TEXT NOT NULL,
policy_reference TEXT,
mitigation_type TEXT,
mitigation_details TEXT,
-- Outcome (nullable until recorded)
outcome_status TEXT,
resolution_time INTERVAL,
actual_impact TEXT,
lessons_learned TEXT,
outcome_recorded_by TEXT,
outcome_recorded_at TIMESTAMPTZ
);
-- Indexes for querying
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_tenant ON opsmemory.decisions(tenant_id);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_cve ON opsmemory.decisions(cve_id);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_component ON opsmemory.decisions(component_purl);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_recorded ON opsmemory.decisions(recorded_at);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_action ON opsmemory.decisions(action);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_outcome ON opsmemory.decisions(outcome_status);
-- ============================================================================
-- AdvisoryAI Schema
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS advisoryai;
-- Conversations table
CREATE TABLE IF NOT EXISTS advisoryai.conversations (
conversation_id TEXT PRIMARY KEY,
tenant_id TEXT NOT NULL,
user_id TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
context JSONB,
metadata JSONB
);
-- Conversation turns table
CREATE TABLE IF NOT EXISTS advisoryai.turns (
turn_id TEXT PRIMARY KEY,
conversation_id TEXT NOT NULL REFERENCES advisoryai.conversations(conversation_id) ON DELETE CASCADE,
role TEXT NOT NULL,
content TEXT NOT NULL,
timestamp TIMESTAMPTZ NOT NULL DEFAULT NOW(),
evidence_links JSONB,
proposed_actions JSONB,
metadata JSONB
);
-- Indexes for querying
CREATE INDEX IF NOT EXISTS idx_advisoryai_conv_tenant ON advisoryai.conversations(tenant_id);
CREATE INDEX IF NOT EXISTS idx_advisoryai_conv_user ON advisoryai.conversations(user_id);
CREATE INDEX IF NOT EXISTS idx_advisoryai_conv_updated ON advisoryai.conversations(updated_at);
CREATE INDEX IF NOT EXISTS idx_advisoryai_turns_conv ON advisoryai.turns(conversation_id);
CREATE INDEX IF NOT EXISTS idx_advisoryai_turns_timestamp ON advisoryai.turns(timestamp);
-- ============================================================================
-- Comments for documentation
-- ============================================================================
COMMENT ON SCHEMA opsmemory IS 'OpsMemory: Decision ledger for security playbook learning';
COMMENT ON SCHEMA advisoryai IS 'AdvisoryAI: Chat conversation storage';
COMMENT ON TABLE opsmemory.decisions IS 'Stores security decisions and their outcomes for playbook suggestions';
COMMENT ON TABLE advisoryai.conversations IS 'Stores AI chat conversations with context';
COMMENT ON TABLE advisoryai.turns IS 'Individual messages in conversations';
-- ============================================================================
-- Grants (adjust as needed for your environment)
-- ============================================================================
-- GRANT USAGE ON SCHEMA opsmemory TO stellaops_app;
-- GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA opsmemory TO stellaops_app;
-- GRANT USAGE ON SCHEMA advisoryai TO stellaops_app;
-- GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA advisoryai TO stellaops_app;
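Two usage sketches for the schemas above; the tenant, CVE, and conversation identifiers are hypothetical placeholders.

```sql
-- Prior decisions recorded for a CVE within one tenant, with outcomes.
SELECT recorded_at, action, decided_by, outcome_status, lessons_learned
FROM opsmemory.decisions
WHERE tenant_id = 'tenant-a'
  AND cve_id = 'CVE-2021-44228'
ORDER BY recorded_at DESC
LIMIT 20;

-- Transcript of one AdvisoryAI conversation, in turn order.
SELECT t.role, t.content, t.timestamp
FROM advisoryai.turns AS t
WHERE t.conversation_id = 'conv-123'
ORDER BY t.timestamp;
```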

View File

@@ -1,220 +0,0 @@
-- CVE-Symbol Mapping PostgreSQL Schema Migration
-- Version: 20260110
-- Author: StellaOps Agent
-- Sprint: SPRINT_20260109_009_003_BE_cve_symbol_mapping
-- ============================================================================
-- Reachability Schema
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS reachability;
-- ============================================================================
-- CVE-Symbol Mapping Tables
-- ============================================================================
-- Mapping source enumeration type
CREATE TYPE reachability.mapping_source AS ENUM (
'patch_analysis',
'osv_advisory',
'nvd_cpe',
'manual_curation',
'fuzzing_corpus',
'exploit_database',
'unknown'
);
-- Vulnerability type enumeration (for taint analysis)
CREATE TYPE reachability.vulnerability_type AS ENUM (
'source',
'sink',
'gadget',
'both_source_and_sink',
'unknown'
);
-- Main CVE-symbol mapping table
CREATE TABLE IF NOT EXISTS reachability.cve_symbol_mappings (
mapping_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
-- CVE identification
cve_id TEXT NOT NULL,
cve_id_normalized TEXT NOT NULL GENERATED ALWAYS AS (UPPER(cve_id)) STORED,
-- Affected package (PURL format)
purl TEXT NOT NULL,
affected_versions TEXT[], -- Version ranges like [">=1.0.0,<2.0.0"]
fixed_versions TEXT[], -- Versions where fix is applied
-- Vulnerable symbol details
symbol_name TEXT NOT NULL,
canonical_id TEXT, -- Normalized symbol ID from canonicalization service
file_path TEXT,
start_line INTEGER,
end_line INTEGER,
-- Metadata
source reachability.mapping_source NOT NULL DEFAULT 'unknown',
vulnerability_type reachability.vulnerability_type NOT NULL DEFAULT 'unknown',
confidence DECIMAL(3, 2) NOT NULL DEFAULT 0.5 CHECK (confidence >= 0 AND confidence <= 1),
-- Provenance
evidence_uri TEXT, -- stella:// URI to evidence
source_commit_url TEXT,
patch_url TEXT,
-- Timestamps
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
verified_at TIMESTAMPTZ,
verified_by TEXT,
-- Tenant support
tenant_id TEXT NOT NULL DEFAULT 'default'
);
-- Vulnerable symbol detail records (for additional symbol metadata)
CREATE TABLE IF NOT EXISTS reachability.vulnerable_symbols (
symbol_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
mapping_id UUID NOT NULL REFERENCES reachability.cve_symbol_mappings(mapping_id) ON DELETE CASCADE,
-- Symbol identification
symbol_name TEXT NOT NULL,
canonical_id TEXT,
symbol_type TEXT, -- 'function', 'method', 'class', 'module'
-- Location
file_path TEXT,
start_line INTEGER,
end_line INTEGER,
-- Code context
signature TEXT, -- Function signature
containing_class TEXT,
namespace TEXT,
-- Vulnerability context
vulnerability_type reachability.vulnerability_type NOT NULL DEFAULT 'unknown',
is_entry_point BOOLEAN DEFAULT FALSE,
requires_control_flow BOOLEAN DEFAULT FALSE,
-- Metadata
confidence DECIMAL(3, 2) NOT NULL DEFAULT 0.5,
notes TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Patch analysis results (cached)
CREATE TABLE IF NOT EXISTS reachability.patch_analysis (
analysis_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
-- Source identification
commit_url TEXT NOT NULL UNIQUE,
repository_url TEXT,
commit_sha TEXT,
-- Analysis results (stored as JSONB for flexibility)
diff_content TEXT,
extracted_symbols JSONB NOT NULL DEFAULT '[]',
language_detected TEXT,
-- Metadata
analyzed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
analyzer_version TEXT,
-- Error tracking
analysis_status TEXT NOT NULL DEFAULT 'pending',
error_message TEXT
);
-- ============================================================================
-- Indexes
-- ============================================================================
-- CVE lookup indexes
CREATE INDEX IF NOT EXISTS idx_cve_mapping_cve_normalized ON reachability.cve_symbol_mappings(cve_id_normalized);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_purl ON reachability.cve_symbol_mappings(purl);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_symbol ON reachability.cve_symbol_mappings(symbol_name);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_canonical ON reachability.cve_symbol_mappings(canonical_id) WHERE canonical_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_cve_mapping_tenant ON reachability.cve_symbol_mappings(tenant_id);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_source ON reachability.cve_symbol_mappings(source);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_confidence ON reachability.cve_symbol_mappings(confidence);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_created ON reachability.cve_symbol_mappings(created_at);
-- Composite index for common queries
CREATE INDEX IF NOT EXISTS idx_cve_mapping_cve_purl ON reachability.cve_symbol_mappings(cve_id_normalized, purl);
-- Symbol indexes
CREATE INDEX IF NOT EXISTS idx_vuln_symbol_mapping ON reachability.vulnerable_symbols(mapping_id);
CREATE INDEX IF NOT EXISTS idx_vuln_symbol_name ON reachability.vulnerable_symbols(symbol_name);
CREATE INDEX IF NOT EXISTS idx_vuln_symbol_canonical ON reachability.vulnerable_symbols(canonical_id) WHERE canonical_id IS NOT NULL;
-- Patch analysis indexes
CREATE INDEX IF NOT EXISTS idx_patch_analysis_commit ON reachability.patch_analysis(commit_sha);
CREATE INDEX IF NOT EXISTS idx_patch_analysis_repo ON reachability.patch_analysis(repository_url);
-- ============================================================================
-- Full-text search
-- ============================================================================
-- Add tsvector column for symbol search
ALTER TABLE reachability.cve_symbol_mappings
ADD COLUMN IF NOT EXISTS symbol_search_vector tsvector
GENERATED ALWAYS AS (to_tsvector('simple', coalesce(symbol_name, '') || ' ' || coalesce(file_path, ''))) STORED;
CREATE INDEX IF NOT EXISTS idx_cve_mapping_fts ON reachability.cve_symbol_mappings USING GIN(symbol_search_vector);
-- ============================================================================
-- Trigger for updated_at
-- ============================================================================
CREATE OR REPLACE FUNCTION reachability.update_modified_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS update_cve_mapping_modtime ON reachability.cve_symbol_mappings;
CREATE TRIGGER update_cve_mapping_modtime
BEFORE UPDATE ON reachability.cve_symbol_mappings
FOR EACH ROW
EXECUTE FUNCTION reachability.update_modified_column();
-- ============================================================================
-- Comments for documentation
-- ============================================================================
COMMENT ON SCHEMA reachability IS 'Hybrid reachability analysis: CVE-symbol mappings, static/runtime evidence';
COMMENT ON TABLE reachability.cve_symbol_mappings IS 'Maps CVE IDs to vulnerable symbols with confidence scores';
COMMENT ON COLUMN reachability.cve_symbol_mappings.cve_id_normalized IS 'Uppercase normalized CVE ID for case-insensitive lookup';
COMMENT ON COLUMN reachability.cve_symbol_mappings.canonical_id IS 'Symbol canonical ID from canonicalization service';
COMMENT ON COLUMN reachability.cve_symbol_mappings.evidence_uri IS 'stella:// URI pointing to evidence bundle';
COMMENT ON TABLE reachability.vulnerable_symbols IS 'Additional symbol details for a CVE mapping';
COMMENT ON TABLE reachability.patch_analysis IS 'Cached patch analysis results for commit URLs';
-- ============================================================================
-- Initial data / seed (optional well-known CVEs for testing)
-- ============================================================================
-- Example: Log4Shell (CVE-2021-44228)
INSERT INTO reachability.cve_symbol_mappings (cve_id, purl, symbol_name, file_path, source, confidence, vulnerability_type)
VALUES
('CVE-2021-44228', 'pkg:maven/org.apache.logging.log4j/log4j-core@2.14.1', 'JndiLookup.lookup', 'log4j-core/src/main/java/org/apache/logging/log4j/core/lookup/JndiLookup.java', 'manual_curation', 0.99, 'sink'),
('CVE-2021-44228', 'pkg:maven/org.apache.logging.log4j/log4j-core@2.14.1', 'JndiManager.lookup', 'log4j-core/src/main/java/org/apache/logging/log4j/core/net/JndiManager.java', 'manual_curation', 0.95, 'sink')
ON CONFLICT DO NOTHING;
-- Example: Spring4Shell (CVE-2022-22965)
INSERT INTO reachability.cve_symbol_mappings (cve_id, purl, symbol_name, file_path, source, confidence, vulnerability_type)
VALUES
('CVE-2022-22965', 'pkg:maven/org.springframework/spring-beans@5.3.17', 'CachedIntrospectionResults.getBeanInfo', 'spring-beans/src/main/java/org/springframework/beans/CachedIntrospectionResults.java', 'patch_analysis', 0.90, 'source')
ON CONFLICT DO NOTHING;
-- Example: polyfill.io supply chain (CVE-2024-38526)
INSERT INTO reachability.cve_symbol_mappings (cve_id, purl, symbol_name, source, confidence, vulnerability_type)
VALUES
('CVE-2024-38526', 'pkg:npm/polyfill.io', 'window.polyfill', 'manual_curation', 0.85, 'source')
ON CONFLICT DO NOTHING;
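As a usage sketch, the queries below show a direct CVE/package lookup and a free-text symbol search against the tables above; the literals mirror the Log4Shell seed rows.

```sql
-- Vulnerable symbols for a CVE/package pair (served by idx_cve_mapping_cve_purl).
SELECT symbol_name, file_path, confidence, vulnerability_type
FROM reachability.cve_symbol_mappings
WHERE cve_id_normalized = UPPER('cve-2021-44228')
  AND purl = 'pkg:maven/org.apache.logging.log4j/log4j-core@2.14.1';

-- Free-text symbol search via the generated tsvector column.
SELECT cve_id, symbol_name, confidence
FROM reachability.cve_symbol_mappings
WHERE symbol_search_vector @@ plainto_tsquery('simple', 'JndiLookup.lookup')
ORDER BY confidence DESC;
```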

View File

@@ -1,38 +0,0 @@
-- -----------------------------------------------------------------------------
-- V20260117__create_doctor_reports_table.sql
-- Sprint: SPRINT_20260117_025_Doctor_coverage_expansion
-- Task: DOC-EXP-005 - Persistent Report Storage
-- Description: Migration to create doctor_reports table for persistent storage
-- -----------------------------------------------------------------------------
-- Doctor reports table for persistent storage
CREATE TABLE IF NOT EXISTS doctor_reports (
run_id VARCHAR(64) PRIMARY KEY,
started_at TIMESTAMPTZ NOT NULL,
completed_at TIMESTAMPTZ,
overall_severity VARCHAR(16) NOT NULL,
passed_count INTEGER NOT NULL DEFAULT 0,
warning_count INTEGER NOT NULL DEFAULT 0,
failed_count INTEGER NOT NULL DEFAULT 0,
skipped_count INTEGER NOT NULL DEFAULT 0,
info_count INTEGER NOT NULL DEFAULT 0,
total_count INTEGER NOT NULL DEFAULT 0,
report_json_compressed BYTEA NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Index for listing reports by date
CREATE INDEX IF NOT EXISTS idx_doctor_reports_started_at
ON doctor_reports (started_at DESC);
-- Index for retention cleanup
CREATE INDEX IF NOT EXISTS idx_doctor_reports_created_at
ON doctor_reports (created_at);
-- Index for filtering by severity
CREATE INDEX IF NOT EXISTS idx_doctor_reports_severity
ON doctor_reports (overall_severity);
-- Comment on table
COMMENT ON TABLE doctor_reports IS 'Stores Doctor diagnostic reports with compression for audit trail';
COMMENT ON COLUMN doctor_reports.report_json_compressed IS 'GZip compressed JSON report data';
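A small usage sketch for the table above; the severity label and retention window are assumptions, not values defined by this migration.

```sql
-- Recent reports whose overall severity was a failure (label value is assumed).
SELECT run_id, started_at, failed_count, total_count
FROM doctor_reports
WHERE overall_severity = 'failed'
ORDER BY started_at DESC
LIMIT 10;

-- Retention cleanup: drop reports older than 90 days (uses idx_doctor_reports_created_at).
DELETE FROM doctor_reports
WHERE created_at < NOW() - INTERVAL '90 days';
```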

View File

@@ -1,153 +0,0 @@
-- Migration: V20260117__vex_rekor_linkage.sql
-- Sprint: SPRINT_20260117_002_EXCITITOR_vex_rekor_linkage
-- Task: VRL-004, VRL-005 - Create Excititor and VexHub database migrations
-- Description: Add Rekor transparency log linkage columns to VEX tables
-- Author: StellaOps
-- Date: 2026-01-17
-- ============================================================================
-- EXCITITOR SCHEMA: vex_observations table
-- ============================================================================
-- Add Rekor linkage columns to vex_observations
ALTER TABLE IF EXISTS excititor.vex_observations
ADD COLUMN IF NOT EXISTS rekor_uuid TEXT,
ADD COLUMN IF NOT EXISTS rekor_log_index BIGINT,
ADD COLUMN IF NOT EXISTS rekor_integrated_time TIMESTAMPTZ,
ADD COLUMN IF NOT EXISTS rekor_log_url TEXT,
ADD COLUMN IF NOT EXISTS rekor_tree_root TEXT,
ADD COLUMN IF NOT EXISTS rekor_tree_size BIGINT,
ADD COLUMN IF NOT EXISTS rekor_inclusion_proof JSONB,
ADD COLUMN IF NOT EXISTS rekor_entry_body_hash TEXT,
ADD COLUMN IF NOT EXISTS rekor_entry_kind TEXT,
ADD COLUMN IF NOT EXISTS rekor_linked_at TIMESTAMPTZ;
-- Index for Rekor queries by UUID
CREATE INDEX IF NOT EXISTS idx_vex_observations_rekor_uuid
ON excititor.vex_observations(rekor_uuid)
WHERE rekor_uuid IS NOT NULL;
-- Index for Rekor queries by log index (for ordered traversal)
CREATE INDEX IF NOT EXISTS idx_vex_observations_rekor_log_index
ON excititor.vex_observations(rekor_log_index DESC)
WHERE rekor_log_index IS NOT NULL;
-- Index for finding unlinked observations (for retry/backfill)
CREATE INDEX IF NOT EXISTS idx_vex_observations_pending_rekor
ON excititor.vex_observations(created_at)
WHERE rekor_uuid IS NULL;
-- Comment on columns
COMMENT ON COLUMN excititor.vex_observations.rekor_uuid IS 'Rekor entry UUID (64-char hex)';
COMMENT ON COLUMN excititor.vex_observations.rekor_log_index IS 'Monotonically increasing log position';
COMMENT ON COLUMN excititor.vex_observations.rekor_integrated_time IS 'Time entry was integrated into Rekor log';
COMMENT ON COLUMN excititor.vex_observations.rekor_log_url IS 'Rekor server URL where entry was submitted';
COMMENT ON COLUMN excititor.vex_observations.rekor_tree_root IS 'Merkle tree root hash at submission time (base64)';
COMMENT ON COLUMN excititor.vex_observations.rekor_tree_size IS 'Tree size at submission time';
COMMENT ON COLUMN excititor.vex_observations.rekor_inclusion_proof IS 'RFC 6962 inclusion proof for offline verification';
COMMENT ON COLUMN excititor.vex_observations.rekor_entry_body_hash IS 'SHA-256 hash of entry body';
COMMENT ON COLUMN excititor.vex_observations.rekor_entry_kind IS 'Entry kind (dsse, intoto, hashedrekord)';
COMMENT ON COLUMN excititor.vex_observations.rekor_linked_at IS 'When linkage was recorded locally';
-- ============================================================================
-- EXCITITOR SCHEMA: vex_statement_change_events table
-- ============================================================================
-- Add Rekor linkage to change events
ALTER TABLE IF EXISTS excititor.vex_statement_change_events
ADD COLUMN IF NOT EXISTS rekor_entry_id TEXT,
ADD COLUMN IF NOT EXISTS rekor_log_index BIGINT;
-- Index for Rekor queries on change events
CREATE INDEX IF NOT EXISTS idx_vex_change_events_rekor
ON excititor.vex_statement_change_events(rekor_entry_id)
WHERE rekor_entry_id IS NOT NULL;
COMMENT ON COLUMN excititor.vex_statement_change_events.rekor_entry_id IS 'Rekor entry UUID for change attestation';
COMMENT ON COLUMN excititor.vex_statement_change_events.rekor_log_index IS 'Rekor log index for change attestation';
-- ============================================================================
-- VEXHUB SCHEMA: vex_statements table
-- ============================================================================
-- Add Rekor linkage columns to vex_statements
ALTER TABLE IF EXISTS vexhub.vex_statements
ADD COLUMN IF NOT EXISTS rekor_uuid TEXT,
ADD COLUMN IF NOT EXISTS rekor_log_index BIGINT,
ADD COLUMN IF NOT EXISTS rekor_integrated_time TIMESTAMPTZ,
ADD COLUMN IF NOT EXISTS rekor_inclusion_proof JSONB;
-- Index for Rekor queries
CREATE INDEX IF NOT EXISTS idx_vexhub_statements_rekor_uuid
ON vexhub.vex_statements(rekor_uuid)
WHERE rekor_uuid IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_vexhub_statements_rekor_log_index
ON vexhub.vex_statements(rekor_log_index DESC)
WHERE rekor_log_index IS NOT NULL;
COMMENT ON COLUMN vexhub.vex_statements.rekor_uuid IS 'Rekor entry UUID for statement attestation';
COMMENT ON COLUMN vexhub.vex_statements.rekor_log_index IS 'Rekor log index for statement attestation';
COMMENT ON COLUMN vexhub.vex_statements.rekor_integrated_time IS 'Time statement was integrated into Rekor log';
COMMENT ON COLUMN vexhub.vex_statements.rekor_inclusion_proof IS 'RFC 6962 inclusion proof for offline verification';
-- ============================================================================
-- ATTESTOR SCHEMA: rekor_entries verification tracking
-- Sprint: SPRINT_20260117_001_ATTESTOR_periodic_rekor_verification (PRV-003)
-- ============================================================================
-- Add verification tracking columns to existing rekor_entries table
ALTER TABLE IF EXISTS attestor.rekor_entries
ADD COLUMN IF NOT EXISTS last_verified_at TIMESTAMPTZ,
ADD COLUMN IF NOT EXISTS verification_count INT NOT NULL DEFAULT 0,
ADD COLUMN IF NOT EXISTS last_verification_result TEXT;
-- Index for verification queries (find entries needing verification)
CREATE INDEX IF NOT EXISTS idx_rekor_entries_verification
ON attestor.rekor_entries(created_at DESC, last_verified_at NULLS FIRST)
WHERE last_verification_result IS DISTINCT FROM 'invalid';
-- Index for finding never-verified entries
CREATE INDEX IF NOT EXISTS idx_rekor_entries_unverified
ON attestor.rekor_entries(created_at DESC)
WHERE last_verified_at IS NULL;
COMMENT ON COLUMN attestor.rekor_entries.last_verified_at IS 'Timestamp of last successful verification';
COMMENT ON COLUMN attestor.rekor_entries.verification_count IS 'Number of times entry has been verified';
COMMENT ON COLUMN attestor.rekor_entries.last_verification_result IS 'Result of last verification: valid, invalid, skipped';
-- ============================================================================
-- ATTESTOR SCHEMA: rekor_root_checkpoints table
-- Stores tree root checkpoints for consistency verification
-- ============================================================================
CREATE TABLE IF NOT EXISTS attestor.rekor_root_checkpoints (
id BIGSERIAL PRIMARY KEY,
tree_root TEXT NOT NULL,
tree_size BIGINT NOT NULL,
log_id TEXT NOT NULL,
log_url TEXT,
checkpoint_envelope TEXT,
captured_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
verified_at TIMESTAMPTZ,
is_consistent BOOLEAN,
inconsistency_reason TEXT,
CONSTRAINT uq_root_checkpoint UNIQUE (log_id, tree_root, tree_size)
);
-- Index for finding latest checkpoints per log
CREATE INDEX IF NOT EXISTS idx_rekor_root_checkpoints_latest
ON attestor.rekor_root_checkpoints(log_id, captured_at DESC);
-- Index for consistency verification
CREATE INDEX IF NOT EXISTS idx_rekor_root_checkpoints_unverified
ON attestor.rekor_root_checkpoints(captured_at DESC)
WHERE verified_at IS NULL;
COMMENT ON TABLE attestor.rekor_root_checkpoints IS 'Stores Rekor tree root checkpoints for consistency verification';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.tree_root IS 'Merkle tree root hash (base64)';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.tree_size IS 'Tree size at checkpoint';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.log_id IS 'Rekor log identifier (hash of public key)';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.checkpoint_envelope IS 'Signed checkpoint in note format';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.is_consistent IS 'Whether checkpoint was consistent with previous';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.inconsistency_reason IS 'Reason for inconsistency if detected';
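Three maintenance-style queries sketching how the new columns and tables above might be used; none of them are part of the original migration.

```sql
-- VEX observations not yet linked to a Rekor entry (backfill candidates,
-- served by idx_vex_observations_pending_rekor).
SELECT COUNT(*) AS pending_observations
FROM excititor.vex_observations
WHERE rekor_uuid IS NULL;

-- Never-verified Rekor entries, newest first (idx_rekor_entries_unverified).
SELECT created_at, verification_count
FROM attestor.rekor_entries
WHERE last_verified_at IS NULL
ORDER BY created_at DESC
LIMIT 100;

-- Latest captured checkpoint per log (idx_rekor_root_checkpoints_latest).
SELECT DISTINCT ON (log_id) log_id, tree_size, captured_at, is_consistent
FROM attestor.rekor_root_checkpoints
ORDER BY log_id, captured_at DESC;
```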

View File

@@ -1,139 +0,0 @@
-- -----------------------------------------------------------------------------
-- V20260119_001__Add_UnderReview_Escalated_Rejected_States.sql
-- Sprint: SPRINT_20260118_018_Unknowns_queue_enhancement
-- Task: UQ-005 - Migration for existing entries (map to new states)
-- Description: Adds new state machine states and required columns
-- -----------------------------------------------------------------------------
-- Add new columns for UnderReview and Escalated states
ALTER TABLE grey_queue_entries
ADD COLUMN IF NOT EXISTS assignee VARCHAR(255) NULL,
ADD COLUMN IF NOT EXISTS assigned_at TIMESTAMPTZ NULL,
ADD COLUMN IF NOT EXISTS escalated_at TIMESTAMPTZ NULL,
ADD COLUMN IF NOT EXISTS escalation_reason TEXT NULL;
-- Add new enum values to grey_queue_status
-- Note: PostgreSQL requires special handling for enum additions
-- First, check if we need to add the values (idempotent)
DO $$
BEGIN
-- Add 'under_review' if not exists
IF NOT EXISTS (
SELECT 1 FROM pg_enum
WHERE enumlabel = 'under_review'
AND enumtypid = 'grey_queue_status'::regtype
) THEN
ALTER TYPE grey_queue_status ADD VALUE 'under_review' AFTER 'retrying';
END IF;
-- Add 'escalated' if not exists
IF NOT EXISTS (
SELECT 1 FROM pg_enum
WHERE enumlabel = 'escalated'
AND enumtypid = 'grey_queue_status'::regtype
) THEN
ALTER TYPE grey_queue_status ADD VALUE 'escalated' AFTER 'under_review';
END IF;
-- Add 'rejected' if not exists
IF NOT EXISTS (
SELECT 1 FROM pg_enum
WHERE enumlabel = 'rejected'
AND enumtypid = 'grey_queue_status'::regtype
) THEN
ALTER TYPE grey_queue_status ADD VALUE 'rejected' AFTER 'resolved';
END IF;
EXCEPTION
WHEN others THEN
-- Enum values may already exist, which is fine
NULL;
END $$;
-- Add indexes for new query patterns
CREATE INDEX IF NOT EXISTS idx_grey_queue_assignee
ON grey_queue_entries(assignee)
WHERE assignee IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_grey_queue_status_assignee
ON grey_queue_entries(status, assignee)
WHERE status IN ('under_review', 'escalated');
CREATE INDEX IF NOT EXISTS idx_grey_queue_escalated_at
ON grey_queue_entries(escalated_at DESC)
WHERE escalated_at IS NOT NULL;
-- Add audit trigger for state transitions
CREATE TABLE IF NOT EXISTS grey_queue_state_transitions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
entry_id UUID NOT NULL REFERENCES grey_queue_entries(id),
tenant_id VARCHAR(128) NOT NULL,
from_state VARCHAR(32) NOT NULL,
to_state VARCHAR(32) NOT NULL,
transitioned_by VARCHAR(255),
reason TEXT,
transitioned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB
);
CREATE INDEX IF NOT EXISTS idx_grey_queue_transitions_entry
ON grey_queue_state_transitions(entry_id);
CREATE INDEX IF NOT EXISTS idx_grey_queue_transitions_tenant_time
ON grey_queue_state_transitions(tenant_id, transitioned_at DESC);
-- Function to record state transitions
CREATE OR REPLACE FUNCTION record_grey_queue_transition()
RETURNS TRIGGER AS $$
BEGIN
IF OLD.status IS DISTINCT FROM NEW.status THEN
INSERT INTO grey_queue_state_transitions (
entry_id, tenant_id, from_state, to_state,
transitioned_by, transitioned_at
) VALUES (
NEW.id,
NEW.tenant_id,
OLD.status::text,
NEW.status::text,
COALESCE(NEW.assignee, current_user),
NOW()
);
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create trigger if not exists
DROP TRIGGER IF EXISTS trg_grey_queue_state_transition ON grey_queue_entries;
CREATE TRIGGER trg_grey_queue_state_transition
AFTER UPDATE ON grey_queue_entries
FOR EACH ROW
EXECUTE FUNCTION record_grey_queue_transition();
-- Update summary view to include new states
CREATE OR REPLACE VIEW grey_queue_summary AS
SELECT
tenant_id,
COUNT(*) FILTER (WHERE status = 'pending') as pending_count,
COUNT(*) FILTER (WHERE status = 'processing') as processing_count,
COUNT(*) FILTER (WHERE status = 'retrying') as retrying_count,
COUNT(*) FILTER (WHERE status = 'under_review') as under_review_count,
COUNT(*) FILTER (WHERE status = 'escalated') as escalated_count,
COUNT(*) FILTER (WHERE status = 'resolved') as resolved_count,
COUNT(*) FILTER (WHERE status = 'rejected') as rejected_count,
COUNT(*) FILTER (WHERE status = 'failed') as failed_count,
COUNT(*) FILTER (WHERE status = 'expired') as expired_count,
COUNT(*) FILTER (WHERE status = 'dismissed') as dismissed_count,
COUNT(*) as total_count
FROM grey_queue_entries
GROUP BY tenant_id;
-- Comment for documentation
COMMENT ON COLUMN grey_queue_entries.assignee IS
'Assignee for entries in UnderReview state (Sprint UQ-005)';
COMMENT ON COLUMN grey_queue_entries.assigned_at IS
'When the entry was assigned for review (Sprint UQ-005)';
COMMENT ON COLUMN grey_queue_entries.escalated_at IS
'When the entry was escalated to security team (Sprint UQ-005)';
COMMENT ON COLUMN grey_queue_entries.escalation_reason IS
'Reason for escalation (Sprint UQ-005)';
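A usage sketch for the new states and audit trigger; the entry id is a hypothetical placeholder and the UPDATE assumes the calling service sets the escalation fields itself.

```sql
-- Escalate one entry; the AFTER UPDATE trigger records the state transition.
UPDATE grey_queue_entries
SET status = 'escalated',
    escalated_at = NOW(),
    escalation_reason = 'Repeated resolution failures'
WHERE id = '00000000-0000-0000-0000-000000000000';

-- Audit trail for that entry, newest transition first.
SELECT from_state, to_state, transitioned_by, transitioned_at
FROM grey_queue_state_transitions
WHERE entry_id = '00000000-0000-0000-0000-000000000000'
ORDER BY transitioned_at DESC;
```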

View File

@@ -1,130 +0,0 @@
-- Migration: Add diff_id column to scanner layers table
-- Sprint: SPRINT_025_Scanner_layer_manifest_infrastructure
-- Task: TASK-025-03
-- Add diff_id column to layers table (sha256:64hex = 71 chars)
ALTER TABLE scanner.layers
ADD COLUMN IF NOT EXISTS diff_id VARCHAR(71);
-- Add timestamp for when diffID was computed
ALTER TABLE scanner.layers
ADD COLUMN IF NOT EXISTS diff_id_computed_at_utc TIMESTAMP;
-- Create index on diff_id for fast lookups
CREATE INDEX IF NOT EXISTS idx_layers_diff_id
ON scanner.layers (diff_id)
WHERE diff_id IS NOT NULL;
-- Create image_layers junction table if it doesn't exist
-- This tracks which layers belong to which images
CREATE TABLE IF NOT EXISTS scanner.image_layers (
image_reference VARCHAR(512) NOT NULL,
layer_digest VARCHAR(71) NOT NULL,
layer_index INT NOT NULL,
created_at_utc TIMESTAMP NOT NULL DEFAULT NOW(),
PRIMARY KEY (image_reference, layer_digest)
);
CREATE INDEX IF NOT EXISTS idx_image_layers_digest
ON scanner.image_layers (layer_digest);
-- DiffID cache table for resolved diffIDs
CREATE TABLE IF NOT EXISTS scanner.scanner_diffid_cache (
layer_digest VARCHAR(71) PRIMARY KEY,
diff_id VARCHAR(71) NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Base image fingerprint tables for layer reuse detection
CREATE TABLE IF NOT EXISTS scanner.scanner_base_image_fingerprints (
image_reference VARCHAR(512) PRIMARY KEY,
layer_count INT NOT NULL,
registered_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
detection_count BIGINT NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS scanner.scanner_base_image_layers (
image_reference VARCHAR(512) NOT NULL REFERENCES scanner.scanner_base_image_fingerprints(image_reference) ON DELETE CASCADE,
layer_index INT NOT NULL,
diff_id VARCHAR(71) NOT NULL,
PRIMARY KEY (image_reference, layer_index)
);
CREATE INDEX IF NOT EXISTS idx_base_image_layers_diff_id
ON scanner.scanner_base_image_layers (diff_id);
-- Manifest snapshots table for IOciManifestSnapshotService
CREATE TABLE IF NOT EXISTS scanner.manifest_snapshots (
id UUID PRIMARY KEY,
image_reference VARCHAR(512) NOT NULL,
registry VARCHAR(256) NOT NULL,
repository VARCHAR(256) NOT NULL,
tag VARCHAR(128),
manifest_digest VARCHAR(71) NOT NULL,
config_digest VARCHAR(71) NOT NULL,
media_type VARCHAR(128) NOT NULL,
layers JSONB NOT NULL,
diff_ids JSONB NOT NULL,
platform JSONB,
total_size BIGINT NOT NULL,
captured_at TIMESTAMPTZ NOT NULL,
snapshot_version VARCHAR(32),
UNIQUE (manifest_digest)
);
CREATE INDEX IF NOT EXISTS idx_manifest_snapshots_image_ref
ON scanner.manifest_snapshots (image_reference);
CREATE INDEX IF NOT EXISTS idx_manifest_snapshots_repository
ON scanner.manifest_snapshots (registry, repository);
CREATE INDEX IF NOT EXISTS idx_manifest_snapshots_captured_at
ON scanner.manifest_snapshots (captured_at DESC);
-- Layer scan history for reuse detection (TASK-025-04)
CREATE TABLE IF NOT EXISTS scanner.layer_scans (
diff_id VARCHAR(71) PRIMARY KEY,
scanned_at TIMESTAMPTZ NOT NULL,
finding_count INT,
scanned_by VARCHAR(128) NOT NULL,
scanner_version VARCHAR(64)
);
-- Layer reuse counts for statistics
CREATE TABLE IF NOT EXISTS scanner.layer_reuse_counts (
diff_id VARCHAR(71) PRIMARY KEY,
reuse_count INT NOT NULL DEFAULT 1,
first_seen_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_layer_reuse_counts_count
ON scanner.layer_reuse_counts (reuse_count DESC);
COMMENT ON COLUMN scanner.layers.diff_id IS 'Uncompressed layer content hash (sha256:hex64). Immutable once computed.';
COMMENT ON TABLE scanner.scanner_diffid_cache IS 'Cache of layer digest to diffID mappings. Layer digests are immutable so cache entries never expire.';
COMMENT ON TABLE scanner.scanner_base_image_fingerprints IS 'Known base image fingerprints for layer reuse detection.';
COMMENT ON TABLE scanner.manifest_snapshots IS 'Point-in-time captures of OCI image manifests for delta scanning.';
COMMENT ON TABLE scanner.layer_scans IS 'History of layer scans for deduplication. One entry per diffID.';
COMMENT ON TABLE scanner.layer_reuse_counts IS 'Counts of how many times each layer appears across images.';
-- Layer SBOM CAS for per-layer SBOM storage (TASK-026-02)
CREATE TABLE IF NOT EXISTS scanner.layer_sbom_cas (
diff_id VARCHAR(71) NOT NULL,
format VARCHAR(20) NOT NULL,
content BYTEA NOT NULL,
size_bytes BIGINT NOT NULL,
compressed BOOLEAN NOT NULL DEFAULT TRUE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
last_accessed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
PRIMARY KEY (diff_id, format)
);
CREATE INDEX IF NOT EXISTS idx_layer_sbom_cas_last_accessed
ON scanner.layer_sbom_cas (last_accessed_at);
CREATE INDEX IF NOT EXISTS idx_layer_sbom_cas_format
ON scanner.layer_sbom_cas (format);
COMMENT ON TABLE scanner.layer_sbom_cas IS 'Content-addressable storage for per-layer SBOMs. Keyed by diffID (immutable).';
COMMENT ON COLUMN scanner.layer_sbom_cas.content IS 'Compressed (gzip) SBOM content.';
COMMENT ON COLUMN scanner.layer_sbom_cas.last_accessed_at IS 'For TTL-based eviction of cold entries.';
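A small sketch of how the reuse and scan-history tables above can drive delta scanning; not part of the original migration.

```sql
-- Most frequently reused layers and whether a scan is already on record,
-- so a delta scan can skip layers that were scanned before.
SELECT r.diff_id,
       r.reuse_count,
       s.scanned_at,
       s.finding_count
FROM scanner.layer_reuse_counts AS r
LEFT JOIN scanner.layer_scans AS s USING (diff_id)
ORDER BY r.reuse_count DESC
LIMIT 20;
```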

View File

@@ -1,561 +0,0 @@
-- Partitioning Infrastructure Migration 001: Foundation
-- Sprint: SPRINT_3422_0001_0001 - Time-Based Partitioning
-- Category: C (infrastructure setup, requires planned maintenance)
--
-- Purpose: Create partition management infrastructure including:
-- - Helper functions for partition creation and maintenance
-- - Utility functions for BRIN index optimization
-- - Partition maintenance scheduling support
--
-- This migration creates the foundation; table conversion is done in separate migrations.
BEGIN;
-- ============================================================================
-- Step 1: Create partition management schema
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS partition_mgmt;
COMMENT ON SCHEMA partition_mgmt IS
'Partition management utilities for time-series tables';
-- ============================================================================
-- Step 2: Managed table registration
-- ============================================================================
CREATE TABLE IF NOT EXISTS partition_mgmt.managed_tables (
schema_name TEXT NOT NULL,
table_name TEXT NOT NULL,
partition_key TEXT NOT NULL,
partition_type TEXT NOT NULL,
retention_months INT NOT NULL DEFAULT 0,
months_ahead INT NOT NULL DEFAULT 3,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
PRIMARY KEY (schema_name, table_name)
);
COMMENT ON TABLE partition_mgmt.managed_tables IS
'Tracks partitioned tables with retention and creation settings';
-- ============================================================================
-- Step 3: Partition creation function
-- ============================================================================
-- Creates a new partition for a given table and date range
CREATE OR REPLACE FUNCTION partition_mgmt.create_partition(
p_schema_name TEXT,
p_table_name TEXT,
p_partition_column TEXT,
p_start_date DATE,
p_end_date DATE,
p_partition_suffix TEXT DEFAULT NULL
)
RETURNS TEXT
LANGUAGE plpgsql
AS $$
DECLARE
v_partition_name TEXT;
v_parent_table TEXT;
v_sql TEXT;
BEGIN
v_parent_table := format('%I.%I', p_schema_name, p_table_name);
-- Generate partition name: tablename_YYYY_MM or tablename_YYYY_Q#
IF p_partition_suffix IS NOT NULL THEN
v_partition_name := format('%s_%s', p_table_name, p_partition_suffix);
ELSE
v_partition_name := format('%s_%s', p_table_name, to_char(p_start_date, 'YYYY_MM'));
END IF;
-- Check if partition already exists
IF EXISTS (
SELECT 1 FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = p_schema_name AND c.relname = v_partition_name
) THEN
RAISE NOTICE 'Partition % already exists, skipping', v_partition_name;
RETURN v_partition_name;
END IF;
-- Create partition
v_sql := format(
'CREATE TABLE %I.%I PARTITION OF %s FOR VALUES FROM (%L) TO (%L)',
p_schema_name,
v_partition_name,
v_parent_table,
p_start_date,
p_end_date
);
EXECUTE v_sql;
RAISE NOTICE 'Created partition %.%', p_schema_name, v_partition_name;
RETURN v_partition_name;
END;
$$;
-- ============================================================================
-- Step 4: Monthly partition creation helper
-- ============================================================================
CREATE OR REPLACE FUNCTION partition_mgmt.create_monthly_partitions(
p_schema_name TEXT,
p_table_name TEXT,
p_partition_column TEXT,
p_start_month DATE,
p_months_ahead INT DEFAULT 3
)
RETURNS SETOF TEXT
LANGUAGE plpgsql
AS $$
DECLARE
v_current_month DATE;
v_end_month DATE;
v_partition_name TEXT;
BEGIN
v_current_month := date_trunc('month', p_start_month)::DATE;
v_end_month := date_trunc('month', NOW() + (p_months_ahead || ' months')::INTERVAL)::DATE;
WHILE v_current_month <= v_end_month LOOP
v_partition_name := partition_mgmt.create_partition(
p_schema_name,
p_table_name,
p_partition_column,
v_current_month,
(v_current_month + INTERVAL '1 month')::DATE
);
RETURN NEXT v_partition_name;
v_current_month := (v_current_month + INTERVAL '1 month')::DATE;
END LOOP;
END;
$$;
-- ============================================================================
-- Step 5: Quarterly partition creation helper
-- ============================================================================
CREATE OR REPLACE FUNCTION partition_mgmt.create_quarterly_partitions(
p_schema_name TEXT,
p_table_name TEXT,
p_partition_column TEXT,
p_start_quarter DATE,
p_quarters_ahead INT DEFAULT 2
)
RETURNS SETOF TEXT
LANGUAGE plpgsql
AS $$
DECLARE
v_current_quarter DATE;
v_end_quarter DATE;
v_partition_name TEXT;
v_suffix TEXT;
BEGIN
v_current_quarter := date_trunc('quarter', p_start_quarter)::DATE;
v_end_quarter := date_trunc('quarter', NOW() + (p_quarters_ahead * 3 || ' months')::INTERVAL)::DATE;
WHILE v_current_quarter <= v_end_quarter LOOP
-- Generate suffix like 2025_Q1, 2025_Q2, etc.
v_suffix := to_char(v_current_quarter, 'YYYY') || '_Q' ||
EXTRACT(QUARTER FROM v_current_quarter)::TEXT;
v_partition_name := partition_mgmt.create_partition(
p_schema_name,
p_table_name,
p_partition_column,
v_current_quarter,
(v_current_quarter + INTERVAL '3 months')::DATE,
v_suffix
);
RETURN NEXT v_partition_name;
v_current_quarter := (v_current_quarter + INTERVAL '3 months')::DATE;
END LOOP;
END;
$$;
-- ============================================================================
-- Step 6: Ensure future partitions exist
-- ============================================================================
CREATE OR REPLACE FUNCTION partition_mgmt.ensure_future_partitions(
p_schema_name TEXT,
p_table_name TEXT,
p_months_ahead INT
)
RETURNS INT
LANGUAGE plpgsql
AS $$
DECLARE
v_partition_key TEXT;
v_partition_type TEXT;
v_months_ahead INT;
v_created INT := 0;
v_current DATE;
v_end DATE;
v_suffix TEXT;
v_partition_name TEXT;
BEGIN
SELECT partition_key, partition_type, months_ahead
INTO v_partition_key, v_partition_type, v_months_ahead
FROM partition_mgmt.managed_tables
WHERE schema_name = p_schema_name
AND table_name = p_table_name;
IF v_partition_key IS NULL THEN
RETURN 0;
END IF;
IF p_months_ahead IS NOT NULL AND p_months_ahead > 0 THEN
v_months_ahead := p_months_ahead;
END IF;
IF v_months_ahead IS NULL OR v_months_ahead <= 0 THEN
RETURN 0;
END IF;
v_partition_type := lower(coalesce(v_partition_type, 'monthly'));
IF v_partition_type = 'monthly' THEN
v_current := date_trunc('month', NOW())::DATE;
v_end := date_trunc('month', NOW() + (v_months_ahead || ' months')::INTERVAL)::DATE;
WHILE v_current <= v_end LOOP
v_partition_name := format('%s_%s', p_table_name, to_char(v_current, 'YYYY_MM'));
IF NOT EXISTS (
SELECT 1 FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = p_schema_name AND c.relname = v_partition_name
) THEN
PERFORM partition_mgmt.create_partition(
p_schema_name,
p_table_name,
v_partition_key,
v_current,
(v_current + INTERVAL '1 month')::DATE
);
v_created := v_created + 1;
END IF;
v_current := (v_current + INTERVAL '1 month')::DATE;
END LOOP;
ELSIF v_partition_type = 'quarterly' THEN
v_current := date_trunc('quarter', NOW())::DATE;
v_end := date_trunc('quarter', NOW() + (v_months_ahead || ' months')::INTERVAL)::DATE;
WHILE v_current <= v_end LOOP
v_suffix := to_char(v_current, 'YYYY') || '_Q' ||
EXTRACT(QUARTER FROM v_current)::TEXT;
v_partition_name := format('%s_%s', p_table_name, v_suffix);
IF NOT EXISTS (
SELECT 1 FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = p_schema_name AND c.relname = v_partition_name
) THEN
PERFORM partition_mgmt.create_partition(
p_schema_name,
p_table_name,
v_partition_key,
v_current,
(v_current + INTERVAL '3 months')::DATE,
v_suffix
);
v_created := v_created + 1;
END IF;
v_current := (v_current + INTERVAL '3 months')::DATE;
END LOOP;
END IF;
RETURN v_created;
END;
$$;
-- ============================================================================
-- Step 7: Retention enforcement function
-- ============================================================================
CREATE OR REPLACE FUNCTION partition_mgmt.enforce_retention(
p_schema_name TEXT,
p_table_name TEXT,
p_retention_months INT
)
RETURNS INT
LANGUAGE plpgsql
AS $$
DECLARE
v_retention_months INT;
v_cutoff_date DATE;
v_partition RECORD;
v_dropped INT := 0;
BEGIN
SELECT retention_months
INTO v_retention_months
FROM partition_mgmt.managed_tables
WHERE schema_name = p_schema_name
AND table_name = p_table_name;
IF p_retention_months IS NOT NULL AND p_retention_months > 0 THEN
v_retention_months := p_retention_months;
END IF;
IF v_retention_months IS NULL OR v_retention_months <= 0 THEN
RETURN 0;
END IF;
v_cutoff_date := (NOW() - (v_retention_months || ' months')::INTERVAL)::DATE;
FOR v_partition IN
SELECT partition_name, partition_end
FROM partition_mgmt.partition_stats
WHERE schema_name = p_schema_name
AND table_name = p_table_name
LOOP
IF v_partition.partition_end IS NOT NULL AND v_partition.partition_end < v_cutoff_date THEN
EXECUTE format('DROP TABLE IF EXISTS %I.%I', p_schema_name, v_partition.partition_name);
v_dropped := v_dropped + 1;
END IF;
END LOOP;
RETURN v_dropped;
END;
$$;
-- ============================================================================
-- Step 8: Partition detach and archive function
-- ============================================================================
CREATE OR REPLACE FUNCTION partition_mgmt.detach_partition(
p_schema_name TEXT,
p_table_name TEXT,
p_partition_name TEXT,
p_archive_schema TEXT DEFAULT 'archive'
)
RETURNS BOOLEAN
LANGUAGE plpgsql
AS $$
DECLARE
v_parent_table TEXT;
v_partition_full TEXT;
v_archive_table TEXT;
BEGIN
v_parent_table := format('%I.%I', p_schema_name, p_table_name);
v_partition_full := format('%I.%I', p_schema_name, p_partition_name);
v_archive_table := format('%I.%I', p_archive_schema, p_partition_name);
-- Create archive schema if not exists
EXECUTE format('CREATE SCHEMA IF NOT EXISTS %I', p_archive_schema);
-- Detach partition
EXECUTE format(
'ALTER TABLE %s DETACH PARTITION %s',
v_parent_table,
v_partition_full
);
-- Move to archive schema
EXECUTE format(
'ALTER TABLE %s SET SCHEMA %I',
v_partition_full,
p_archive_schema
);
RAISE NOTICE 'Detached and archived partition % to %', p_partition_name, v_archive_table;
RETURN TRUE;
EXCEPTION
WHEN OTHERS THEN
RAISE WARNING 'Failed to detach partition %: %', p_partition_name, SQLERRM;
RETURN FALSE;
END;
$$;
-- ============================================================================
-- Step 9: Partition retention cleanup function
-- ============================================================================
CREATE OR REPLACE FUNCTION partition_mgmt.cleanup_old_partitions(
p_schema_name TEXT,
p_table_name TEXT,
p_retention_months INT,
p_archive_schema TEXT DEFAULT 'archive',
p_dry_run BOOLEAN DEFAULT TRUE
)
RETURNS TABLE(partition_name TEXT, action TEXT)
LANGUAGE plpgsql
AS $$
DECLARE
v_cutoff_date DATE;
v_partition RECORD;
v_partition_end DATE;
BEGIN
v_cutoff_date := (NOW() - (p_retention_months || ' months')::INTERVAL)::DATE;
FOR v_partition IN
SELECT c.relname as name,
pg_get_expr(c.relpartbound, c.oid) as bound_expr
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
JOIN pg_inherits i ON c.oid = i.inhrelid
JOIN pg_class parent ON i.inhparent = parent.oid
WHERE n.nspname = p_schema_name
AND parent.relname = p_table_name
AND c.relkind = 'r'
LOOP
-- Parse the partition bound to get end date
-- Format: FOR VALUES FROM ('2024-01-01') TO ('2024-02-01')
v_partition_end := (regexp_match(v_partition.bound_expr,
'TO \(''([^'']+)''\)'))[1]::DATE;
IF v_partition_end IS NOT NULL AND v_partition_end < v_cutoff_date THEN
partition_name := v_partition.name;
IF p_dry_run THEN
action := 'WOULD_ARCHIVE';
ELSE
IF partition_mgmt.detach_partition(
p_schema_name, p_table_name, v_partition.name, p_archive_schema
) THEN
action := 'ARCHIVED';
ELSE
action := 'FAILED';
END IF;
END IF;
RETURN NEXT;
END IF;
END LOOP;
END;
$$;
-- ============================================================================
-- Step 10: Partition statistics view
-- ============================================================================
CREATE OR REPLACE VIEW partition_mgmt.partition_stats AS
SELECT
n.nspname AS schema_name,
parent.relname AS table_name,
c.relname AS partition_name,
pg_get_expr(c.relpartbound, c.oid) AS partition_range,
(regexp_match(pg_get_expr(c.relpartbound, c.oid), 'FROM \(''([^'']+)''\)'))[1]::DATE AS partition_start,
(regexp_match(pg_get_expr(c.relpartbound, c.oid), 'TO \(''([^'']+)''\)'))[1]::DATE AS partition_end,
pg_size_pretty(pg_relation_size(c.oid)) AS size,
pg_relation_size(c.oid) AS size_bytes,
COALESCE(s.n_live_tup, 0) AS estimated_rows,
s.last_vacuum,
s.last_autovacuum,
s.last_analyze,
s.last_autoanalyze
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
JOIN pg_inherits i ON c.oid = i.inhrelid
JOIN pg_class parent ON i.inhparent = parent.oid
LEFT JOIN pg_stat_user_tables s ON c.oid = s.relid
WHERE c.relkind = 'r'
AND parent.relkind = 'p'
ORDER BY n.nspname, parent.relname, c.relname;
COMMENT ON VIEW partition_mgmt.partition_stats IS
'Statistics for all partitioned tables in the database';
-- ============================================================================
-- Step 11: BRIN index optimization helper
-- ============================================================================
CREATE OR REPLACE FUNCTION partition_mgmt.create_brin_index_if_not_exists(
p_schema_name TEXT,
p_table_name TEXT,
p_column_name TEXT,
p_pages_per_range INT DEFAULT 128
)
RETURNS BOOLEAN
LANGUAGE plpgsql
AS $$
DECLARE
v_index_name TEXT;
v_sql TEXT;
BEGIN
v_index_name := format('brin_%s_%s', p_table_name, p_column_name);
-- Check if index exists
IF EXISTS (
SELECT 1 FROM pg_indexes
WHERE schemaname = p_schema_name AND indexname = v_index_name
) THEN
RAISE NOTICE 'BRIN index % already exists', v_index_name;
RETURN FALSE;
END IF;
v_sql := format(
'CREATE INDEX %I ON %I.%I USING brin (%I) WITH (pages_per_range = %s)',
v_index_name,
p_schema_name,
p_table_name,
p_column_name,
p_pages_per_range
);
EXECUTE v_sql;
RAISE NOTICE 'Created BRIN index % on %.%(%)',
v_index_name, p_schema_name, p_table_name, p_column_name;
RETURN TRUE;
END;
$$;
-- ============================================================================
-- Step 12: Maintenance job tracking table
-- ============================================================================
CREATE TABLE IF NOT EXISTS partition_mgmt.maintenance_log (
id BIGSERIAL PRIMARY KEY,
operation TEXT NOT NULL,
schema_name TEXT NOT NULL,
table_name TEXT NOT NULL,
partition_name TEXT,
status TEXT NOT NULL DEFAULT 'started',
details JSONB NOT NULL DEFAULT '{}',
started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
completed_at TIMESTAMPTZ,
error_message TEXT
);
CREATE INDEX idx_maintenance_log_table ON partition_mgmt.maintenance_log(schema_name, table_name);
CREATE INDEX idx_maintenance_log_status ON partition_mgmt.maintenance_log(status, started_at);
-- ============================================================================
-- Step 13: Archive schema for detached partitions
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS archive;
COMMENT ON SCHEMA archive IS
'Storage for detached/archived partitions awaiting deletion or offload';
COMMIT;
-- ============================================================================
-- Usage Examples (commented out)
-- ============================================================================
/*
-- Create monthly partitions for audit table, 3 months ahead
SELECT partition_mgmt.create_monthly_partitions(
'scheduler', 'audit', 'created_at', '2024-01-01'::DATE, 3
);
-- Preview old partitions that would be archived (dry run)
SELECT * FROM partition_mgmt.cleanup_old_partitions(
'scheduler', 'audit', 12, 'archive', TRUE
);
-- Actually archive old partitions
SELECT * FROM partition_mgmt.cleanup_old_partitions(
'scheduler', 'audit', 12, 'archive', FALSE
);
-- View partition statistics
SELECT * FROM partition_mgmt.partition_stats
WHERE schema_name = 'scheduler'
ORDER BY table_name, partition_name;
*/
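-- The helpers from Steps 8, 11, and 12 are not exercised above. The sketch below
-- (commented out, reusing the same illustrative 'scheduler.audit' table and a
-- hypothetical partition name) shows how they could be wired together: detach a
-- partition into the archive schema, ensure a BRIN index on the time column,
-- and record the work in the maintenance log.
/*
-- Detach a specific partition into the archive schema
SELECT partition_mgmt.detach_partition('scheduler', 'audit', 'audit_2023_01', 'archive');
-- Ensure a BRIN index exists on the partitioning column (128 pages per range)
SELECT partition_mgmt.create_brin_index_if_not_exists('scheduler', 'audit', 'created_at', 128);
-- Record the operation in the maintenance log
INSERT INTO partition_mgmt.maintenance_log (operation, schema_name, table_name, partition_name, status, completed_at)
VALUES ('detach', 'scheduler', 'audit', 'audit_2023_01', 'completed', NOW());
*/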

View File

@@ -1,143 +0,0 @@
-- Migration: Trust Vector Calibration Schema
-- Sprint: 7100.0002.0002
-- Description: Creates schema and tables for trust vector calibration system
-- Create calibration schema
CREATE SCHEMA IF NOT EXISTS excititor_calibration;
-- Calibration manifests table
-- Stores signed manifests for each calibration epoch
CREATE TABLE IF NOT EXISTS excititor_calibration.calibration_manifests (
manifest_id TEXT PRIMARY KEY,
tenant_id TEXT NOT NULL,
epoch_number INTEGER NOT NULL,
epoch_start_utc TIMESTAMP NOT NULL,
epoch_end_utc TIMESTAMP NOT NULL,
sample_count INTEGER NOT NULL,
learning_rate DOUBLE PRECISION NOT NULL,
policy_hash TEXT,
lattice_version TEXT NOT NULL,
manifest_json JSONB NOT NULL,
signature_envelope JSONB,
created_at_utc TIMESTAMP NOT NULL DEFAULT (NOW() AT TIME ZONE 'UTC'),
created_by TEXT NOT NULL,
CONSTRAINT uq_calibration_manifest_tenant_epoch UNIQUE (tenant_id, epoch_number)
);
CREATE INDEX idx_calibration_manifests_tenant
ON excititor_calibration.calibration_manifests(tenant_id);
CREATE INDEX idx_calibration_manifests_created
ON excititor_calibration.calibration_manifests(created_at_utc DESC);
-- Trust vector adjustments table
-- Records each provider's trust vector changes per epoch
CREATE TABLE IF NOT EXISTS excititor_calibration.trust_vector_adjustments (
adjustment_id BIGSERIAL PRIMARY KEY,
manifest_id TEXT NOT NULL REFERENCES excititor_calibration.calibration_manifests(manifest_id),
source_id TEXT NOT NULL,
old_provenance DOUBLE PRECISION NOT NULL,
old_coverage DOUBLE PRECISION NOT NULL,
old_replayability DOUBLE PRECISION NOT NULL,
new_provenance DOUBLE PRECISION NOT NULL,
new_coverage DOUBLE PRECISION NOT NULL,
new_replayability DOUBLE PRECISION NOT NULL,
adjustment_magnitude DOUBLE PRECISION NOT NULL,
confidence_in_adjustment DOUBLE PRECISION NOT NULL,
sample_count_for_source INTEGER NOT NULL,
created_at_utc TIMESTAMP NOT NULL DEFAULT (NOW() AT TIME ZONE 'UTC'),
CONSTRAINT chk_old_provenance_range CHECK (old_provenance >= 0 AND old_provenance <= 1),
CONSTRAINT chk_old_coverage_range CHECK (old_coverage >= 0 AND old_coverage <= 1),
CONSTRAINT chk_old_replayability_range CHECK (old_replayability >= 0 AND old_replayability <= 1),
CONSTRAINT chk_new_provenance_range CHECK (new_provenance >= 0 AND new_provenance <= 1),
CONSTRAINT chk_new_coverage_range CHECK (new_coverage >= 0 AND new_coverage <= 1),
CONSTRAINT chk_new_replayability_range CHECK (new_replayability >= 0 AND new_replayability <= 1),
CONSTRAINT chk_confidence_range CHECK (confidence_in_adjustment >= 0 AND confidence_in_adjustment <= 1)
);
CREATE INDEX idx_trust_adjustments_manifest
ON excititor_calibration.trust_vector_adjustments(manifest_id);
CREATE INDEX idx_trust_adjustments_source
ON excititor_calibration.trust_vector_adjustments(source_id);
-- Calibration feedback samples table
-- Stores empirical evidence used for calibration
CREATE TABLE IF NOT EXISTS excititor_calibration.calibration_samples (
sample_id BIGSERIAL PRIMARY KEY,
tenant_id TEXT NOT NULL,
source_id TEXT NOT NULL,
cve_id TEXT NOT NULL,
purl TEXT NOT NULL,
expected_status TEXT NOT NULL,
actual_status TEXT NOT NULL,
verdict_confidence DOUBLE PRECISION NOT NULL,
is_match BOOLEAN NOT NULL,
feedback_source TEXT NOT NULL, -- 'reachability', 'customer_feedback', 'integration_tests'
feedback_weight DOUBLE PRECISION NOT NULL DEFAULT 1.0,
scan_id TEXT,
collected_at_utc TIMESTAMP NOT NULL DEFAULT (NOW() AT TIME ZONE 'UTC'),
processed BOOLEAN NOT NULL DEFAULT FALSE,
processed_in_manifest_id TEXT REFERENCES excititor_calibration.calibration_manifests(manifest_id),
CONSTRAINT chk_verdict_confidence_range CHECK (verdict_confidence >= 0 AND verdict_confidence <= 1),
CONSTRAINT chk_feedback_weight_range CHECK (feedback_weight >= 0 AND feedback_weight <= 1)
);
CREATE INDEX idx_calibration_samples_tenant
ON excititor_calibration.calibration_samples(tenant_id);
CREATE INDEX idx_calibration_samples_source
ON excititor_calibration.calibration_samples(source_id);
CREATE INDEX idx_calibration_samples_collected
ON excititor_calibration.calibration_samples(collected_at_utc DESC);
CREATE INDEX idx_calibration_samples_processed
ON excititor_calibration.calibration_samples(processed) WHERE NOT processed;
-- Calibration metrics table
-- Tracks performance metrics per source/severity/status
CREATE TABLE IF NOT EXISTS excititor_calibration.calibration_metrics (
metric_id BIGSERIAL PRIMARY KEY,
manifest_id TEXT NOT NULL REFERENCES excititor_calibration.calibration_manifests(manifest_id),
source_id TEXT,
severity TEXT,
status TEXT,
precision DOUBLE PRECISION NOT NULL,
recall DOUBLE PRECISION NOT NULL,
f1_score DOUBLE PRECISION NOT NULL,
false_positive_rate DOUBLE PRECISION NOT NULL,
false_negative_rate DOUBLE PRECISION NOT NULL,
sample_count INTEGER NOT NULL,
created_at_utc TIMESTAMP NOT NULL DEFAULT (NOW() AT TIME ZONE 'UTC'),
CONSTRAINT chk_precision_range CHECK (precision >= 0 AND precision <= 1),
CONSTRAINT chk_recall_range CHECK (recall >= 0 AND recall <= 1),
CONSTRAINT chk_f1_range CHECK (f1_score >= 0 AND f1_score <= 1),
CONSTRAINT chk_fpr_range CHECK (false_positive_rate >= 0 AND false_positive_rate <= 1),
CONSTRAINT chk_fnr_range CHECK (false_negative_rate >= 0 AND false_negative_rate <= 1)
);
CREATE INDEX idx_calibration_metrics_manifest
ON excititor_calibration.calibration_metrics(manifest_id);
CREATE INDEX idx_calibration_metrics_source
ON excititor_calibration.calibration_metrics(source_id) WHERE source_id IS NOT NULL;
-- Grant permissions to excititor service role
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'excititor_service') THEN
GRANT USAGE ON SCHEMA excititor_calibration TO excititor_service;
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA excititor_calibration TO excititor_service;
GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA excititor_calibration TO excititor_service;
ALTER DEFAULT PRIVILEGES IN SCHEMA excititor_calibration
GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO excititor_service;
ALTER DEFAULT PRIVILEGES IN SCHEMA excititor_calibration
GRANT USAGE, SELECT ON SEQUENCES TO excititor_service;
END IF;
END $$;
-- Comments for documentation
COMMENT ON SCHEMA excititor_calibration IS 'Trust vector calibration data for VEX source scoring';
COMMENT ON TABLE excititor_calibration.calibration_manifests IS 'Signed calibration epoch results';
COMMENT ON TABLE excititor_calibration.trust_vector_adjustments IS 'Per-source trust vector changes per epoch';
COMMENT ON TABLE excititor_calibration.calibration_samples IS 'Empirical feedback samples for calibration';
COMMENT ON TABLE excititor_calibration.calibration_metrics IS 'Performance metrics per calibration epoch';
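-- Usage sketch (commented out, not part of the migration): the calibration loop
-- is expected to insert feedback rows and later mark them processed once an
-- epoch manifest exists. Tenant, source, and manifest identifiers below are
-- illustrative placeholders only.
/*
INSERT INTO excititor_calibration.calibration_samples
    (tenant_id, source_id, cve_id, purl, expected_status, actual_status,
     verdict_confidence, is_match, feedback_source, feedback_weight, scan_id)
VALUES
    ('tenant-a', 'osv', 'CVE-2025-0001', 'pkg:npm/lodash@4.17.21',
     'not_affected', 'not_affected', 0.92, TRUE, 'reachability', 1.0, 'scan-123');
-- After an epoch manifest has been written, link the consumed samples to it
UPDATE excititor_calibration.calibration_samples
SET processed = TRUE,
    processed_in_manifest_id = 'manifest-tenant-a-epoch-7'
WHERE tenant_id = 'tenant-a' AND NOT processed;
*/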

View File

@@ -1,97 +0,0 @@
-- Provcache schema migration
-- Run as: psql -d stellaops -f create_provcache_schema.sql
-- Create schema
CREATE SCHEMA IF NOT EXISTS provcache;
-- Main cache items table
CREATE TABLE IF NOT EXISTS provcache.provcache_items (
verikey TEXT PRIMARY KEY,
digest_version TEXT NOT NULL DEFAULT 'v1',
verdict_hash TEXT NOT NULL,
proof_root TEXT NOT NULL,
replay_seed JSONB NOT NULL,
policy_hash TEXT NOT NULL,
signer_set_hash TEXT NOT NULL,
feed_epoch TEXT NOT NULL,
trust_score INTEGER NOT NULL CHECK (trust_score >= 0 AND trust_score <= 100),
hit_count BIGINT NOT NULL DEFAULT 0,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
expires_at TIMESTAMPTZ NOT NULL,
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
last_accessed_at TIMESTAMPTZ,
-- Constraint: expires_at must be after created_at
CONSTRAINT provcache_items_expires_check CHECK (expires_at > created_at)
);
-- Indexes for invalidation queries
CREATE INDEX IF NOT EXISTS idx_provcache_policy_hash
ON provcache.provcache_items(policy_hash);
CREATE INDEX IF NOT EXISTS idx_provcache_signer_set_hash
ON provcache.provcache_items(signer_set_hash);
CREATE INDEX IF NOT EXISTS idx_provcache_feed_epoch
ON provcache.provcache_items(feed_epoch);
CREATE INDEX IF NOT EXISTS idx_provcache_expires_at
ON provcache.provcache_items(expires_at);
CREATE INDEX IF NOT EXISTS idx_provcache_created_at
ON provcache.provcache_items(created_at);
-- Evidence chunks table for large evidence storage
CREATE TABLE IF NOT EXISTS provcache.prov_evidence_chunks (
chunk_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
proof_root TEXT NOT NULL,
chunk_index INTEGER NOT NULL,
chunk_hash TEXT NOT NULL,
blob BYTEA NOT NULL,
blob_size INTEGER NOT NULL,
content_type TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT prov_evidence_chunks_unique_index
UNIQUE (proof_root, chunk_index)
);
CREATE INDEX IF NOT EXISTS idx_prov_chunks_proof_root
ON provcache.prov_evidence_chunks(proof_root);
-- Revocation audit log
CREATE TABLE IF NOT EXISTS provcache.prov_revocations (
revocation_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
revocation_type TEXT NOT NULL,
target_hash TEXT NOT NULL,
reason TEXT,
actor TEXT,
entries_affected BIGINT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_prov_revocations_created_at
ON provcache.prov_revocations(created_at);
CREATE INDEX IF NOT EXISTS idx_prov_revocations_target_hash
ON provcache.prov_revocations(target_hash);
-- Function to update updated_at timestamp
CREATE OR REPLACE FUNCTION provcache.update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger for auto-updating updated_at
DROP TRIGGER IF EXISTS update_provcache_items_updated_at ON provcache.provcache_items;
CREATE TRIGGER update_provcache_items_updated_at
BEFORE UPDATE ON provcache.provcache_items
FOR EACH ROW
EXECUTE FUNCTION provcache.update_updated_at_column();
-- Grant permissions (adjust role as needed)
-- GRANT USAGE ON SCHEMA provcache TO stellaops_app;
-- GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA provcache TO stellaops_app;
-- GRANT USAGE ON ALL SEQUENCES IN SCHEMA provcache TO stellaops_app;
COMMENT ON TABLE provcache.provcache_items IS 'Provenance cache entries for cached security decisions';
COMMENT ON TABLE provcache.prov_evidence_chunks IS 'Chunked evidence storage for large SBOMs and attestations';
COMMENT ON TABLE provcache.prov_revocations IS 'Audit log of cache invalidation events';
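-- Invalidation sketch (commented out): a policy change would typically delete
-- matching cache entries via idx_provcache_policy_hash and append an audit row.
-- The hash and actor values are placeholders.
/*
WITH removed AS (
    DELETE FROM provcache.provcache_items
    WHERE policy_hash = 'sha256:deadbeef'
    RETURNING verikey
)
INSERT INTO provcache.prov_revocations (revocation_type, target_hash, reason, actor, entries_affected)
SELECT 'policy', 'sha256:deadbeef', 'policy updated', 'ops@stellaops', COUNT(*)
FROM removed;
*/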

View File

@@ -1,159 +0,0 @@
-- RLS Validation Script
-- Sprint: SPRINT_3421_0001_0001 - RLS Expansion
--
-- Purpose: Verify that RLS is properly configured on all tenant-scoped tables
-- Run this script after deploying RLS migrations to validate configuration
-- ============================================================================
-- Part 1: List all tables with RLS status
-- ============================================================================
\echo '=== RLS Status for All Schemas ==='
SELECT
schemaname AS schema,
tablename AS table_name,
rowsecurity AS rls_enabled,
forcerowsecurity AS rls_forced,
CASE
WHEN rowsecurity AND forcerowsecurity THEN 'OK'
WHEN rowsecurity AND NOT forcerowsecurity THEN 'WARN: Not forced'
ELSE 'MISSING'
END AS status
FROM pg_tables
WHERE schemaname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns')
ORDER BY schemaname, tablename;
-- ============================================================================
-- Part 2: List all RLS policies
-- ============================================================================
\echo ''
\echo '=== RLS Policies ==='
SELECT
schemaname AS schema,
tablename AS table_name,
policyname AS policy_name,
permissive,
roles,
cmd AS applies_to,
qual IS NOT NULL AS has_using,
with_check IS NOT NULL AS has_check
FROM pg_policies
WHERE schemaname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns')
ORDER BY schemaname, tablename, policyname;
-- ============================================================================
-- Part 3: Tables missing RLS that should have it (have tenant_id column)
-- ============================================================================
\echo ''
\echo '=== Tables with tenant_id but NO RLS ==='
SELECT
c.table_schema AS schema,
c.table_name AS table_name,
'MISSING RLS' AS issue
FROM information_schema.columns c
JOIN pg_tables t ON c.table_schema = t.schemaname AND c.table_name = t.tablename
WHERE c.column_name IN ('tenant_id', 'tenant')
AND c.table_schema IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns')
AND NOT t.rowsecurity
ORDER BY c.table_schema, c.table_name;
-- ============================================================================
-- Part 4: Verify helper functions exist
-- ============================================================================
\echo ''
\echo '=== RLS Helper Functions ==='
SELECT
n.nspname AS schema,
p.proname AS function_name,
CASE
WHEN p.prosecdef THEN 'SECURITY DEFINER'
ELSE 'SECURITY INVOKER'
END AS security,
CASE
WHEN p.provolatile = 's' THEN 'STABLE'
WHEN p.provolatile = 'i' THEN 'IMMUTABLE'
ELSE 'VOLATILE'
END AS volatility
FROM pg_proc p
JOIN pg_namespace n ON p.pronamespace = n.oid
WHERE p.proname = 'require_current_tenant'
AND n.nspname LIKE '%_app'
ORDER BY n.nspname;
-- ============================================================================
-- Part 5: Test RLS enforcement (expect failure without tenant context)
-- ============================================================================
\echo ''
\echo '=== RLS Enforcement Test ==='
\echo 'Testing RLS on scheduler.runs (should fail without tenant context)...'
-- Reset tenant context
SELECT set_config('app.tenant_id', '', false);
DO $$
BEGIN
-- This should raise an exception if RLS is working
PERFORM * FROM scheduler.runs LIMIT 1;
RAISE NOTICE 'WARNING: Query succeeded without tenant context - RLS may not be working!';
EXCEPTION
WHEN OTHERS THEN
RAISE NOTICE 'OK: RLS blocked query without tenant context: %', SQLERRM;
END
$$;
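\echo 'Testing RLS on scheduler.runs with a tenant context set (illustrative tenant id)...'
-- Positive check (sketch): with app.tenant_id set, the same query should be
-- allowed and scoped to that tenant. 'tenant-smoke-test' is a placeholder.
DO $$
BEGIN
    PERFORM set_config('app.tenant_id', 'tenant-smoke-test', true);
    PERFORM * FROM scheduler.runs LIMIT 1;
    RAISE NOTICE 'OK: query permitted with tenant context set';
EXCEPTION
    WHEN OTHERS THEN
        RAISE NOTICE 'WARNING: query failed even with tenant context: %', SQLERRM;
END
$$;
-- Clear the test tenant context again
SELECT set_config('app.tenant_id', '', false);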
-- ============================================================================
-- Part 6: Admin bypass role verification
-- ============================================================================
\echo ''
\echo '=== Admin Bypass Roles ==='
SELECT
rolname AS role_name,
rolbypassrls AS can_bypass_rls,
rolcanlogin AS can_login
FROM pg_roles
WHERE rolname LIKE '%_admin'
AND rolbypassrls = TRUE
ORDER BY rolname;
-- ============================================================================
-- Summary
-- ============================================================================
\echo ''
\echo '=== Summary ==='
SELECT
'Total Tables' AS metric,
COUNT(*)::TEXT AS value
FROM pg_tables
WHERE schemaname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns')
UNION ALL
SELECT
'Tables with RLS Enabled',
COUNT(*)::TEXT
FROM pg_tables
WHERE schemaname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns')
AND rowsecurity = TRUE
UNION ALL
SELECT
'Tables with RLS Forced',
COUNT(*)::TEXT
FROM pg_tables
WHERE schemaname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns')
AND forcerowsecurity = TRUE
UNION ALL
SELECT
'Active Policies',
COUNT(*)::TEXT
FROM pg_policies
WHERE schemaname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns');

View File

@@ -1,238 +0,0 @@
-- Partition Validation Script
-- Sprint: SPRINT_3422_0001_0001 - Time-Based Partitioning
--
-- Purpose: Verify that partitioned tables are properly configured and healthy
-- ============================================================================
-- Part 1: List all partitioned tables
-- ============================================================================
\echo '=== Partitioned Tables ==='
SELECT
n.nspname AS schema,
c.relname AS table_name,
CASE pt.partstrat
WHEN 'r' THEN 'RANGE'
WHEN 'l' THEN 'LIST'
WHEN 'h' THEN 'HASH'
END AS partition_strategy,
array_to_string(array_agg(a.attname ORDER BY k.idx), ', ') AS partition_key
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
JOIN pg_partitioned_table pt ON c.oid = pt.partrelid
JOIN LATERAL unnest(pt.partattrs) WITH ORDINALITY AS k(col, idx) ON true
LEFT JOIN pg_attribute a ON a.attrelid = c.oid AND a.attnum = k.col
WHERE n.nspname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns', 'vuln')
GROUP BY n.nspname, c.relname, pt.partstrat
ORDER BY n.nspname, c.relname;
-- ============================================================================
-- Part 2: Partition inventory with sizes
-- ============================================================================
\echo ''
\echo '=== Partition Inventory ==='
SELECT
n.nspname AS schema,
parent.relname AS parent_table,
c.relname AS partition_name,
pg_get_expr(c.relpartbound, c.oid) AS bounds,
pg_size_pretty(pg_relation_size(c.oid)) AS size,
s.n_live_tup AS estimated_rows
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
JOIN pg_inherits i ON c.oid = i.inhrelid
JOIN pg_class parent ON i.inhparent = parent.oid
LEFT JOIN pg_stat_user_tables s ON c.oid = s.relid
WHERE n.nspname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns', 'vuln')
AND c.relkind = 'r'
AND parent.relkind = 'p'
ORDER BY n.nspname, parent.relname, c.relname;
-- ============================================================================
-- Part 3: Check for missing future partitions
-- ============================================================================
\echo ''
\echo '=== Future Partition Coverage ==='
WITH partition_bounds AS (
SELECT
n.nspname AS schema_name,
parent.relname AS table_name,
c.relname AS partition_name,
-- Extract the TO date from partition bound
(regexp_match(pg_get_expr(c.relpartbound, c.oid), 'TO \(''([^'']+)''\)'))[1]::DATE AS end_date
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
JOIN pg_inherits i ON c.oid = i.inhrelid
JOIN pg_class parent ON i.inhparent = parent.oid
WHERE c.relkind = 'r'
AND parent.relkind = 'p'
AND c.relname NOT LIKE '%_default'
),
max_bounds AS (
SELECT
schema_name,
table_name,
MAX(end_date) AS max_partition_date
FROM partition_bounds
WHERE end_date IS NOT NULL
GROUP BY schema_name, table_name
)
SELECT
schema_name,
table_name,
max_partition_date,
(max_partition_date - CURRENT_DATE) AS days_ahead,
CASE
WHEN (max_partition_date - CURRENT_DATE) < 30 THEN 'CRITICAL: Create partitions!'
WHEN (max_partition_date - CURRENT_DATE) < 60 THEN 'WARNING: Running low'
ELSE 'OK'
END AS status
FROM max_bounds
ORDER BY days_ahead;
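-- Remediation sketch (commented out): when coverage runs low, future partitions
-- can be pre-created with the partition_mgmt helpers, assuming the
-- partition-management migration is installed and 'created_at' is the
-- partition key of the affected table.
/*
SELECT partition_mgmt.create_monthly_partitions(
    'scheduler', 'audit', 'created_at', CURRENT_DATE, 3
);
*/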
-- ============================================================================
-- Part 4: Check for orphaned data in default partitions
-- ============================================================================
\echo ''
\echo '=== Default Partition Data (should be empty) ==='
DO $$
DECLARE
v_schema TEXT;
v_table TEXT;
v_count BIGINT;
v_sql TEXT;
BEGIN
FOR v_schema, v_table IN
SELECT n.nspname, c.relname
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE c.relname LIKE '%_default'
AND n.nspname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns', 'vuln')
LOOP
v_sql := format('SELECT COUNT(*) FROM %I.%I', v_schema, v_table);
EXECUTE v_sql INTO v_count;
IF v_count > 0 THEN
RAISE NOTICE 'WARNING: %.% has % rows in default partition!',
v_schema, v_table, v_count;
ELSE
RAISE NOTICE 'OK: %.% is empty', v_schema, v_table;
END IF;
END LOOP;
END
$$;
-- ============================================================================
-- Part 5: Index health on partitions
-- ============================================================================
\echo ''
\echo '=== Partition Index Coverage ==='
SELECT
schemaname AS schema,
tablename AS table_name,
indexname AS index_name,
indexdef
FROM pg_indexes
WHERE schemaname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns', 'vuln')
  AND (tablename LIKE '%_partitioned' OR tablename LIKE '%_202%')
ORDER BY schemaname, tablename, indexname;
-- ============================================================================
-- Part 6: BRIN index effectiveness check
-- ============================================================================
\echo ''
\echo '=== BRIN Index Statistics ==='
SELECT
schemaname AS schema,
relname AS table_name,
indexrelname AS index_name,
idx_scan AS scans,
idx_tup_read AS tuples_read,
idx_tup_fetch AS tuples_fetched,
pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
WHERE indexrelname LIKE 'brin_%'
ORDER BY schemaname, relname;
-- ============================================================================
-- Part 7: Partition maintenance recommendations
-- ============================================================================
\echo ''
\echo '=== Maintenance Recommendations ==='
WITH partition_ages AS (
SELECT
n.nspname AS schema_name,
parent.relname AS table_name,
c.relname AS partition_name,
(regexp_match(pg_get_expr(c.relpartbound, c.oid), 'FROM \(''([^'']+)''\)'))[1]::DATE AS start_date,
(regexp_match(pg_get_expr(c.relpartbound, c.oid), 'TO \(''([^'']+)''\)'))[1]::DATE AS end_date
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
JOIN pg_inherits i ON c.oid = i.inhrelid
JOIN pg_class parent ON i.inhparent = parent.oid
WHERE c.relkind = 'r'
AND parent.relkind = 'p'
AND c.relname NOT LIKE '%_default'
)
SELECT
schema_name,
table_name,
partition_name,
start_date,
end_date,
(CURRENT_DATE - end_date) AS days_old,
CASE
WHEN (CURRENT_DATE - end_date) > 365 THEN 'Consider archiving (>1 year old)'
WHEN (CURRENT_DATE - end_date) > 180 THEN 'Review retention policy (>6 months old)'
ELSE 'Current'
END AS recommendation
FROM partition_ages
WHERE start_date IS NOT NULL
ORDER BY schema_name, table_name, start_date;
-- ============================================================================
-- Summary
-- ============================================================================
\echo ''
\echo '=== Summary ==='
SELECT
'Partitioned Tables' AS metric,
COUNT(DISTINCT parent.relname)::TEXT AS value
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
JOIN pg_inherits i ON c.oid = i.inhrelid
JOIN pg_class parent ON i.inhparent = parent.oid
WHERE n.nspname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns', 'vuln')
AND parent.relkind = 'p'
UNION ALL
SELECT
'Total Partitions',
COUNT(*)::TEXT
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
JOIN pg_inherits i ON c.oid = i.inhrelid
JOIN pg_class parent ON i.inhparent = parent.oid
WHERE n.nspname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns', 'vuln')
AND parent.relkind = 'p'
UNION ALL
SELECT
'BRIN Indexes',
COUNT(*)::TEXT
FROM pg_indexes
WHERE indexname LIKE 'brin_%'
AND schemaname IN ('scheduler', 'notify', 'authority', 'vex', 'policy', 'unknowns', 'vuln');

View File

@@ -1,66 +0,0 @@
# PostgreSQL 16 Cluster (staging / production)
This directory provisions StellaOps PostgreSQL clusters with **CloudNativePG (CNPG)**. It is pinned to Postgres 16.x, includes connection pooling (PgBouncer), Prometheus scraping, and S3-compatible backups. Everything is air-gap friendly: fetch the operator and images once, then render/apply manifests offline.
## Targets
- **Staging:** `stellaops-pg-stg` (2 instances, 200Gi data, WAL 64Gi, PgBouncer x2)
- **Production:** `stellaops-pg-prod` (3 instances, 500Gi data, WAL 128Gi, PgBouncer x3)
- **Namespace:** `platform-postgres`
## Prerequisites
- Kubernetes ≥ 1.27 with CSI storage classes `fast-ssd` (data) and `fast-wal` (WAL) available.
- CloudNativePG operator 1.23.x mirrored or downloaded to `artifacts/cloudnative-pg-1.23.0.yaml`.
- Images mirrored to your registry (example tags):
- `ghcr.io/cloudnative-pg/postgresql:16.4`
- `ghcr.io/cloudnative-pg/postgresql-operator:1.23.0`
- `ghcr.io/cloudnative-pg/pgbouncer:1.23.0`
- Secrets created from the templates under `ops/devops/postgres/secrets/` (superuser, app user, backup credentials).
## Render & Apply (deterministic)
```bash
# 1) Create namespace
kubectl apply -f ops/devops/postgres/namespace.yaml
# 2) Install operator (offline-friendly: use the pinned manifest you mirrored)
kubectl apply -f artifacts/cloudnative-pg-1.23.0.yaml
# 3) Create secrets (replace passwords/keys first)
kubectl apply -f ops/devops/postgres/secrets/example-superuser.yaml
kubectl apply -f ops/devops/postgres/secrets/example-app.yaml
kubectl apply -f ops/devops/postgres/secrets/example-backup-credentials.yaml
# 4) Apply the cluster and pooler for the target environment
kubectl apply -f ops/devops/postgres/cluster-staging.yaml
kubectl apply -f ops/devops/postgres/pooler-staging.yaml
# or
kubectl apply -f ops/devops/postgres/cluster-production.yaml
kubectl apply -f ops/devops/postgres/pooler-production.yaml
```
## Connection Endpoints
- RW service: `<cluster>-rw` (e.g., `stellaops-pg-stg-rw:5432`)
- RO service: `<cluster>-ro`
- PgBouncer pooler: `<pooler-name>` (e.g., `stellaops-pg-stg-pooler:6432`)
**Application connection string (matches library defaults):**
`Host=stellaops-pg-stg-pooler;Port=6432;Username=stellaops_app;Password=<app-password>;Database=stellaops;Pooling=true;Timeout=15;CommandTimeout=30;Ssl Mode=Require;`
## Monitoring & Backups
- `monitoring.enablePodMonitor: true` exposes PodMonitor for Prometheus Operator.
- Barman/S3 backups are enabled by default; set `backup.barmanObjectStore.destinationPath` per env and populate `stellaops-pg-backup` credentials.
- WAL compression is `gzip`; retention is operator-managed (configure via Barman bucket policies).
## Alignment with code defaults
- Session settings: UTC timezone, 30s `statement_timeout`, tenant context via `set_config('app.current_tenant', ...)` (see the sketch below).
- Connection pooler uses **transaction** mode with a `server_reset_query` that clears session state, keeping RepositoryBase deterministic.
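As a rough, illustrative sketch of the per-session defaults these bullets describe (the actual statements are issued by RepositoryBase and may differ):

```sql
-- Illustrative session setup; the real calls are issued by RepositoryBase.
SET TIME ZONE 'UTC';
SET statement_timeout = '30s';
-- Tenant scoping for RLS; 'tenant-123' is a placeholder value.
SELECT set_config('app.current_tenant', 'tenant-123', false);
```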
## Verification checklist
- `kubectl get cluster -n platform-postgres` shows `Ready` replicas matching `instances`.
- `kubectl logs deploy/cnpg-controller-manager -n cnpg-system` has no failing webhooks.
- `kubectl get podmonitor -n platform-postgres` returns entries for the cluster and pooler.
- `psql "<rw-connection-string>" -c 'select 1'` works from the CI runner subnet.
- `cnpg` `barman-cloud-backup-list` shows successful full + WAL backups.
## Offline notes
- Mirror the operator manifest and container images to the approved registry first; no live downloads occur at runtime.
- If Prometheus is not present, leave PodMonitor applied; it is inert without the CRD.

View File

@@ -1,57 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: stellaops-pg-prod
namespace: platform-postgres
spec:
instances: 3
imageName: ghcr.io/cloudnative-pg/postgresql:16.4
primaryUpdateStrategy: unsupervised
storage:
size: 500Gi
storageClass: fast-ssd
walStorage:
size: 128Gi
storageClass: fast-wal
superuserSecret:
name: stellaops-pg-superuser
bootstrap:
initdb:
database: stellaops
owner: stellaops_app
secret:
name: stellaops-pg-app
monitoring:
enablePodMonitor: true
postgresql:
parameters:
max_connections: "900"
shared_buffers: "4096MB"
work_mem: "96MB"
maintenance_work_mem: "768MB"
wal_level: "replica"
max_wal_size: "4GB"
timezone: "UTC"
log_min_duration_statement: "250"
statement_timeout: "30000"
resources:
requests:
cpu: "4"
memory: "16Gi"
limits:
cpu: "8"
memory: "24Gi"
backup:
barmanObjectStore:
destinationPath: s3://stellaops-backups/production
s3Credentials:
accessKeyId:
name: stellaops-pg-backup
key: ACCESS_KEY_ID
secretAccessKey:
name: stellaops-pg-backup
key: SECRET_ACCESS_KEY
wal:
compression: gzip
maxParallel: 4
logLevel: info

View File

@@ -1,57 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: stellaops-pg-stg
namespace: platform-postgres
spec:
instances: 2
imageName: ghcr.io/cloudnative-pg/postgresql:16.4
primaryUpdateStrategy: unsupervised
storage:
size: 200Gi
storageClass: fast-ssd
walStorage:
size: 64Gi
storageClass: fast-wal
superuserSecret:
name: stellaops-pg-superuser
bootstrap:
initdb:
database: stellaops
owner: stellaops_app
secret:
name: stellaops-pg-app
monitoring:
enablePodMonitor: true
postgresql:
parameters:
max_connections: "600"
shared_buffers: "2048MB"
work_mem: "64MB"
maintenance_work_mem: "512MB"
wal_level: "replica"
max_wal_size: "2GB"
timezone: "UTC"
log_min_duration_statement: "500"
statement_timeout: "30000"
resources:
requests:
cpu: "2"
memory: "8Gi"
limits:
cpu: "4"
memory: "12Gi"
backup:
barmanObjectStore:
destinationPath: s3://stellaops-backups/staging
s3Credentials:
accessKeyId:
name: stellaops-pg-backup
key: ACCESS_KEY_ID
secretAccessKey:
name: stellaops-pg-backup
key: SECRET_ACCESS_KEY
wal:
compression: gzip
maxParallel: 2
logLevel: info

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: platform-postgres

View File

@@ -1,29 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
name: stellaops-pg-prod-pooler
namespace: platform-postgres
spec:
cluster:
name: stellaops-pg-prod
instances: 3
type: rw
pgbouncer:
parameters:
pool_mode: transaction
max_client_conn: "1500"
default_pool_size: "80"
server_reset_query: "RESET ALL; SET SESSION AUTHORIZATION DEFAULT; SET TIME ZONE 'UTC';"
authQuerySecret:
name: stellaops-pg-app
template:
spec:
containers:
- name: pgbouncer
resources:
requests:
cpu: "150m"
memory: "192Mi"
limits:
cpu: "750m"
memory: "384Mi"

View File

@@ -1,29 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
name: stellaops-pg-stg-pooler
namespace: platform-postgres
spec:
cluster:
name: stellaops-pg-stg
instances: 2
type: rw
pgbouncer:
parameters:
pool_mode: transaction
max_client_conn: "800"
default_pool_size: "50"
server_reset_query: "RESET ALL; SET SESSION AUTHORIZATION DEFAULT; SET TIME ZONE 'UTC';"
authQuerySecret:
name: stellaops-pg-app
template:
spec:
containers:
- name: pgbouncer
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "500m"
memory: "256Mi"

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: stellaops-pg-app
namespace: platform-postgres
type: kubernetes.io/basic-auth
stringData:
username: stellaops_app
password: CHANGEME_APP_PASSWORD

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: stellaops-pg-backup
namespace: platform-postgres
type: Opaque
stringData:
ACCESS_KEY_ID: CHANGEME_ACCESS_KEY
SECRET_ACCESS_KEY: CHANGEME_SECRET_KEY

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: stellaops-pg-superuser
namespace: platform-postgres
type: kubernetes.io/basic-auth
stringData:
username: postgres
password: CHANGEME_SUPERUSER_PASSWORD

View File

@@ -1,173 +0,0 @@
# Dockerfile.ci - Local CI testing container matching Gitea runner environment
# Sprint: SPRINT_20251226_006_CICD
#
# Usage:
# docker build -t stellaops-ci:local -f devops/docker/Dockerfile.ci .
# docker run --rm -v $(pwd):/src stellaops-ci:local ./devops/scripts/test-local.sh
FROM ubuntu:22.04
LABEL org.opencontainers.image.title="StellaOps CI"
LABEL org.opencontainers.image.description="Local CI testing environment matching Gitea runner"
LABEL org.opencontainers.image.source="https://git.stella-ops.org/stella-ops.org/git.stella-ops.org"
# Environment variables
ENV DEBIAN_FRONTEND=noninteractive
ENV DOTNET_VERSION=10.0.100
ENV NODE_VERSION=20
ENV HELM_VERSION=3.16.0
ENV COSIGN_VERSION=3.0.4
ENV REKOR_VERSION=1.4.3
ENV TZ=UTC
# Disable .NET telemetry
ENV DOTNET_NOLOGO=1
ENV DOTNET_CLI_TELEMETRY_OPTOUT=1
# .NET paths
ENV DOTNET_ROOT=/usr/share/dotnet
ENV PATH="/usr/share/dotnet:/root/.dotnet/tools:${PATH}"
# ===========================================================================
# BASE DEPENDENCIES
# ===========================================================================
RUN apt-get update && apt-get install -y --no-install-recommends \
# Core utilities
curl \
wget \
gnupg2 \
ca-certificates \
git \
unzip \
jq \
# Build tools
build-essential \
# Cross-compilation
binutils-aarch64-linux-gnu \
# Python (for scripts)
python3 \
python3-pip \
# .NET dependencies
libicu70 \
# Locales
locales \
&& rm -rf /var/lib/apt/lists/*
# ===========================================================================
# DOCKER CLI & COMPOSE (from official Docker repo)
# ===========================================================================
RUN install -m 0755 -d /etc/apt/keyrings \
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc \
&& chmod a+r /etc/apt/keyrings/docker.asc \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu jammy stable" > /etc/apt/sources.list.d/docker.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends docker-ce-cli docker-compose-plugin \
&& rm -rf /var/lib/apt/lists/* \
&& docker --version
# Set locale
RUN locale-gen en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US:en
ENV LC_ALL=en_US.UTF-8
# ===========================================================================
# POSTGRESQL CLIENT 16
# ===========================================================================
RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/postgresql-archive-keyring.gpg \
&& echo "deb [signed-by=/usr/share/keyrings/postgresql-archive-keyring.gpg] http://apt.postgresql.org/pub/repos/apt jammy-pgdg main" > /etc/apt/sources.list.d/pgdg.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends postgresql-client-16 \
&& rm -rf /var/lib/apt/lists/*
# ===========================================================================
# .NET 10 SDK
# ===========================================================================
RUN curl -fsSL https://dot.net/v1/dotnet-install.sh -o /tmp/dotnet-install.sh \
&& chmod +x /tmp/dotnet-install.sh \
&& /tmp/dotnet-install.sh --version ${DOTNET_VERSION} --install-dir /usr/share/dotnet \
&& rm /tmp/dotnet-install.sh \
&& dotnet --version
# Install common .NET tools
RUN dotnet tool install -g trx2junit \
&& dotnet tool install -g dotnet-reportgenerator-globaltool
# ===========================================================================
# NODE.JS 20
# ===========================================================================
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
&& apt-get install -y --no-install-recommends nodejs \
&& rm -rf /var/lib/apt/lists/* \
&& node --version \
&& npm --version
# ===========================================================================
# HELM 3.16.0
# ===========================================================================
RUN curl -fsSL https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz | \
tar -xzf - -C /tmp \
&& mv /tmp/linux-amd64/helm /usr/local/bin/helm \
&& rm -rf /tmp/linux-amd64 \
&& helm version
# ===========================================================================
# COSIGN
# ===========================================================================
RUN curl -fsSL https://github.com/sigstore/cosign/releases/download/v${COSIGN_VERSION}/cosign-linux-amd64 \
-o /usr/local/bin/cosign \
&& chmod +x /usr/local/bin/cosign \
&& cosign version
# ===========================================================================
# REKOR CLI
# ===========================================================================
RUN curl -fsSL https://github.com/sigstore/rekor/releases/download/v${REKOR_VERSION}/rekor-cli-linux-amd64 \
-o /usr/local/bin/rekor-cli \
&& chmod +x /usr/local/bin/rekor-cli \
&& rekor-cli version
# ===========================================================================
# SYFT (SBOM generation)
# ===========================================================================
RUN curl -fsSL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
# ===========================================================================
# SETUP
# ===========================================================================
WORKDIR /src
# Create non-root user for safer execution (optional)
RUN useradd -m -s /bin/bash ciuser \
&& mkdir -p /home/ciuser/.dotnet/tools \
&& chown -R ciuser:ciuser /home/ciuser
# Health check script
RUN printf '%s\n' \
'#!/bin/bash' \
'set -e' \
'echo "=== CI Environment Health Check ==="' \
'echo "OS: $(cat /etc/os-release | grep PRETTY_NAME | cut -d= -f2)"' \
'echo ".NET: $(dotnet --version)"' \
'echo "Node: $(node --version)"' \
'echo "npm: $(npm --version)"' \
'echo "Helm: $(helm version --short)"' \
'echo "Cosign: $(cosign version 2>&1 | head -1)"' \
'echo "Rekor CLI: $(rekor-cli version 2>&1 | head -1)"' \
'echo "Docker: $(docker --version 2>/dev/null || echo Not available)"' \
'echo "PostgreSQL client: $(psql --version)"' \
'echo "=== All checks passed ==="' \
> /usr/local/bin/ci-health-check \
&& chmod +x /usr/local/bin/ci-health-check
ENTRYPOINT ["/bin/bash"]

View File

@@ -1,40 +0,0 @@
# syntax=docker/dockerfile:1.7
# Multi-stage Angular console image with non-root runtime (DOCKER-44-001)
ARG NODE_IMAGE=node:20-bullseye-slim
ARG NGINX_IMAGE=nginxinc/nginx-unprivileged:1.27-alpine
ARG APP_DIR=src/Web/StellaOps.Web
ARG DIST_DIR=dist
ARG APP_PORT=8080
FROM ${NODE_IMAGE} AS build
ENV npm_config_fund=false npm_config_audit=false SOURCE_DATE_EPOCH=1704067200
# Re-declare global ARGs inside the stage so COPY/RUN below see their values
ARG APP_DIR
ARG DIST_DIR
WORKDIR /app
COPY ${APP_DIR}/package*.json ./
RUN npm ci --prefer-offline --no-progress --cache .npm
COPY ${APP_DIR}/ ./
RUN npm run build -- --configuration=production --output-path=${DIST_DIR}
FROM ${NGINX_IMAGE} AS runtime
ARG APP_PORT
ARG DIST_DIR
ENV APP_PORT=${APP_PORT}
USER 101
WORKDIR /
COPY --from=build /app/${DIST_DIR}/ /usr/share/nginx/html/
COPY ops/devops/docker/healthcheck-frontend.sh /usr/local/bin/healthcheck-frontend.sh
RUN rm -f /etc/nginx/conf.d/default.conf && \
cat > /etc/nginx/conf.d/default.conf <<CONF
server {
listen ${APP_PORT};
listen [::]:${APP_PORT};
server_name _;
root /usr/share/nginx/html;
location / {
try_files \$uri \$uri/ /index.html;
}
}
CONF
EXPOSE ${APP_PORT}
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
CMD /usr/local/bin/healthcheck-frontend.sh
CMD ["nginx","-g","daemon off;"]

View File

@@ -1,172 +0,0 @@
# syntax=docker/dockerfile:1.4
# StellaOps Regional Crypto Profile
# Selects regional cryptographic configuration at build time
# ============================================================================
# Build Arguments
# ============================================================================
ARG CRYPTO_PROFILE=international
ARG BASE_IMAGE=stellaops/platform:latest
ARG SERVICE_NAME=authority
# ============================================================================
# Regional Crypto Profile Layer
# ============================================================================
FROM ${BASE_IMAGE} AS regional-profile
# Copy regional cryptographic configuration
ARG CRYPTO_PROFILE
COPY etc/appsettings.crypto.${CRYPTO_PROFILE}.yaml /app/etc/appsettings.crypto.yaml
COPY etc/crypto-plugins-manifest.json /app/etc/crypto-plugins-manifest.json
# Set environment variable for runtime verification
ENV STELLAOPS_CRYPTO_PROFILE=${CRYPTO_PROFILE}
ENV STELLAOPS_CRYPTO_CONFIG_PATH=/app/etc/appsettings.crypto.yaml
ENV STELLAOPS_CRYPTO_MANIFEST_PATH=/app/etc/crypto-plugins-manifest.json
# Add labels for metadata
LABEL com.stellaops.crypto.profile="${CRYPTO_PROFILE}"
LABEL com.stellaops.crypto.config="/app/etc/appsettings.crypto.${CRYPTO_PROFILE}.yaml"
LABEL com.stellaops.crypto.runtime-selection="true"
# ============================================================================
# Service-Specific Regional Images
# ============================================================================
# Authority with Regional Crypto
FROM regional-profile AS authority
WORKDIR /app/authority
ENTRYPOINT ["dotnet", "StellaOps.Authority.WebService.dll"]
# Signer with Regional Crypto
FROM regional-profile AS signer
WORKDIR /app/signer
ENTRYPOINT ["dotnet", "StellaOps.Signer.WebService.dll"]
# Attestor with Regional Crypto
FROM regional-profile AS attestor
WORKDIR /app/attestor
ENTRYPOINT ["dotnet", "StellaOps.Attestor.WebService.dll"]
# Concelier with Regional Crypto
FROM regional-profile AS concelier
WORKDIR /app/concelier
ENTRYPOINT ["dotnet", "StellaOps.Concelier.WebService.dll"]
# Scanner with Regional Crypto
FROM regional-profile AS scanner
WORKDIR /app/scanner
ENTRYPOINT ["dotnet", "StellaOps.Scanner.WebService.dll"]
# Excititor with Regional Crypto
FROM regional-profile AS excititor
WORKDIR /app/excititor
ENTRYPOINT ["dotnet", "StellaOps.Excititor.WebService.dll"]
# Policy with Regional Crypto
FROM regional-profile AS policy
WORKDIR /app/policy
ENTRYPOINT ["dotnet", "StellaOps.Policy.WebService.dll"]
# Scheduler with Regional Crypto
FROM regional-profile AS scheduler
WORKDIR /app/scheduler
ENTRYPOINT ["dotnet", "StellaOps.Scheduler.WebService.dll"]
# Notify with Regional Crypto
FROM regional-profile AS notify
WORKDIR /app/notify
ENTRYPOINT ["dotnet", "StellaOps.Notify.WebService.dll"]
# Zastava with Regional Crypto
FROM regional-profile AS zastava
WORKDIR /app/zastava
ENTRYPOINT ["dotnet", "StellaOps.Zastava.WebService.dll"]
# Gateway with Regional Crypto
FROM regional-profile AS gateway
WORKDIR /app/gateway
ENTRYPOINT ["dotnet", "StellaOps.Gateway.WebService.dll"]
# AirGap Importer with Regional Crypto
FROM regional-profile AS airgap-importer
WORKDIR /app/airgap-importer
ENTRYPOINT ["dotnet", "StellaOps.AirGap.Importer.dll"]
# AirGap Exporter with Regional Crypto
FROM regional-profile AS airgap-exporter
WORKDIR /app/airgap-exporter
ENTRYPOINT ["dotnet", "StellaOps.AirGap.Exporter.dll"]
# CLI with Regional Crypto
FROM regional-profile AS cli
WORKDIR /app/cli
ENTRYPOINT ["dotnet", "StellaOps.Cli.dll"]
# ============================================================================
# Build Instructions
# ============================================================================
# Build international profile (default):
# docker build -f deploy/docker/Dockerfile.crypto-profile \
# --build-arg CRYPTO_PROFILE=international \
# --target authority \
# -t stellaops/authority:international .
#
# Build Russia (GOST) profile:
# docker build -f deploy/docker/Dockerfile.crypto-profile \
# --build-arg CRYPTO_PROFILE=russia \
# --target scanner \
# -t stellaops/scanner:russia .
#
# Build EU (eIDAS) profile:
# docker build -f deploy/docker/Dockerfile.crypto-profile \
# --build-arg CRYPTO_PROFILE=eu \
# --target signer \
# -t stellaops/signer:eu .
#
# Build China (SM) profile:
# docker build -f deploy/docker/Dockerfile.crypto-profile \
# --build-arg CRYPTO_PROFILE=china \
# --target attestor \
# -t stellaops/attestor:china .
#
# ============================================================================
# Regional Profile Descriptions
# ============================================================================
# international: Default NIST algorithms (ES256, RS256, SHA-256)
# Uses offline-verification plugin
# Jurisdiction: world
#
# russia: GOST R 34.10-2012, GOST R 34.11-2012
# Uses CryptoPro CSP plugin
# Jurisdiction: russia
# Requires: CryptoPro CSP SDK
#
# eu: eIDAS-compliant qualified trust services
# Uses eIDAS plugin with qualified certificates
# Jurisdiction: eu
# Requires: eIDAS trust service provider integration
#
# china: SM2, SM3, SM4 algorithms
# Uses SM crypto plugin
# Jurisdiction: china
# Requires: GmSSL or BouncyCastle SM extensions
#
# ============================================================================
# Runtime Configuration
# ============================================================================
# The crypto provider is selected at runtime based on:
# 1. STELLAOPS_CRYPTO_PROFILE environment variable
# 2. /app/etc/appsettings.crypto.yaml configuration file
# 3. /app/etc/crypto-plugins-manifest.json plugin metadata
#
# Plugin loading sequence:
# 1. Application starts
# 2. CryptoPluginLoader reads /app/etc/appsettings.crypto.yaml
# 3. Loads enabled plugins from manifest
# 4. Validates platform compatibility
# 5. Validates jurisdiction compliance
# 6. Registers providers with DI container
# 7. Application uses ICryptoProvider abstraction
#
# No cryptographic code is executed until runtime plugin selection completes.

View File

@@ -1,56 +0,0 @@
# syntax=docker/dockerfile:1.7
# Hardened multi-stage template for StellaOps services
# Parameters are build-time ARGs so this file can be re-used across services.
ARG SDK_IMAGE=mcr.microsoft.com/dotnet/sdk:10.0-bookworm-slim
ARG RUNTIME_IMAGE=mcr.microsoft.com/dotnet/aspnet:10.0-bookworm-slim
ARG APP_PROJECT=src/Service/Service.csproj
ARG CONFIGURATION=Release
ARG PUBLISH_DIR=/app/publish
ARG APP_BINARY=StellaOps.Service
ARG APP_USER=stella
ARG APP_UID=10001
ARG APP_GID=10001
ARG APP_PORT=8080
FROM ${SDK_IMAGE} AS build
ENV DOTNET_CLI_TELEMETRY_OPTOUT=1 \
DOTNET_NOLOGO=1 \
SOURCE_DATE_EPOCH=1704067200
# Re-declare global ARGs inside the build stage so restore/publish see their values
ARG APP_PROJECT
ARG CONFIGURATION
ARG PUBLISH_DIR
WORKDIR /src
# Expect restore sources to be available offline via /.nuget/
COPY . .
RUN dotnet restore ${APP_PROJECT} --packages /.nuget/packages && \
dotnet publish ${APP_PROJECT} -c ${CONFIGURATION} -o ${PUBLISH_DIR} \
/p:UseAppHost=true /p:PublishTrimmed=false
FROM ${RUNTIME_IMAGE} AS runtime
# Re-declare global ARGs inside the runtime stage
ARG PUBLISH_DIR
ARG APP_BINARY
ARG APP_USER
ARG APP_UID
ARG APP_GID
ARG APP_PORT
# Create non-root user/group with stable ids for auditability
RUN groupadd -r -g ${APP_GID} ${APP_USER} && \
useradd -r -u ${APP_UID} -g ${APP_GID} -d /var/lib/${APP_USER} ${APP_USER} && \
mkdir -p /app /var/lib/${APP_USER} /var/run/${APP_USER} /tmp && \
chown -R ${APP_UID}:${APP_GID} /app /var/lib/${APP_USER} /var/run/${APP_USER} /tmp
WORKDIR /app
COPY --from=build --chown=${APP_UID}:${APP_GID} ${PUBLISH_DIR}/ ./
# Ship healthcheck helper; callers may override with their own script
COPY --chown=${APP_UID}:${APP_GID} ops/devops/docker/healthcheck.sh /usr/local/bin/healthcheck.sh
ENV ASPNETCORE_URLS=http://+:${APP_PORT} \
DOTNET_EnableDiagnostics=0 \
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1 \
COMPlus_EnableDiagnostics=0 \
APP_BINARY=${APP_BINARY}
USER ${APP_UID}:${APP_GID}
EXPOSE ${APP_PORT}
HEALTHCHECK --interval=30s --timeout=5s --start-period=15s --retries=3 \
CMD /usr/local/bin/healthcheck.sh
# Harden filesystem; deploys should also set readOnlyRootFilesystem true
RUN chmod 500 /app && \
find /app -maxdepth 1 -type f -exec chmod 400 {} \; && \
find /app -maxdepth 1 -type d -exec chmod 500 {} \;
# Use shell form so APP_BINARY env can be expanded without duplicating the template per service
ENTRYPOINT ["sh","-c","exec ./\"$APP_BINARY\""]

View File

@@ -1,212 +0,0 @@
# syntax=docker/dockerfile:1.4
# StellaOps Platform Image - Build Once, Deploy Everywhere
# Builds ALL crypto plugins unconditionally for runtime selection
# ============================================================================
# Stage 1: SDK Build - Build ALL Projects and Crypto Plugins
# ============================================================================
FROM mcr.microsoft.com/dotnet/sdk:10.0-preview AS build
WORKDIR /src
# Copy solution and project files for dependency restore
COPY Directory.Build.props Directory.Build.targets nuget.config ./
COPY src/StellaOps.sln ./src/
# Copy all crypto plugin projects
COPY src/__Libraries/StellaOps.Cryptography/ ./src/__Libraries/StellaOps.Cryptography/
COPY src/__Libraries/StellaOps.Cryptography.DependencyInjection/ ./src/__Libraries/StellaOps.Cryptography.DependencyInjection/
COPY src/__Libraries/StellaOps.Cryptography.PluginLoader/ ./src/__Libraries/StellaOps.Cryptography.PluginLoader/
# Crypto plugins - ALL built unconditionally
COPY src/__Libraries/StellaOps.Cryptography.Plugin.OfflineVerification/ ./src/__Libraries/StellaOps.Cryptography.Plugin.OfflineVerification/
# Note: Additional crypto plugins can be added here when available:
# COPY src/__Libraries/StellaOps.Cryptography.Plugin.eIDAS/ ./src/__Libraries/StellaOps.Cryptography.Plugin.eIDAS/
# COPY src/__Libraries/StellaOps.Cryptography.Plugin.CryptoPro/ ./src/__Libraries/StellaOps.Cryptography.Plugin.CryptoPro/
# COPY src/__Libraries/StellaOps.Cryptography.Plugin.SM/ ./src/__Libraries/StellaOps.Cryptography.Plugin.SM/
# Copy all module projects
COPY src/Authority/ ./src/Authority/
COPY src/Signer/ ./src/Signer/
COPY src/Attestor/ ./src/Attestor/
COPY src/Concelier/ ./src/Concelier/
COPY src/Scanner/ ./src/Scanner/
COPY src/AirGap/ ./src/AirGap/
COPY src/Excititor/ ./src/Excititor/
COPY src/Policy/ ./src/Policy/
COPY src/Scheduler/ ./src/Scheduler/
COPY src/Notify/ ./src/Notify/
COPY src/Zastava/ ./src/Zastava/
COPY src/Gateway/ ./src/Gateway/
COPY src/Cli/ ./src/Cli/
# Copy shared libraries
COPY src/__Libraries/ ./src/__Libraries/
# Restore dependencies
RUN dotnet restore src/StellaOps.sln
# Build entire solution (Release configuration)
RUN dotnet build src/StellaOps.sln --configuration Release --no-restore
# Publish all web services and libraries
# This creates /app/publish with all assemblies including crypto plugins
RUN dotnet publish src/Authority/StellaOps.Authority.WebService/StellaOps.Authority.WebService.csproj \
--configuration Release --no-build --output /app/publish/authority
RUN dotnet publish src/Signer/StellaOps.Signer.WebService/StellaOps.Signer.WebService.csproj \
--configuration Release --no-build --output /app/publish/signer
RUN dotnet publish src/Attestor/StellaOps.Attestor.WebService/StellaOps.Attestor.WebService.csproj \
--configuration Release --no-build --output /app/publish/attestor
RUN dotnet publish src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj \
--configuration Release --no-build --output /app/publish/concelier
RUN dotnet publish src/Scanner/StellaOps.Scanner.WebService/StellaOps.Scanner.WebService.csproj \
--configuration Release --no-build --output /app/publish/scanner
RUN dotnet publish src/Excititor/StellaOps.Excititor.WebService/StellaOps.Excititor.WebService.csproj \
--configuration Release --no-build --output /app/publish/excititor
RUN dotnet publish src/Policy/StellaOps.Policy.WebService/StellaOps.Policy.WebService.csproj \
--configuration Release --no-build --output /app/publish/policy
RUN dotnet publish src/Scheduler/StellaOps.Scheduler.WebService/StellaOps.Scheduler.WebService.csproj \
--configuration Release --no-build --output /app/publish/scheduler
RUN dotnet publish src/Notify/StellaOps.Notify.WebService/StellaOps.Notify.WebService.csproj \
--configuration Release --no-build --output /app/publish/notify
RUN dotnet publish src/Zastava/StellaOps.Zastava.WebService/StellaOps.Zastava.WebService.csproj \
--configuration Release --no-build --output /app/publish/zastava
RUN dotnet publish src/Gateway/StellaOps.Gateway.WebService/StellaOps.Gateway.WebService.csproj \
--configuration Release --no-build --output /app/publish/gateway
RUN dotnet publish src/AirGap/StellaOps.AirGap.Importer/StellaOps.AirGap.Importer.csproj \
--configuration Release --no-build --output /app/publish/airgap-importer
RUN dotnet publish src/AirGap/StellaOps.AirGap.Exporter/StellaOps.AirGap.Exporter.csproj \
--configuration Release --no-build --output /app/publish/airgap-exporter
RUN dotnet publish src/Cli/StellaOps.Cli/StellaOps.Cli.csproj \
--configuration Release --no-build --output /app/publish/cli
# Copy crypto plugin manifest
COPY etc/crypto-plugins-manifest.json /app/publish/etc/
# ============================================================================
# Stage 2: Runtime Base - Contains ALL Crypto Plugins
# ============================================================================
FROM mcr.microsoft.com/dotnet/aspnet:10.0-preview AS runtime-base
WORKDIR /app
# Install dependencies for crypto providers
# PostgreSQL client for Authority/Concelier/etc
RUN apt-get update && apt-get install -y \
postgresql-client \
&& rm -rf /var/lib/apt/lists/*
# Copy all published assemblies (includes all crypto plugins)
COPY --from=build /app/publish /app/
# Expose common ports (these can be overridden by docker-compose)
EXPOSE 8080 8443
# Labels
LABEL com.stellaops.image.type="platform"
LABEL com.stellaops.image.variant="all-plugins"
LABEL com.stellaops.crypto.plugins="offline-verification"
# Additional plugins will be added as they become available:
# LABEL com.stellaops.crypto.plugins="offline-verification,eidas,cryptopro,sm"
# Health check placeholder (can be overridden per service)
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost:8080/health || exit 1
# ============================================================================
# Service-Specific Final Stages
# ============================================================================
# Authority Service
FROM runtime-base AS authority
WORKDIR /app/authority
ENTRYPOINT ["dotnet", "StellaOps.Authority.WebService.dll"]
# Signer Service
FROM runtime-base AS signer
WORKDIR /app/signer
ENTRYPOINT ["dotnet", "StellaOps.Signer.WebService.dll"]
# Attestor Service
FROM runtime-base AS attestor
WORKDIR /app/attestor
ENTRYPOINT ["dotnet", "StellaOps.Attestor.WebService.dll"]
# Concelier Service
FROM runtime-base AS concelier
WORKDIR /app/concelier
ENTRYPOINT ["dotnet", "StellaOps.Concelier.WebService.dll"]
# Scanner Service
FROM runtime-base AS scanner
WORKDIR /app/scanner
ENTRYPOINT ["dotnet", "StellaOps.Scanner.WebService.dll"]
# Excititor Service
FROM runtime-base AS excititor
WORKDIR /app/excititor
ENTRYPOINT ["dotnet", "StellaOps.Excititor.WebService.dll"]
# Policy Service
FROM runtime-base AS policy
WORKDIR /app/policy
ENTRYPOINT ["dotnet", "StellaOps.Policy.WebService.dll"]
# Scheduler Service
FROM runtime-base AS scheduler
WORKDIR /app/scheduler
ENTRYPOINT ["dotnet", "StellaOps.Scheduler.WebService.dll"]
# Notify Service
FROM runtime-base AS notify
WORKDIR /app/notify
ENTRYPOINT ["dotnet", "StellaOps.Notify.WebService.dll"]
# Zastava Service
FROM runtime-base AS zastava
WORKDIR /app/zastava
ENTRYPOINT ["dotnet", "StellaOps.Zastava.WebService.dll"]
# Gateway Service
FROM runtime-base AS gateway
WORKDIR /app/gateway
ENTRYPOINT ["dotnet", "StellaOps.Gateway.WebService.dll"]
# AirGap Importer (CLI tool)
FROM runtime-base AS airgap-importer
WORKDIR /app/airgap-importer
ENTRYPOINT ["dotnet", "StellaOps.AirGap.Importer.dll"]
# AirGap Exporter (CLI tool)
FROM runtime-base AS airgap-exporter
WORKDIR /app/airgap-exporter
ENTRYPOINT ["dotnet", "StellaOps.AirGap.Exporter.dll"]
# CLI Tool
FROM runtime-base AS cli
WORKDIR /app/cli
ENTRYPOINT ["dotnet", "StellaOps.Cli.dll"]
# ============================================================================
# Build Instructions
# ============================================================================
# Build platform image:
# docker build -f deploy/docker/Dockerfile.platform --target runtime-base -t stellaops/platform:latest .
#
# Build specific service:
# docker build -f deploy/docker/Dockerfile.platform --target authority -t stellaops/authority:latest .
# docker build -f deploy/docker/Dockerfile.platform --target scanner -t stellaops/scanner:latest .
#
# The platform image contains ALL crypto plugins.
# Regional selection happens at runtime via configuration (see Dockerfile.crypto-profile).


@@ -1,50 +0,0 @@
#!/usr/bin/env bash
# Build hardened images for the core services using the shared template/matrix (DOCKER-44-001)
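# Usage (environment overrides are optional; the registry and tag below are placeholder examples):
#   REGISTRY=registry.example.com/stellaops TAG_SUFFIX=2025.10.0 ./build-all.sh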
set -euo pipefail
ROOT=${ROOT:-"$(git rev-parse --show-toplevel)"}
MATRIX=${MATRIX:-"${ROOT}/ops/devops/docker/services-matrix.env"}
REGISTRY=${REGISTRY:-"stellaops"}
TAG_SUFFIX=${TAG_SUFFIX:-"dev"}
SDK_IMAGE=${SDK_IMAGE:-"mcr.microsoft.com/dotnet/sdk:10.0-bookworm-slim"}
RUNTIME_IMAGE=${RUNTIME_IMAGE:-"mcr.microsoft.com/dotnet/aspnet:10.0-bookworm-slim"}
if [[ ! -f "${MATRIX}" ]]; then
echo "matrix file not found: ${MATRIX}" >&2
exit 1
fi
echo "Building services from ${MATRIX} -> ${REGISTRY}/<service>:${TAG_SUFFIX}" >&2
while IFS='|' read -r service dockerfile project binary port; do
[[ -z "${service}" || "${service}" =~ ^# ]] && continue
image="${REGISTRY}/${service}:${TAG_SUFFIX}"
df_path="${ROOT}/${dockerfile}"
if [[ ! -f "${df_path}" ]]; then
echo "skipping ${service}: dockerfile missing (${df_path})" >&2
continue
fi
if [[ "${dockerfile}" == *"Dockerfile.console"* ]]; then
# Angular console build uses its dedicated Dockerfile
echo "[console] ${service} -> ${image}" >&2
docker build \
-f "${df_path}" "${ROOT}" \
--build-arg APP_DIR="${project}" \
--build-arg APP_PORT="${port}" \
-t "${image}"
else
echo "[service] ${service} -> ${image}" >&2
docker build \
-f "${df_path}" "${ROOT}" \
--build-arg SDK_IMAGE="${SDK_IMAGE}" \
--build-arg RUNTIME_IMAGE="${RUNTIME_IMAGE}" \
--build-arg APP_PROJECT="${project}" \
--build-arg APP_BINARY="${binary}" \
--build-arg APP_PORT="${port}" \
-t "${image}"
fi
done < "${MATRIX}"
echo "Build complete. Remember to enforce readOnlyRootFilesystem at deploy time and run sbom_attest.sh (DOCKER-44-002)." >&2


@@ -1,10 +0,0 @@
#!/bin/sh
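# Container HEALTHCHECK helper for the frontend image: fetches a single path (default "/")
# and exits non-zero on failure. Override HEALTH_HOST/HEALTH_PORT/HEALTH_PATH/HEALTH_TIMEOUT via env.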
set -eu
HOST="${HEALTH_HOST:-127.0.0.1}"
PORT="${HEALTH_PORT:-8080}"
PATH_CHECK="${HEALTH_PATH:-/}"
USER_AGENT="stellaops-frontend-healthcheck"
wget -qO- "http://${HOST}:${PORT}${PATH_CHECK}" \
--header="User-Agent: ${USER_AGENT}" \
--timeout="${HEALTH_TIMEOUT:-4}" >/dev/null


@@ -1,24 +0,0 @@
#!/bin/sh
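# Container HEALTHCHECK helper for service images: probes the liveness and readiness endpoints
# and exits non-zero if either fails. Override HEALTH_HOST/HEALTH_PORT, LIVENESS_PATH/READINESS_PATH,
# and HEALTH_TIMEOUT via env.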
set -eu
HOST="${HEALTH_HOST:-127.0.0.1}"
PORT="${HEALTH_PORT:-8080}"
LIVENESS_PATH="${LIVENESS_PATH:-/health/liveness}"
READINESS_PATH="${READINESS_PATH:-/health/readiness}"
USER_AGENT="stellaops-healthcheck"
fetch() {
target_path="$1"
# BusyBox wget is available in Alpine; curl not assumed.
wget -qO- "http://${HOST}:${PORT}${target_path}" \
--header="User-Agent: ${USER_AGENT}" \
--timeout="${HEALTH_TIMEOUT:-4}" >/dev/null
}
fail=0
if ! fetch "$LIVENESS_PATH"; then
fail=1
fi
if ! fetch "$READINESS_PATH"; then
fail=1
fi
exit "$fail"


@@ -1,48 +0,0 @@
#!/usr/bin/env bash
# Deterministic SBOM + attestation helper for DOCKER-44-002
# Usage: ./sbom_attest.sh <image-ref> [output-dir] [cosign-key]
# - image-ref: fully qualified image (e.g., ghcr.io/stellaops/policy:1.2.3)
# - output-dir: defaults to ./sbom
# - cosign-key: path to cosign key (PEM). If omitted, uses keyless if allowed (COSIGN_EXPERIMENTAL=1)
set -euo pipefail
IMAGE_REF=${1:?"image ref required"}
OUT_DIR=${2:-sbom}
COSIGN_KEY=${3:-}
mkdir -p "${OUT_DIR}"
# Normalize filename (replace / and : with _)
name_safe() {
echo "$1" | tr '/:' '__'
}
BASENAME=$(name_safe "${IMAGE_REF}")
SPDX_JSON="${OUT_DIR}/${BASENAME}.spdx.json"
CDX_JSON="${OUT_DIR}/${BASENAME}.cdx.json"
ATTESTATION="${OUT_DIR}/${BASENAME}.sbom.att"
# Freeze timestamps for reproducibility
export SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH:-1704067200}
# Generate SPDX JSON (syft's SPDX output is stable and offline-friendly)
syft "${IMAGE_REF}" -o spdx-json > "${SPDX_JSON}"
# Generate CycloneDX 1.6 JSON
syft "${IMAGE_REF}" -o cyclonedx-json > "${CDX_JSON}"
# Attach SBOMs as cosign attestations (one per format)
export COSIGN_EXPERIMENTAL=${COSIGN_EXPERIMENTAL:-1}
COSIGN_ARGS=("attest" "--predicate" "${SPDX_JSON}" "--type" "spdx" "${IMAGE_REF}")
if [[ -n "${COSIGN_KEY}" ]]; then
COSIGN_ARGS+=("--key" "${COSIGN_KEY}")
fi
cosign "${COSIGN_ARGS[@]}"
COSIGN_ARGS=("attest" "--predicate" "${CDX_JSON}" "--type" "cyclonedx" "${IMAGE_REF}")
if [[ -n "${COSIGN_KEY}" ]]; then
COSIGN_ARGS+=("--key" "${COSIGN_KEY}")
fi
cosign "${COSIGN_ARGS[@]}"
echo "SBOMs written to ${SPDX_JSON} and ${CDX_JSON}" >&2
echo "Attestations pushed for ${IMAGE_REF}" >&2


@@ -1,12 +0,0 @@
# service|dockerfile|project|binary|port
# Paths are relative to repo root; dockerfile is usually the shared hardened template.
api|ops/devops/docker/Dockerfile.hardened.template|src/VulnExplorer/StellaOps.VulnExplorer.Api/StellaOps.VulnExplorer.Api.csproj|StellaOps.VulnExplorer.Api|8080
orchestrator|ops/devops/docker/Dockerfile.hardened.template|src/Orchestrator/StellaOps.Orchestrator.WebService/StellaOps.Orchestrator.WebService.csproj|StellaOps.Orchestrator.WebService|8080
task-runner|ops/devops/docker/Dockerfile.hardened.template|src/Orchestrator/StellaOps.Orchestrator.Worker/StellaOps.Orchestrator.Worker.csproj|StellaOps.Orchestrator.Worker|8081
concelier|ops/devops/docker/Dockerfile.hardened.template|src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj|StellaOps.Concelier.WebService|8080
excititor|ops/devops/docker/Dockerfile.hardened.template|src/Excititor/StellaOps.Excititor.WebService/StellaOps.Excititor.WebService.csproj|StellaOps.Excititor.WebService|8080
policy|ops/devops/docker/Dockerfile.hardened.template|src/Policy/StellaOps.Policy.Gateway/StellaOps.Policy.Gateway.csproj|StellaOps.Policy.Gateway|8084
notify|ops/devops/docker/Dockerfile.hardened.template|src/Notify/StellaOps.Notify.WebService/StellaOps.Notify.WebService.csproj|StellaOps.Notify.WebService|8080
export|ops/devops/docker/Dockerfile.hardened.template|src/ExportCenter/StellaOps.ExportCenter.WebService/StellaOps.ExportCenter.WebService.csproj|StellaOps.ExportCenter.WebService|8080
advisoryai|ops/devops/docker/Dockerfile.hardened.template|src/AdvisoryAI/StellaOps.AdvisoryAI.WebService/StellaOps.AdvisoryAI.WebService.csproj|StellaOps.AdvisoryAI.WebService|8080
console|ops/devops/docker/Dockerfile.console|src/Web/StellaOps.Web|StellaOps.Web|8080


@@ -1,70 +0,0 @@
#!/usr/bin/env bash
# Smoke-check /health and capability endpoints for a built image (DOCKER-44-003)
# Usage: ./verify_health_endpoints.sh <image-ref> [port]
# Requires: docker, curl or wget
set -euo pipefail
IMAGE=${1:?"image ref required"}
PORT=${2:-8080}
CONTAINER_NAME="healthcheck-$$"
TIMEOUT=30
SLEEP=1
have_curl=1
if ! command -v curl >/dev/null 2>&1; then
have_curl=0
fi
req() {
local path=$1
local url="http://127.0.0.1:${PORT}${path}"
if [[ $have_curl -eq 1 ]]; then
curl -fsS --max-time 3 "$url" >/dev/null
else
wget -qO- --timeout=3 "$url" >/dev/null
fi
}
cleanup() {
docker rm -f "$CONTAINER_NAME" >/dev/null 2>&1 || true
}
trap cleanup EXIT
echo "[info] starting container ${IMAGE} on port ${PORT}" >&2
cleanup
if ! docker run -d --rm --name "$CONTAINER_NAME" -p "${PORT}:${PORT}" "$IMAGE" >/dev/null; then
echo "[error] failed to start image ${IMAGE}" >&2
exit 1
fi
# wait for readiness
start=$(date +%s)
while true; do
if req /health/liveness 2>/dev/null; then break; fi
now=$(date +%s)
if (( now - start > TIMEOUT )); then
echo "[error] liveness endpoint did not come up in ${TIMEOUT}s" >&2
exit 1
fi
sleep $SLEEP
done
# verify endpoints
fail=0
for path in /health/liveness /health/readiness /version /metrics; do
if ! req "$path"; then
echo "[error] missing or failing ${path}" >&2
fail=1
fi
done
# capability endpoint optional; if present ensure merge=false for Concelier/Excititor
if req /capabilities 2>/dev/null; then
body="$(curl -fsS "http://127.0.0.1:${PORT}/capabilities" 2>/dev/null || true)"
if echo "$body" | grep -q '"merge"[[:space:]]*:[[:space:]]*false'; then
:
else
echo "[warn] /capabilities present but merge flag not false" >&2
fi
fi
exit $fail


@@ -1,6 +0,0 @@
apiVersion: v2
name: stellaops
description: Stella Ops core stack (authority, signing, scanner, UI) with infrastructure primitives.
type: application
version: 0.1.0
appVersion: "2025.10.0"


@@ -1,64 +0,0 @@
# StellaOps Helm Install Guide
This guide ships with the `stellaops` chart and provides deterministic install steps for **prod** and **airgap** profiles. All images are pinned by digest from `deploy/releases/<channel>.yaml`.
## Prerequisites
- Helm ≥ 3.14 and kubectl configured for the target cluster.
- Pull secrets for `registry.stella-ops.org` (or your mirrored registry in air-gapped mode); see the example below.
- TLS/ingress secrets created if you enable ingress in the values files.
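If the target namespace does not already have registry credentials, create the pull secret up front (a minimal sketch; the secret name `stellaops-regcred` and the credential variables are placeholders for your registry account):
```bash
kubectl create namespace stellaops --dry-run=client -o yaml | kubectl apply -f -
kubectl -n stellaops create secret docker-registry stellaops-regcred \
  --docker-server=registry.stella-ops.org \
  --docker-username="$REGISTRY_USER" \
  --docker-password="$REGISTRY_TOKEN"
```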
## Channels and values
- Prod/stable: `deploy/releases/2025.09-stable.yaml` + `values-prod.yaml`
- Airgap: `deploy/releases/2025.09-airgap.yaml` + `values-airgap.yaml`
- Mirror (optional): `values-mirror.yaml` overlays registry endpoints when using a private mirror.
## Quick install (prod)
```bash
export RELEASE_CHANNEL=2025.09-stable
export NAMESPACE=stellaops
helm upgrade --install stellaops ./deploy/helm/stellaops \
--namespace "$NAMESPACE" --create-namespace \
-f deploy/helm/stellaops/values-prod.yaml \
--set global.release.channel=stable \
--set global.release.version="2025.09.2" \
--set global.release.manifestSha256="dc3c8fe1ab83941c838ccc5a8a5862f7ddfa38c2078e580b5649db26554565b7"
```
## Quick install (airgap)
Assumes images are already loaded into your private registry and `values-airgap.yaml` points to that registry.
```bash
export NAMESPACE=stellaops
helm upgrade --install stellaops ./deploy/helm/stellaops \
--namespace "$NAMESPACE" --create-namespace \
-f deploy/helm/stellaops/values-airgap.yaml \
--set global.release.channel=airgap \
--set global.release.version="2025.09.0-airgap" \
--set global.release.manifestSha256="d422ae3ea01d5f27ea8b5fdc5b19667cb4e3e2c153a35cb761cb53a6ce4f6ba4"
```
## Mirror overlay
If using a mirrored registry, layer the mirror values:
```bash
helm upgrade --install stellaops ./deploy/helm/stellaops \
--namespace "$NAMESPACE" --create-namespace \
-f deploy/helm/stellaops/values-prod.yaml \
-f deploy/helm/stellaops/values-mirror.yaml \
--set global.release.version="2025.09.2" \
--set global.release.manifestSha256="dc3c8fe1ab83941c838ccc5a8a5862f7ddfa38c2078e580b5649db26554565b7"
```
## Validate chart and digests
```bash
deploy/tools/check-channel-alignment.py --manifest deploy/releases/$RELEASE_CHANNEL.yaml \
--values deploy/helm/stellaops/values-prod.yaml
helm lint ./deploy/helm/stellaops
helm template stellaops ./deploy/helm/stellaops -f deploy/helm/stellaops/values-prod.yaml >/tmp/stellaops.yaml
```
## Notes
- Surface.Env and Surface.Secrets defaults are defined in `values*.yaml`; adjust endpoints, cache roots, and providers before promotion.
- Keep `global.release.*` in sync with the chosen release manifest; never deploy with empty version/channel/manifestSha256.
- For offline clusters, run image preload and secret creation before `helm upgrade` to avoid pull failures; a preload sketch follows.
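A minimal preload pass might look like the following (a sketch only: it assumes the offline bundle ships `docker save` tarballs under `bundle/images/` and that `registry.internal.example:5000` stands in for the mirror referenced by `values-airgap.yaml`):
```bash
MIRROR=registry.internal.example:5000        # placeholder for your private mirror
for tar in bundle/images/*.tar; do           # assumed bundle layout
  docker load -i "$tar"
done
# Retag and push one image as an example; repeat (or loop) for every image in the bundle
docker tag registry.stella-ops.org/stellaops/web-ui:2025.09.0-airgap \
  "$MIRROR/stellaops/web-ui:2025.09.0-airgap"
docker push "$MIRROR/stellaops/web-ui:2025.09.0-airgap"
```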


@@ -1,16 +0,0 @@
# Mock Overlay (Dev Only)
Purpose: let deployment tasks progress with placeholder digests until real releases land.
Use:
```bash
helm template mock ./deploy/helm/stellaops -f deploy/helm/stellaops/values-mock.yaml
```
Contents:
- Mock deployments for orchestrator, policy-registry, packs-registry, task-runner, VEX Lens, issuer-directory, findings-ledger, vuln-explorer-api.
- Image pins pulled from `deploy/releases/2025.09-mock-dev.yaml`.
Notes:
- Annotated with `stellaops.dev/mock: "true"` to discourage production use.
- Swap to real values once official digests publish; keep mock overlay gated behind `mock.enabled`.
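For example, to render the overlay with the gate switched on explicitly:
```bash
helm template mock ./deploy/helm/stellaops \
  -f deploy/helm/stellaops/values-mock.yaml \
  --set mock.enabled=true
```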


@@ -1,64 +0,0 @@
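# OpenTelemetry Collector configuration bundled with the Helm chart
# (rendered from files/otel-collector-config.yaml unless telemetry.collector.config overrides it).
# TLS material, tenant id, endpoints, and log level are injected via the STELLAOPS_OTEL_* and
# STELLAOPS_TENANT_ID environment variables set on the collector Deployment.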
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
tls:
cert_file: ${STELLAOPS_OTEL_TLS_CERT:?STELLAOPS_OTEL_TLS_CERT not set}
key_file: ${STELLAOPS_OTEL_TLS_KEY:?STELLAOPS_OTEL_TLS_KEY not set}
client_ca_file: ${STELLAOPS_OTEL_TLS_CA:?STELLAOPS_OTEL_TLS_CA not set}
require_client_certificate: ${STELLAOPS_OTEL_REQUIRE_CLIENT_CERT:true}
http:
endpoint: 0.0.0.0:4318
tls:
cert_file: ${STELLAOPS_OTEL_TLS_CERT:?STELLAOPS_OTEL_TLS_CERT not set}
key_file: ${STELLAOPS_OTEL_TLS_KEY:?STELLAOPS_OTEL_TLS_KEY not set}
client_ca_file: ${STELLAOPS_OTEL_TLS_CA:?STELLAOPS_OTEL_TLS_CA not set}
require_client_certificate: ${STELLAOPS_OTEL_REQUIRE_CLIENT_CERT:true}
processors:
attributes/tenant-tag:
actions:
- key: tenant.id
action: insert
value: ${STELLAOPS_TENANT_ID:unknown}
batch:
send_batch_size: 1024
timeout: 5s
exporters:
logging:
verbosity: normal
prometheus:
endpoint: ${STELLAOPS_OTEL_PROMETHEUS_ENDPOINT:0.0.0.0:9464}
enable_open_metrics: true
metric_expiration: 5m
tls:
cert_file: ${STELLAOPS_OTEL_TLS_CERT:?STELLAOPS_OTEL_TLS_CERT not set}
key_file: ${STELLAOPS_OTEL_TLS_KEY:?STELLAOPS_OTEL_TLS_KEY not set}
client_ca_file: ${STELLAOPS_OTEL_TLS_CA:?STELLAOPS_OTEL_TLS_CA not set}
extensions:
health_check:
endpoint: ${STELLAOPS_OTEL_HEALTH_ENDPOINT:0.0.0.0:13133}
pprof:
endpoint: ${STELLAOPS_OTEL_PPROF_ENDPOINT:0.0.0.0:1777}
service:
telemetry:
logs:
level: ${STELLAOPS_OTEL_LOG_LEVEL:info}
extensions: [health_check, pprof]
pipelines:
traces:
receivers: [otlp]
processors: [attributes/tenant-tag, batch]
exporters: [logging]
metrics:
receivers: [otlp]
processors: [attributes/tenant-tag, batch]
exporters: [logging, prometheus]
logs:
receivers: [otlp]
processors: [attributes/tenant-tag, batch]
exporters: [logging]


@@ -1,43 +0,0 @@
{{- define "stellaops.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "stellaops.telemetryCollector.config" -}}
{{- if .Values.telemetry.collector.config }}
{{ tpl .Values.telemetry.collector.config . }}
{{- else }}
{{ tpl (.Files.Get "files/otel-collector-config.yaml") . }}
{{- end }}
{{- end -}}
{{- define "stellaops.telemetryCollector.fullname" -}}
{{- printf "%s-otel-collector" (include "stellaops.name" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "stellaops.fullname" -}}
{{- $name := default .root.Chart.Name .root.Values.fullnameOverride -}}
{{- printf "%s-%s" $name .name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "stellaops.selectorLabels" -}}
app.kubernetes.io/name: {{ include "stellaops.name" .root | quote }}
app.kubernetes.io/instance: {{ .root.Release.Name | quote }}
app.kubernetes.io/component: {{ .name | quote }}
{{- if .svc.class }}
app.kubernetes.io/part-of: {{ printf "stellaops-%s" .svc.class | quote }}
{{- else }}
app.kubernetes.io/part-of: "stellaops-core"
{{- end }}
{{- end -}}
{{- define "stellaops.labels" -}}
{{ include "stellaops.selectorLabels" . }}
helm.sh/chart: {{ printf "%s-%s" .root.Chart.Name .root.Chart.Version | quote }}
app.kubernetes.io/version: {{ .root.Values.global.release.version | quote }}
app.kubernetes.io/managed-by: {{ .root.Release.Service | quote }}
stellaops.release/channel: {{ .root.Values.global.release.channel | quote }}
stellaops.profile: {{ .root.Values.global.profile | quote }}
{{- range $k, $v := .root.Values.global.labels }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end -}}


@@ -1,10 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "stellaops.fullname" (dict "root" . "name" "release") }}
labels:
{{- include "stellaops.labels" (dict "root" . "name" "release" "svc" (dict "class" "meta")) | nindent 4 }}
data:
version: {{ .Values.global.release.version | quote }}
channel: {{ .Values.global.release.channel | quote }}
manifestSha256: {{ default "" .Values.global.release.manifestSha256 | quote }}


@@ -1,15 +0,0 @@
{{- $root := . -}}
{{- range $name, $cfg := .Values.configMaps }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }}
labels:
{{- include "stellaops.labels" (dict "root" $root "name" $name "svc" (dict "class" "config")) | nindent 4 }}
data:
{{- range $fileName, $content := $cfg.data }}
{{ $fileName }}: |
{{ tpl $content $root | nindent 4 }}
{{- end }}
---
{{- end }}


@@ -1,108 +0,0 @@
{{- if .Values.console.enabled }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "stellaops.fullname" . }}-console
labels:
app.kubernetes.io/component: console
{{- include "stellaops.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.console.replicas | default 1 }}
selector:
matchLabels:
app.kubernetes.io/component: console
{{- include "stellaops.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
app.kubernetes.io/component: console
{{- include "stellaops.selectorLabels" . | nindent 8 }}
spec:
securityContext:
{{- toYaml .Values.console.securityContext | nindent 8 }}
containers:
- name: console
image: {{ .Values.console.image }}
imagePullPolicy: {{ .Values.global.image.pullPolicy | default "IfNotPresent" }}
ports:
- name: http
containerPort: {{ .Values.console.port | default 8080 }}
protocol: TCP
securityContext:
{{- toYaml .Values.console.containerSecurityContext | nindent 12 }}
livenessProbe:
{{- toYaml .Values.console.livenessProbe | nindent 12 }}
readinessProbe:
{{- toYaml .Values.console.readinessProbe | nindent 12 }}
resources:
{{- toYaml .Values.console.resources | nindent 12 }}
volumeMounts:
{{- toYaml .Values.console.volumeMounts | nindent 12 }}
env:
- name: APP_PORT
value: "{{ .Values.console.port | default 8080 }}"
volumes:
{{- toYaml .Values.console.volumes | nindent 8 }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "stellaops.fullname" . }}-console
labels:
app.kubernetes.io/component: console
{{- include "stellaops.labels" . | nindent 4 }}
spec:
type: {{ .Values.console.service.type | default "ClusterIP" }}
ports:
- port: {{ .Values.console.service.port | default 80 }}
targetPort: {{ .Values.console.service.targetPort | default 8080 }}
protocol: TCP
name: http
selector:
app.kubernetes.io/component: console
{{- include "stellaops.selectorLabels" . | nindent 4 }}
{{- if .Values.console.ingress.enabled }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "stellaops.fullname" . }}-console
labels:
app.kubernetes.io/component: console
{{- include "stellaops.labels" . | nindent 4 }}
{{- with .Values.console.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.console.ingress.className }}
ingressClassName: {{ .Values.console.ingress.className }}
{{- end }}
{{- if .Values.console.ingress.tls }}
tls:
{{- range .Values.console.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.console.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
pathType: {{ .pathType | default "Prefix" }}
backend:
service:
name: {{ include "stellaops.fullname" $ }}-console
port:
name: http
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,225 +0,0 @@
{{- $root := . -}}
{{- $configMaps := default (dict) .Values.configMaps -}}
{{- $hasPolicyActivationConfig := hasKey $configMaps "policy-engine-activation" -}}
{{- $policyActivationConfigName := "" -}}
{{- if $hasPolicyActivationConfig -}}
{{- $policyActivationConfigName = include "stellaops.fullname" (dict "root" $root "name" "policy-engine-activation") -}}
{{- end -}}
{{- $policyActivationTargets := dict "policy-engine" true "policy-gateway" true -}}
{{- range $name, $svc := .Values.services }}
{{- $configMounts := (default (list) $svc.configMounts) }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }}
labels:
{{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
spec:
replicas: {{ default 1 $svc.replicas }}
selector:
matchLabels:
{{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 6 }}
template:
metadata:
labels:
{{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 8 }}
      annotations:
        stellaops.release/version: {{ $root.Values.global.release.version | quote }}
        stellaops.release/channel: {{ $root.Values.global.release.channel | quote }}
        {{- if $svc.podAnnotations }}
        {{- toYaml $svc.podAnnotations | nindent 8 }}
        {{- end }}
spec:
{{- if $svc.podSecurityContext }}
securityContext:
{{ toYaml $svc.podSecurityContext | nindent 6 }}
{{- end }}
containers:
- name: {{ $name }}
image: {{ $svc.image | quote }}
imagePullPolicy: {{ default $root.Values.global.image.pullPolicy $svc.imagePullPolicy }}
{{- if $svc.securityContext }}
securityContext:
{{ toYaml $svc.securityContext | nindent 12 }}
{{- end }}
{{- if $svc.command }}
command:
{{- range $cmd := $svc.command }}
- {{ $cmd | quote }}
{{- end }}
{{- end }}
{{- if $svc.args }}
args:
{{- range $arg := $svc.args }}
- {{ $arg | quote }}
{{- end }}
{{- end }}
{{- if $svc.env }}
env:
{{- range $envName, $envValue := $svc.env }}
- name: {{ $envName }}
value: {{ $envValue | quote }}
{{- end }}
{{- end }}
{{- $needsPolicyActivation := and $hasPolicyActivationConfig (hasKey $policyActivationTargets $name) }}
{{- $envFrom := default (list) $svc.envFrom }}
{{- if and (hasKey $root.Values.configMaps "surface-env") (or (hasPrefix "scanner-" $name) (hasPrefix "zastava-" $name)) }}
{{- $envFrom = append $envFrom (dict "configMapRef" (dict "name" (include "stellaops.fullname" (dict "root" $root "name" "surface-env")))) }}
{{- end }}
{{- if and $needsPolicyActivation (ne $policyActivationConfigName "") }}
{{- $hasActivationReference := false }}
{{- range $envFromEntry := $envFrom }}
{{- if and (hasKey $envFromEntry "configMapRef") (eq (index (index $envFromEntry "configMapRef") "name") $policyActivationConfigName) }}
{{- $hasActivationReference = true }}
{{- end }}
{{- end }}
{{- if not $hasActivationReference }}
{{- $envFrom = append $envFrom (dict "configMapRef" (dict "name" $policyActivationConfigName)) }}
{{- end }}
{{- end }}
{{- if $envFrom }}
envFrom:
{{ toYaml $envFrom | nindent 12 }}
{{- end }}
{{- if $svc.ports }}
ports:
{{- range $port := $svc.ports }}
- name: {{ default (printf "%s-%v" $name $port.containerPort) $port.name | trunc 63 | trimSuffix "-" }}
containerPort: {{ $port.containerPort }}
protocol: {{ default "TCP" $port.protocol }}
{{- end }}
{{- else if and $svc.service (hasKey $svc.service "port") }}
{{- $svcService := $svc.service }}
ports:
- name: {{ printf "%s-http" $name | trunc 63 | trimSuffix "-" }}
containerPort: {{ default (index $svcService "port") (index $svcService "targetPort") }}
protocol: {{ default "TCP" (index $svcService "protocol") }}
{{- end }}
{{- if $svc.resources }}
resources:
{{ toYaml $svc.resources | nindent 12 }}
{{- end }}
{{- if $svc.livenessProbe }}
livenessProbe:
{{ toYaml $svc.livenessProbe | nindent 12 }}
{{- end }}
{{- if $svc.readinessProbe }}
readinessProbe:
{{ toYaml $svc.readinessProbe | nindent 12 }}
{{- end }}
          {{- if $svc.prometheus }}
          {{- $pr := $svc.prometheus }}
          {{- if $pr.enabled }}
          {{- if not $svc.podAnnotations }}
          {{- $svc = merge $svc (dict "podAnnotations" (dict)) }}
          {{- end }}
          {{- $_ := merge $svc.podAnnotations (dict "prometheus.io/scrape" "true" "prometheus.io/path" (default "/metrics" $pr.path) "prometheus.io/port" (toString (default 8080 $pr.port)) "prometheus.io/scheme" (default "http" $pr.scheme)) }}
          {{- end }}
          {{- end }}
{{- if or $svc.volumeMounts $configMounts }}
volumeMounts:
{{- if $svc.volumeMounts }}
{{ toYaml $svc.volumeMounts | nindent 12 }}
{{- end }}
{{- range $mount := $configMounts }}
- name: {{ $mount.name }}
mountPath: {{ $mount.mountPath }}
{{- if $mount.subPath }}
subPath: {{ $mount.subPath }}
{{- end }}
{{- if hasKey $mount "readOnly" }}
readOnly: {{ $mount.readOnly }}
{{- else }}
readOnly: true
{{- end }}
{{- end }}
{{- end }}
{{- if or $svc.volumes (or $svc.volumeClaims $configMounts) }}
volumes:
{{- if $svc.volumes }}
{{ toYaml $svc.volumes | nindent 8 }}
{{- end }}
{{- if $svc.volumeClaims }}
{{- range $claim := $svc.volumeClaims }}
- name: {{ $claim.name }}
persistentVolumeClaim:
claimName: {{ $claim.claimName }}
{{- end }}
{{- end }}
{{- range $mount := $configMounts }}
- name: {{ $mount.name }}
configMap:
name: {{ include "stellaops.fullname" (dict "root" $root "name" $mount.configMap) }}
{{- if $mount.items }}
items:
{{ toYaml $mount.items | nindent 12 }}
{{- else if $mount.subPath }}
items:
- key: {{ $mount.subPath }}
path: {{ $mount.subPath }}
{{- end }}
{{- end }}
{{- end }}
{{- if $svc.serviceAccount }}
serviceAccountName: {{ $svc.serviceAccount | quote }}
{{- end }}
{{- if $svc.nodeSelector }}
nodeSelector:
{{ toYaml $svc.nodeSelector | nindent 8 }}
{{- end }}
{{- if $svc.affinity }}
affinity:
{{ toYaml $svc.affinity | nindent 8 }}
{{- end }}
{{- if $svc.tolerations }}
tolerations:
{{ toYaml $svc.tolerations | nindent 8 }}
{{- end }}
{{- if $svc.pdb }}
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }}
labels:
{{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
spec:
{{- if $svc.pdb.minAvailable }}
minAvailable: {{ $svc.pdb.minAvailable }}
{{- end }}
{{- if $svc.pdb.maxUnavailable }}
maxUnavailable: {{ $svc.pdb.maxUnavailable }}
{{- end }}
selector:
matchLabels:
{{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 6 }}
{{- end }}
---
{{- if $svc.service }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }}
labels:
{{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
spec:
type: {{ default "ClusterIP" $svc.service.type }}
selector:
{{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
ports:
- name: {{ default "http" $svc.service.portName }}
port: {{ $svc.service.port }}
targetPort: {{ $svc.service.targetPort | default $svc.service.port }}
protocol: {{ default "TCP" $svc.service.protocol }}
---
{{- end }}
{{- end }}


@@ -1,28 +0,0 @@
{{- if and .Values.externalSecrets.enabled .Values.externalSecrets.secrets }}
{{- range $secret := .Values.externalSecrets.secrets }}
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: {{ include "stellaops.fullname" $ }}-{{ $secret.name }}
labels:
{{- include "stellaops.labels" $ | nindent 4 }}
spec:
refreshInterval: {{ default "1h" $secret.refreshInterval }}
secretStoreRef:
name: {{ $secret.storeRef.name }}
kind: {{ default "ClusterSecretStore" $secret.storeRef.kind }}
target:
name: {{ $secret.target.name | default (printf "%s-%s" (include "stellaops.fullname" $) $secret.name) }}
creationPolicy: {{ default "Owner" $secret.target.creationPolicy }}
data:
{{- range $secret.data }}
- secretKey: {{ .key }}
remoteRef:
key: {{ .remoteKey }}
{{- if .property }}
property: {{ .property }}
{{- end }}
{{- end }}
---
{{- end }}
{{- end }}


@@ -1,39 +0,0 @@
{{- if and .Values.hpa.enabled .Values.services }}
{{- range $name, $svc := .Values.services }}
{{- if and $svc.hpa $svc.hpa.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "stellaops.fullname" (dict "root" $ "name" $name) }}
labels:
{{- include "stellaops.labels" (dict "root" $ "name" $name "svc" $svc) | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "stellaops.fullname" (dict "root" $ "name" $name) }}
minReplicas: {{ default $.Values.hpa.minReplicas $svc.hpa.minReplicas }}
maxReplicas: {{ default $.Values.hpa.maxReplicas $svc.hpa.maxReplicas }}
metrics:
{{- $cpu := coalesce $svc.hpa.cpu.targetPercentage $.Values.hpa.cpu.targetPercentage -}}
{{- if $cpu }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ $cpu }}
{{- end }}
{{- $mem := coalesce $svc.hpa.memory.targetPercentage $.Values.hpa.memory.targetPercentage -}}
{{- if $mem }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ $mem }}
{{- end }}
---
{{- end }}
{{- end }}
{{- end }}


@@ -1,32 +0,0 @@
{{- if and .Values.ingress.enabled .Values.ingress.hosts }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "stellaops.fullname" . }}
labels:
{{- include "stellaops.labels" . | nindent 4 }}
annotations:
{{- range $k, $v := .Values.ingress.annotations }}
{{ $k }}: {{ $v | quote }}
{{- end }}
spec:
ingressClassName: {{ .Values.ingress.className | default "nginx" | quote }}
tls:
{{- range .Values.ingress.tls }}
- hosts: {{ toYaml .hosts | nindent 6 }}
secretName: {{ .secretName }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host }}
http:
paths:
- path: {{ .path | default "/" }}
pathType: Prefix
backend:
service:
name: {{ include "stellaops.fullname" $ }}-gateway
port:
number: {{ .servicePort | default 80 }}
{{- end }}
{{- end }}


@@ -1,50 +0,0 @@
{{- if and .Values.migrations.enabled .Values.migrations.jobs }}
{{- range $job := .Values.migrations.jobs }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "stellaops.fullname" $ }}-migration-{{ $job.name | trunc 30 | trimSuffix "-" }}
labels:
{{- include "stellaops.labels" $ | nindent 4 }}
stellaops.io/component: migration
stellaops.io/migration-name: {{ $job.name | quote }}
spec:
backoffLimit: {{ default 3 $job.backoffLimit }}
ttlSecondsAfterFinished: {{ default 3600 $job.ttlSecondsAfterFinished }}
template:
metadata:
labels:
{{- include "stellaops.selectorLabels" $ | nindent 8 }}
stellaops.io/component: migration
stellaops.io/migration-name: {{ $job.name | quote }}
spec:
restartPolicy: {{ default "Never" $job.restartPolicy }}
serviceAccountName: {{ default "default" $job.serviceAccountName }}
containers:
- name: {{ $job.name | trunc 50 | trimSuffix "-" }}
image: {{ $job.image | quote }}
imagePullPolicy: {{ default "IfNotPresent" $job.imagePullPolicy }}
command: {{- if $job.command }} {{ toJson $job.command }} {{- else }} null {{- end }}
args: {{- if $job.args }} {{ toJson $job.args }} {{- else }} null {{- end }}
env:
{{- if $job.env }}
{{- range $k, $v := $job.env }}
- name: {{ $k }}
value: {{ $v | quote }}
{{- end }}
{{- end }}
envFrom:
{{- if $job.envFrom }}
{{- toYaml $job.envFrom | nindent 12 }}
{{- end }}
resources:
{{- if $job.resources }}
{{- toYaml $job.resources | nindent 12 }}
{{- else }}{}
{{- end }}
imagePullSecrets:
{{- if $.Values.global.image.pullSecrets }}
{{- toYaml $.Values.global.image.pullSecrets | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,45 +0,0 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "stellaops.fullname" . }}-default
labels:
{{- include "stellaops.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "stellaops.selectorLabelsRoot" . | nindent 6 }}
policyTypes:
- Ingress
- Egress
ingress:
- from:
{{- if .Values.networkPolicy.ingressNamespaces }}
- namespaceSelector:
matchLabels:
{{- toYaml .Values.networkPolicy.ingressNamespaces | nindent 14 }}
{{- end }}
{{- if .Values.networkPolicy.ingressPods }}
- podSelector:
matchLabels:
{{- toYaml .Values.networkPolicy.ingressPods | nindent 14 }}
{{- end }}
ports:
- protocol: TCP
port: {{ default 80 .Values.networkPolicy.ingressPort }}
egress:
- to:
{{- if .Values.networkPolicy.egressNamespaces }}
- namespaceSelector:
matchLabels:
{{- toYaml .Values.networkPolicy.egressNamespaces | nindent 14 }}
{{- end }}
{{- if .Values.networkPolicy.egressPods }}
- podSelector:
matchLabels:
{{- toYaml .Values.networkPolicy.egressPods | nindent 14 }}
{{- end }}
ports:
- protocol: TCP
port: {{ default 443 .Values.networkPolicy.egressPort }}
{{- end }}


@@ -1,22 +0,0 @@
{{- if .Values.mock.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: orchestrator-mock
annotations:
stellaops.dev/mock: "true"
spec:
replicas: 1
selector:
matchLabels:
app: orchestrator-mock
template:
metadata:
labels:
app: orchestrator-mock
spec:
containers:
- name: orchestrator
image: "{{ .Values.mock.orchestrator.image }}"
args: ["dotnet", "StellaOps.Orchestrator.WebService.dll"]
{{- end }}


@@ -1,121 +0,0 @@
{{- if .Values.telemetry.collector.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "stellaops.telemetryCollector.fullname" . }}
labels:
{{- include "stellaops.labels" (dict "root" . "name" "otel-collector" "svc" (dict "class" "telemetry")) | nindent 4 }}
data:
config.yaml: |
{{ include "stellaops.telemetryCollector.config" . | indent 4 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "stellaops.telemetryCollector.fullname" . }}
labels:
{{- include "stellaops.labels" (dict "root" . "name" "otel-collector" "svc" (dict "class" "telemetry")) | nindent 4 }}
spec:
replicas: {{ .Values.telemetry.collector.replicas | default 1 }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "stellaops.name" . | quote }}
app.kubernetes.io/component: "otel-collector"
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "stellaops.name" . | quote }}
app.kubernetes.io/component: "otel-collector"
stellaops.profile: {{ .Values.global.profile | quote }}
spec:
containers:
- name: otel-collector
image: {{ .Values.telemetry.collector.image | default "otel/opentelemetry-collector:0.105.0" | quote }}
args:
- "--config=/etc/otel/config.yaml"
ports:
- name: otlp-grpc
containerPort: 4317
- name: otlp-http
containerPort: 4318
- name: metrics
containerPort: 9464
- name: health
containerPort: 13133
- name: pprof
containerPort: 1777
env:
- name: STELLAOPS_OTEL_TLS_CERT
value: {{ .Values.telemetry.collector.tls.certPath | default "/etc/otel/tls/tls.crt" | quote }}
- name: STELLAOPS_OTEL_TLS_KEY
value: {{ .Values.telemetry.collector.tls.keyPath | default "/etc/otel/tls/tls.key" | quote }}
- name: STELLAOPS_OTEL_TLS_CA
value: {{ .Values.telemetry.collector.tls.caPath | default "/etc/otel/tls/ca.crt" | quote }}
- name: STELLAOPS_OTEL_PROMETHEUS_ENDPOINT
value: {{ .Values.telemetry.collector.prometheusEndpoint | default "0.0.0.0:9464" | quote }}
- name: STELLAOPS_OTEL_REQUIRE_CLIENT_CERT
value: {{ .Values.telemetry.collector.requireClientCert | default true | quote }}
- name: STELLAOPS_TENANT_ID
value: {{ .Values.telemetry.collector.defaultTenant | default "unknown" | quote }}
- name: STELLAOPS_OTEL_LOG_LEVEL
value: {{ .Values.telemetry.collector.logLevel | default "info" | quote }}
volumeMounts:
- name: config
mountPath: /etc/otel/config.yaml
subPath: config.yaml
readOnly: true
- name: tls
mountPath: /etc/otel/tls
readOnly: true
livenessProbe:
httpGet:
scheme: HTTPS
port: health
path: /healthz
initialDelaySeconds: 10
periodSeconds: 30
readinessProbe:
httpGet:
scheme: HTTPS
port: health
path: /healthz
initialDelaySeconds: 5
periodSeconds: 15
{{- with .Values.telemetry.collector.resources }}
resources:
{{ toYaml . | indent 12 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ include "stellaops.telemetryCollector.fullname" . }}
- name: tls
secret:
secretName: {{ .Values.telemetry.collector.tls.secretName | required "telemetry.collector.tls.secretName is required" }}
{{- if .Values.telemetry.collector.tls.items }}
items:
{{ toYaml .Values.telemetry.collector.tls.items | indent 14 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "stellaops.telemetryCollector.fullname" . }}
labels:
{{- include "stellaops.labels" (dict "root" . "name" "otel-collector" "svc" (dict "class" "telemetry")) | nindent 4 }}
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: {{ include "stellaops.name" . | quote }}
app.kubernetes.io/component: "otel-collector"
ports:
- name: otlp-grpc
port: {{ .Values.telemetry.collector.service.grpcPort | default 4317 }}
targetPort: otlp-grpc
- name: otlp-http
port: {{ .Values.telemetry.collector.service.httpPort | default 4318 }}
targetPort: otlp-http
- name: metrics
port: {{ .Values.telemetry.collector.service.metricsPort | default 9464 }}
targetPort: metrics
{{- end }}


@@ -1,44 +0,0 @@
{{- if .Values.mock.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: packs-registry-mock
annotations:
stellaops.dev/mock: "true"
spec:
replicas: 1
selector:
matchLabels:
app: packs-registry-mock
template:
metadata:
labels:
app: packs-registry-mock
spec:
containers:
- name: packs-registry
image: "{{ .Values.mock.packsRegistry.image }}"
args: ["dotnet", "StellaOps.PacksRegistry.dll"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: task-runner-mock
annotations:
stellaops.dev/mock: "true"
spec:
replicas: 1
selector:
matchLabels:
app: task-runner-mock
template:
metadata:
labels:
app: task-runner-mock
spec:
containers:
- name: task-runner
image: "{{ .Values.mock.taskRunner.image }}"
args: ["dotnet", "StellaOps.TaskRunner.WebService.dll"]
{{- end }}


@@ -1,22 +0,0 @@
{{- if .Values.mock.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: policy-registry-mock
annotations:
stellaops.dev/mock: "true"
spec:
replicas: 1
selector:
matchLabels:
app: policy-registry-mock
template:
metadata:
labels:
app: policy-registry-mock
spec:
containers:
- name: policy-registry
image: "{{ .Values.mock.policyRegistry.image }}"
args: ["dotnet", "StellaOps.Policy.Engine.dll"]
{{- end }}


@@ -1,22 +0,0 @@
{{- if .Values.mock.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: vex-lens-mock
annotations:
stellaops.dev/mock: "true"
spec:
replicas: 1
selector:
matchLabels:
app: vex-lens-mock
template:
metadata:
labels:
app: vex-lens-mock
spec:
containers:
- name: vex-lens
image: "{{ .Values.mock.vexLens.image }}"
args: ["dotnet", "StellaOps.VexLens.dll"]
{{- end }}


@@ -1,44 +0,0 @@
{{- if .Values.mock.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: findings-ledger-mock
annotations:
stellaops.dev/mock: "true"
spec:
replicas: 1
selector:
matchLabels:
app: findings-ledger-mock
template:
metadata:
labels:
app: findings-ledger-mock
spec:
containers:
- name: findings-ledger
image: "{{ .Values.mock.findingsLedger.image }}"
args: ["dotnet", "StellaOps.Findings.Ledger.WebService.dll"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vuln-explorer-api-mock
annotations:
stellaops.dev/mock: "true"
spec:
replicas: 1
selector:
matchLabels:
app: vuln-explorer-api-mock
template:
metadata:
labels:
app: vuln-explorer-api-mock
spec:
containers:
- name: vuln-explorer-api
image: "{{ .Values.mock.vulnExplorerApi.image }}"
args: ["dotnet", "StellaOps.VulnExplorer.Api.dll"]
{{- end }}


@@ -1,318 +0,0 @@
global:
profile: airgap
release:
version: "2025.09.2-airgap"
channel: airgap
manifestSha256: "b787b833dddd73960c31338279daa0b0a0dce2ef32bd32ef1aaf953d66135f94"
image:
pullPolicy: IfNotPresent
labels:
stellaops.io/channel: airgap
migrations:
enabled: false
jobs: []
networkPolicy:
enabled: true
ingressPort: 8443
egressPort: 443
ingressNamespaces:
kubernetes.io/metadata.name: stellaops
egressNamespaces:
kubernetes.io/metadata.name: stellaops
ingress:
enabled: false
className: nginx
annotations: {}
hosts: []
tls: []
externalSecrets:
enabled: false
secrets: []
prometheus:
enabled: true
path: /metrics
port: 8080
scheme: http
hpa:
enabled: false
minReplicas: 1
maxReplicas: 3
cpu:
targetPercentage: 70
memory:
targetPercentage: 80
configMaps:
notify-config:
data:
notify.yaml: |
storage:
driver: postgres
connectionString: "Host=stellaops-postgres;Port=5432;Database=notify;Username=stellaops;Password=stellaops"
commandTimeoutSeconds: 60
authority:
enabled: true
issuer: "https://authority.stella-ops.org"
metadataAddress: "https://authority.stella-ops.org/.well-known/openid-configuration"
requireHttpsMetadata: true
allowAnonymousFallback: false
backchannelTimeoutSeconds: 30
tokenClockSkewSeconds: 60
audiences:
- notify
readScope: notify.read
adminScope: notify.admin
api:
basePath: "/api/v1/notify"
internalBasePath: "/internal/notify"
tenantHeader: "X-StellaOps-Tenant"
plugins:
baseDirectory: "/var/opt/stellaops"
directory: "plugins/notify"
searchPatterns:
- "StellaOps.Notify.Connectors.*.dll"
orderedPlugins:
- StellaOps.Notify.Connectors.Slack
- StellaOps.Notify.Connectors.Teams
- StellaOps.Notify.Connectors.Email
- StellaOps.Notify.Connectors.Webhook
telemetry:
enableRequestLogging: true
minimumLogLevel: Warning
policy-engine-activation:
data:
STELLAOPS_POLICY_ENGINE__ACTIVATION__FORCETWOPERSONAPPROVAL: "true"
STELLAOPS_POLICY_ENGINE__ACTIVATION__DEFAULTREQUIRESTWOPERSONAPPROVAL: "true"
STELLAOPS_POLICY_ENGINE__ACTIVATION__EMITAUDITLOGS: "true"
services:
authority:
image: registry.stella-ops.org/stellaops/authority@sha256:5551a3269b7008cd5aceecf45df018c67459ed519557ccbe48b093b926a39bcc
service:
port: 8440
env:
STELLAOPS_AUTHORITY__ISSUER: "https://stellaops-authority:8440"
STELLAOPS_AUTHORITY__STORAGE__DRIVER: "postgres"
STELLAOPS_AUTHORITY__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=authority;Username=stellaops;Password=stellaops"
STELLAOPS_AUTHORITY__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
STELLAOPS_AUTHORITY__ALLOWANONYMOUSFALLBACK: "false"
signer:
image: registry.stella-ops.org/stellaops/signer@sha256:ddbbd664a42846cea6b40fca6465bc679b30f72851158f300d01a8571c5478fc
service:
port: 8441
env:
SIGNER__AUTHORITY__BASEURL: "https://stellaops-authority:8440"
SIGNER__POE__INTROSPECTURL: "file:///offline/poe/introspect.json"
SIGNER__STORAGE__DRIVER: "postgres"
SIGNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=signer;Username=stellaops;Password=stellaops"
SIGNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
attestor:
image: registry.stella-ops.org/stellaops/attestor@sha256:1ff0a3124d66d3a2702d8e421df40fbd98cc75cb605d95510598ebbae1433c50
service:
port: 8442
env:
ATTESTOR__SIGNER__BASEURL: "https://stellaops-signer:8441"
ATTESTOR__STORAGE__DRIVER: "postgres"
ATTESTOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=attestor;Username=stellaops;Password=stellaops"
ATTESTOR__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
concelier:
image: registry.stella-ops.org/stellaops/concelier@sha256:29e2e1a0972707e092cbd3d370701341f9fec2aa9316fb5d8100480f2a1c76b5
service:
port: 8445
env:
CONCELIER__STORAGE__DRIVER: "postgres"
CONCELIER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=concelier;Username=stellaops;Password=stellaops"
CONCELIER__STORAGE__S3__ENDPOINT: "http://stellaops-rustfs:8080"
CONCELIER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
CONCELIER__AUTHORITY__BASEURL: "https://stellaops-authority:8440"
CONCELIER__AUTHORITY__RESILIENCE__ALLOWOFFLINECACHEFALLBACK: "true"
CONCELIER__AUTHORITY__RESILIENCE__OFFLINECACHETOLERANCE: "00:45:00"
volumeMounts:
- name: concelier-jobs
mountPath: /var/lib/concelier/jobs
volumeClaims:
- name: concelier-jobs
claimName: stellaops-concelier-jobs
scanner-web:
image: registry.stella-ops.org/stellaops/scanner-web@sha256:3df8ca21878126758203c1a0444e39fd97f77ddacf04a69685cda9f1e5e94718
service:
port: 8444
env:
SCANNER__STORAGE__DRIVER: "postgres"
SCANNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=scanner;Username=stellaops;Password=stellaops"
SCANNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
SCANNER__QUEUE__BROKER: "valkey://stellaops-valkey:6379"
SCANNER__EVENTS__ENABLED: "false"
SCANNER__EVENTS__DRIVER: "valkey"
SCANNER__EVENTS__DSN: "stellaops-valkey:6379"
SCANNER__EVENTS__STREAM: "stella.events"
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "5"
SCANNER__EVENTS__MAXSTREAMLENGTH: "10000"
SCANNER__OFFLINEKIT__ENABLED: "false"
SCANNER__OFFLINEKIT__REQUIREDSSE: "true"
SCANNER__OFFLINEKIT__REKOROFFLINEMODE: "true"
SCANNER__OFFLINEKIT__TRUSTROOTDIRECTORY: "/etc/stellaops/trust-roots"
SCANNER__OFFLINEKIT__REKORSNAPSHOTDIRECTORY: "/var/lib/stellaops/rekor-snapshot"
SCANNER_SURFACE_FS_ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER_SURFACE_CACHE_ROOT: "/var/lib/stellaops/surface"
SCANNER_SURFACE_SECRETS_PROVIDER: "file"
SCANNER_SURFACE_SECRETS_ROOT: "/etc/stellaops/secrets"
scanner-worker:
image: registry.stella-ops.org/stellaops/scanner-worker@sha256:eea5d6cfe7835950c5ec7a735a651f2f0d727d3e470cf9027a4a402ea89c4fb5
env:
SCANNER__STORAGE__DRIVER: "postgres"
SCANNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=scanner;Username=stellaops;Password=stellaops"
SCANNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
SCANNER__QUEUE__BROKER: "valkey://stellaops-valkey:6379"
SCANNER__EVENTS__ENABLED: "false"
SCANNER__EVENTS__DRIVER: "valkey"
SCANNER__EVENTS__DSN: "stellaops-valkey:6379"
SCANNER__EVENTS__STREAM: "stella.events"
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "5"
SCANNER__EVENTS__MAXSTREAMLENGTH: "10000"
SCANNER_SURFACE_FS_ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER_SURFACE_CACHE_ROOT: "/var/lib/stellaops/surface"
SCANNER_SURFACE_SECRETS_PROVIDER: "file"
SCANNER_SURFACE_SECRETS_ROOT: "/etc/stellaops/secrets"
# Secret Detection Rules Bundle
SCANNER__FEATURES__EXPERIMENTAL__SECRETLEAKDETECTION: "false"
SCANNER__SECRETS__BUNDLEPATH: "/opt/stellaops/plugins/scanner/analyzers/secrets"
SCANNER__SECRETS__REQUIRESIGNATURE: "true"
volumeMounts:
- name: secrets-rules
mountPath: /opt/stellaops/plugins/scanner/analyzers/secrets
readOnly: true
volumeClaims:
- name: secrets-rules
claimName: stellaops-secrets-rules
notify-web:
image: registry.stella-ops.org/stellaops/notify-web:2025.09.2
service:
port: 8446
env:
DOTNET_ENVIRONMENT: Production
NOTIFY__QUEUE__DRIVER: "valkey"
NOTIFY__QUEUE__VALKEY__URL: "stellaops-valkey:6379"
configMounts:
- name: notify-config
mountPath: /app/etc/notify.yaml
subPath: notify.yaml
configMap: notify-config
excititor:
image: registry.stella-ops.org/stellaops/excititor@sha256:65c0ee13f773efe920d7181512349a09d363ab3f3e177d276136bd2742325a68
env:
EXCITITOR__CONCELIER__BASEURL: "https://stellaops-concelier:8445"
EXCITITOR__STORAGE__DRIVER: "postgres"
EXCITITOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=excititor;Username=stellaops;Password=stellaops"
advisory-ai-web:
image: registry.stella-ops.org/stellaops/advisory-ai-web:2025.09.2-airgap
service:
port: 8448
env:
ADVISORYAI__AdvisoryAI__SbomBaseAddress: https://stellaops-scanner-web:8444
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: /var/lib/advisory-ai/queue
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: /var/lib/advisory-ai/plans
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: /var/lib/advisory-ai/outputs
ADVISORYAI__AdvisoryAI__Inference__Mode: Local
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: ""
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: ""
volumeMounts:
- name: advisory-ai-data
mountPath: /var/lib/advisory-ai
volumeClaims:
- name: advisory-ai-data
claimName: stellaops-advisory-ai-data
advisory-ai-worker:
image: registry.stella-ops.org/stellaops/advisory-ai-worker:2025.09.2-airgap
env:
ADVISORYAI__AdvisoryAI__SbomBaseAddress: https://stellaops-scanner-web:8444
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: /var/lib/advisory-ai/queue
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: /var/lib/advisory-ai/plans
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: /var/lib/advisory-ai/outputs
ADVISORYAI__AdvisoryAI__Inference__Mode: Local
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: ""
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: ""
volumeMounts:
- name: advisory-ai-data
mountPath: /var/lib/advisory-ai
volumeClaims:
- name: advisory-ai-data
claimName: stellaops-advisory-ai-data
web-ui:
image: registry.stella-ops.org/stellaops/web-ui@sha256:bee9668011ff414572131dc777faab4da24473fe12c230893f161cabee092a1d
service:
port: 9443
targetPort: 8443
env:
STELLAOPS_UI__BACKEND__BASEURL: "https://stellaops-scanner-web:8444"
# Infrastructure services
postgres:
class: infrastructure
image: docker.io/library/postgres@sha256:8e97b8526ed19304b144f7478bc9201646acf0723cdc6e4b19bc9eb34879a27e
service:
port: 5432
env:
POSTGRES_USER: stellaops
POSTGRES_PASSWORD: stellaops
POSTGRES_DB: stellaops
volumeMounts:
- name: postgres-data
mountPath: /var/lib/postgresql/data
volumeClaims:
- name: postgres-data
claimName: stellaops-postgres-data
valkey:
class: infrastructure
image: docker.io/valkey/valkey:9.0.1-alpine
service:
port: 6379
command:
- valkey-server
- --appendonly
- "yes"
volumeMounts:
- name: valkey-data
mountPath: /data
volumeClaims:
- name: valkey-data
claimName: stellaops-valkey-data
rustfs:
class: infrastructure
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
service:
port: 8080
command:
- serve
- --listen
- 0.0.0.0:8080
- --root
- /data
env:
RUSTFS__LOG__LEVEL: info
RUSTFS__STORAGE__PATH: /data
volumeMounts:
- name: rustfs-data
mountPath: /data
volumeClaims:
- name: rustfs-data
claimName: stellaops-rustfs-data


@@ -1,104 +0,0 @@
# Blue/Green Deployment: Blue Environment
# Use this file alongside values-prod.yaml for the blue (current) environment
#
# Deploy with:
#   helm upgrade stellaops-blue ./deploy/helm/stellaops \
#     --namespace stellaops-blue \
#     --values deploy/helm/stellaops/values-prod.yaml \
#     --values deploy/helm/stellaops/values-bluegreen-blue.yaml \
# --wait
# Environment identification
global:
profile: prod-blue
labels:
stellaops.io/environment: blue
stellaops.io/deployment-strategy: blue-green
# Deployment identification
deployment:
environment: blue
color: blue
namespace: stellaops-blue
# Ingress for direct blue access (for validation/debugging)
ingress:
enabled: true
hosts:
- host: stellaops-blue.example.com
path: /
servicePort: 80
annotations:
# Not a canary - this is the primary ingress for blue
nginx.ingress.kubernetes.io/canary: "false"
# Service naming for traffic routing
services:
api:
name: stellaops-blue-api
web:
name: stellaops-blue-web
scanner:
name: stellaops-blue-scanner
# Pod labels for service selector
podLabels:
stellaops.io/color: blue
# Shared resources (same for both blue and green)
database:
# IMPORTANT: Blue and Green share the same database
# Ensure migrations are N-1 compatible
host: postgres.shared.svc.cluster.local
database: stellaops_production
# Connection pool tuning for blue/green (half of normal)
pool:
minSize: 5
maxSize: 25
valkey:
# Separate Valkey (Redis-compatible) instance per environment to avoid cache conflicts
host: valkey-blue.stellaops-blue.svc.cluster.local
database: 0
evidence:
storage:
# IMPORTANT: Shared evidence storage for continuity
bucket: stellaops-evidence-production
prefix: "" # No prefix - shared namespace
# Health check configuration
healthCheck:
readiness:
path: /health/ready
initialDelaySeconds: 10
periodSeconds: 15
liveness:
path: /health/live
initialDelaySeconds: 30
periodSeconds: 10
# Resource allocation (half of normal for blue/green)
resources:
api:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 2000m
memory: 2Gi
scanner:
requests:
cpu: 1000m
memory: 1Gi
limits:
cpu: 4000m
memory: 4Gi
# Replica count (half of normal for blue/green)
replicaCount:
api: 2
web: 2
scanner: 2
signer: 1
attestor: 1


@@ -1,126 +0,0 @@
# Blue/Green Deployment: Green Environment
# Use this file alongside values-prod.yaml for the green (new version) environment
#
# Deploy with:
#   helm upgrade stellaops-green ./deploy/helm/stellaops \
#     --namespace stellaops-green \
#     --create-namespace \
#     --values deploy/helm/stellaops/values-prod.yaml \
#     --values deploy/helm/stellaops/values-bluegreen-green.yaml \
# --set global.release.version="NEW_VERSION" \
# --wait
# Environment identification
global:
profile: prod-green
labels:
stellaops.io/environment: green
stellaops.io/deployment-strategy: blue-green
# Deployment identification
deployment:
environment: green
color: green
namespace: stellaops-green
# Ingress for green - starts as canary with 0% weight
ingress:
enabled: true
hosts:
- host: stellaops-green.example.com
path: /
servicePort: 80
annotations:
# Canary ingress for gradual traffic shifting
nginx.ingress.kubernetes.io/canary: "true"
nginx.ingress.kubernetes.io/canary-weight: "0"
# Optional: header-based routing for testing
nginx.ingress.kubernetes.io/canary-by-header: "X-Canary"
nginx.ingress.kubernetes.io/canary-by-header-value: "green"
# Canary ingress for production hostname (traffic shifting)
canaryIngress:
enabled: true
host: stellaops.example.com
annotations:
nginx.ingress.kubernetes.io/canary: "true"
nginx.ingress.kubernetes.io/canary-weight: "0" # Start at 0%, increase during cutover
# Service naming for traffic routing
services:
api:
name: stellaops-green-api
web:
name: stellaops-green-web
scanner:
name: stellaops-green-scanner
# Pod labels for service selector
podLabels:
stellaops.io/color: green
# Shared resources (same for both blue and green)
database:
# IMPORTANT: Blue and Green share the same database
# Ensure migrations are N-1 compatible
host: postgres.shared.svc.cluster.local
database: stellaops_production
# Connection pool tuning for blue/green (half of normal)
pool:
minSize: 5
maxSize: 25
valkey:
# Separate Valkey (Redis-compatible) instance per environment to avoid cache conflicts
host: valkey-green.stellaops-green.svc.cluster.local
database: 0
evidence:
storage:
# IMPORTANT: Shared evidence storage for continuity
bucket: stellaops-evidence-production
prefix: "" # No prefix - shared namespace
# Health check configuration
healthCheck:
readiness:
path: /health/ready
initialDelaySeconds: 10
periodSeconds: 15
liveness:
path: /health/live
initialDelaySeconds: 30
periodSeconds: 10
# Resource allocation (half of normal for blue/green)
resources:
api:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 2000m
memory: 2Gi
scanner:
requests:
cpu: 1000m
memory: 1Gi
limits:
cpu: 4000m
memory: 4Gi
# Replica count (half of normal for blue/green)
replicaCount:
api: 2
web: 2
scanner: 2
signer: 1
attestor: 1
# Migration jobs - enable for green environment
migrations:
enabled: true
# Run migrations before main deployment
preUpgrade:
enabled: true
backoffLimit: 3
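# Example cutover step for shifting traffic to green (a sketch; the ingress name is an
# assumption - use whatever name the chart renders for the canary ingress on stellaops.example.com):
#   kubectl -n stellaops-green annotate ingress stellaops-green-canary \
#     nginx.ingress.kubernetes.io/canary-weight="25" --overwrite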


@@ -1,84 +0,0 @@
# Console (Angular SPA) values overlay
# Use: helm install stellaops . -f values-console.yaml
console:
enabled: true
image: registry.stella-ops.org/stellaops/console:2025.10.0-edge
replicas: 1
port: 8080
# Backend API URL injected via config.json at startup
apiBaseUrl: ""
# Authority URL for OAuth/OIDC
authorityUrl: ""
# Tenant header name
tenantHeader: "X-StellaOps-Tenant"
# Resource limits (nginx is lightweight)
resources:
limits:
cpu: "200m"
memory: "128Mi"
requests:
cpu: "50m"
memory: "64Mi"
# Service configuration
service:
type: ClusterIP
port: 80
targetPort: 8080
# Ingress configuration (enable for external access)
ingress:
enabled: false
className: nginx
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "10m"
hosts:
- host: console.local
paths:
- path: /
pathType: Prefix
tls: []
# Health probes
livenessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 10
periodSeconds: 30
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 5
periodSeconds: 10
# Pod security context (non-root per DOCKER-44-001)
securityContext:
runAsNonRoot: true
runAsUser: 101
runAsGroup: 101
fsGroup: 101
# Container security context
containerSecurityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
# Volume mounts for nginx temp directories (RO rootfs)
volumeMounts:
- name: nginx-cache
mountPath: /var/cache/nginx
- name: nginx-run
mountPath: /var/run
volumes:
- name: nginx-cache
emptyDir: {}
- name: nginx-run
emptyDir: {}

View File

@@ -1,266 +0,0 @@
global:
profile: dev
release:
version: "2025.10.0-edge"
channel: edge
manifestSha256: "822f82987529ea38d2321dbdd2ef6874a4062a117116a20861c26a8df1807beb"
image:
pullPolicy: IfNotPresent
labels:
stellaops.io/channel: edge
telemetry:
collector:
enabled: true
defaultTenant: dev
tls:
secretName: stellaops-otel-tls
configMaps:
notify-config:
data:
notify.yaml: |
storage:
driver: postgres
connectionString: "Host=stellaops-postgres;Port=5432;Database=notify;Username=stellaops;Password=stellaops"
commandTimeoutSeconds: 30
authority:
enabled: true
issuer: "https://authority.dev.stella-ops.local"
metadataAddress: "https://authority.dev.stella-ops.local/.well-known/openid-configuration"
requireHttpsMetadata: false
allowAnonymousFallback: false
backchannelTimeoutSeconds: 30
tokenClockSkewSeconds: 60
audiences:
- notify.dev
readScope: notify.read
adminScope: notify.admin
api:
basePath: "/api/v1/notify"
internalBasePath: "/internal/notify"
tenantHeader: "X-StellaOps-Tenant"
plugins:
baseDirectory: "../"
directory: "plugins/notify"
searchPatterns:
- "StellaOps.Notify.Connectors.*.dll"
orderedPlugins:
- StellaOps.Notify.Connectors.Slack
- StellaOps.Notify.Connectors.Teams
- StellaOps.Notify.Connectors.Email
- StellaOps.Notify.Connectors.Webhook
telemetry:
enableRequestLogging: true
minimumLogLevel: Debug
policy-engine-activation:
data:
STELLAOPS_POLICY_ENGINE__ACTIVATION__FORCETWOPERSONAPPROVAL: "false"
STELLAOPS_POLICY_ENGINE__ACTIVATION__DEFAULTREQUIRESTWOPERSONAPPROVAL: "false"
STELLAOPS_POLICY_ENGINE__ACTIVATION__EMITAUDITLOGS: "true"
services:
authority:
image: registry.stella-ops.org/stellaops/authority@sha256:a8e8faec44a579aa5714e58be835f25575710430b1ad2ccd1282a018cd9ffcdd
service:
port: 8440
env:
STELLAOPS_AUTHORITY__ISSUER: "https://stellaops-authority:8440"
STELLAOPS_AUTHORITY__STORAGE__DRIVER: "postgres"
STELLAOPS_AUTHORITY__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=authority;Username=stellaops;Password=stellaops"
STELLAOPS_AUTHORITY__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins"
STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins"
signer:
image: registry.stella-ops.org/stellaops/signer@sha256:8bfef9a75783883d49fc18e3566553934e970b00ee090abee9cb110d2d5c3298
service:
port: 8441
env:
SIGNER__AUTHORITY__BASEURL: "https://stellaops-authority:8440"
SIGNER__POE__INTROSPECTURL: "https://licensing.svc.local/introspect"
SIGNER__STORAGE__DRIVER: "postgres"
SIGNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=signer;Username=stellaops;Password=stellaops"
SIGNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
attestor:
image: registry.stella-ops.org/stellaops/attestor@sha256:5cc417948c029da01dccf36e4645d961a3f6d8de7e62fe98d845f07cd2282114
service:
port: 8442
env:
ATTESTOR__SIGNER__BASEURL: "https://stellaops-signer:8441"
ATTESTOR__STORAGE__DRIVER: "postgres"
ATTESTOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=attestor;Username=stellaops;Password=stellaops"
ATTESTOR__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
concelier:
image: registry.stella-ops.org/stellaops/concelier@sha256:dafef3954eb4b837e2c424dd2d23e1e4d60fa83794840fac9cd3dea1d43bd085
service:
port: 8445
env:
CONCELIER__STORAGE__DRIVER: "postgres"
CONCELIER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=concelier;Username=stellaops;Password=stellaops"
CONCELIER__STORAGE__S3__ENDPOINT: "http://stellaops-rustfs:8080"
CONCELIER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
CONCELIER__AUTHORITY__BASEURL: "https://stellaops-authority:8440"
volumeMounts:
- name: concelier-jobs
mountPath: /var/lib/concelier/jobs
volumes:
- name: concelier-jobs
emptyDir: {}
scanner-web:
image: registry.stella-ops.org/stellaops/scanner-web@sha256:e0dfdb087e330585a5953029fb4757f5abdf7610820a085bd61b457dbead9a11
service:
port: 8444
env:
SCANNER__STORAGE__DRIVER: "postgres"
SCANNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=scanner;Username=stellaops;Password=stellaops"
SCANNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
SCANNER__QUEUE__BROKER: "valkey://stellaops-valkey:6379"
SCANNER__EVENTS__ENABLED: "false"
SCANNER__EVENTS__DRIVER: "valkey"
SCANNER__EVENTS__DSN: "stellaops-valkey:6379"
SCANNER__EVENTS__STREAM: "stella.events"
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "5"
SCANNER__EVENTS__MAXSTREAMLENGTH: "10000"
SCANNER__OFFLINEKIT__ENABLED: "false"
SCANNER__OFFLINEKIT__REQUIREDSSE: "true"
SCANNER__OFFLINEKIT__REKOROFFLINEMODE: "true"
SCANNER__OFFLINEKIT__TRUSTROOTDIRECTORY: "/etc/stellaops/trust-roots"
SCANNER__OFFLINEKIT__REKORSNAPSHOTDIRECTORY: "/var/lib/stellaops/rekor-snapshot"
SCANNER_SURFACE_FS_ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER_SURFACE_CACHE_ROOT: "/var/lib/stellaops/surface"
SCANNER_SURFACE_SECRETS_PROVIDER: "inline"
SCANNER_SURFACE_SECRETS_ROOT: ""
scanner-worker:
image: registry.stella-ops.org/stellaops/scanner-worker@sha256:92dda42f6f64b2d9522104a5c9ffb61d37b34dd193132b68457a259748008f37
env:
SCANNER__STORAGE__DRIVER: "postgres"
SCANNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=scanner;Username=stellaops;Password=stellaops"
SCANNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
SCANNER__QUEUE__BROKER: "valkey://stellaops-valkey:6379"
SCANNER__EVENTS__ENABLED: "false"
SCANNER__EVENTS__DRIVER: "valkey"
SCANNER__EVENTS__DSN: "stellaops-valkey:6379"
SCANNER__EVENTS__STREAM: "stella.events"
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "5"
SCANNER__EVENTS__MAXSTREAMLENGTH: "10000"
SCANNER_SURFACE_FS_ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER_SURFACE_CACHE_ROOT: "/var/lib/stellaops/surface"
SCANNER_SURFACE_SECRETS_PROVIDER: "inline"
SCANNER_SURFACE_SECRETS_ROOT: ""
notify-web:
image: registry.stella-ops.org/stellaops/notify-web:2025.10.0-edge
service:
port: 8446
env:
DOTNET_ENVIRONMENT: Development
NOTIFY__QUEUE__DRIVER: "valkey"
NOTIFY__QUEUE__VALKEY__URL: "stellaops-valkey:6379"
configMounts:
- name: notify-config
mountPath: /app/etc/notify.yaml
subPath: notify.yaml
configMap: notify-config
excititor:
image: registry.stella-ops.org/stellaops/excititor@sha256:d9bd5cadf1eab427447ce3df7302c30ded837239771cc6433b9befb895054285
env:
EXCITITOR__CONCELIER__BASEURL: "https://stellaops-concelier:8445"
EXCITITOR__STORAGE__DRIVER: "postgres"
EXCITITOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=excititor;Username=stellaops;Password=stellaops"
advisory-ai-web:
image: registry.stella-ops.org/stellaops/advisory-ai-web:2025.10.0-edge
service:
port: 8448
env:
ADVISORYAI__AdvisoryAI__SbomBaseAddress: http://stellaops-scanner-web:8444
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: /var/lib/advisory-ai/queue
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: /var/lib/advisory-ai/plans
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: /var/lib/advisory-ai/outputs
ADVISORYAI__AdvisoryAI__Inference__Mode: Local
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: ""
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: ""
volumeMounts:
- name: advisory-ai-data
mountPath: /var/lib/advisory-ai
volumeClaims:
- name: advisory-ai-data
claimName: stellaops-advisory-ai-data
advisory-ai-worker:
image: registry.stella-ops.org/stellaops/advisory-ai-worker:2025.10.0-edge
env:
ADVISORYAI__AdvisoryAI__SbomBaseAddress: http://stellaops-scanner-web:8444
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: /var/lib/advisory-ai/queue
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: /var/lib/advisory-ai/plans
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: /var/lib/advisory-ai/outputs
ADVISORYAI__AdvisoryAI__Inference__Mode: Local
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: ""
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: ""
volumeMounts:
- name: advisory-ai-data
mountPath: /var/lib/advisory-ai
volumeClaims:
- name: advisory-ai-data
claimName: stellaops-advisory-ai-data
web-ui:
image: registry.stella-ops.org/stellaops/web-ui@sha256:38b225fa7767a5b94ebae4dae8696044126aac429415e93de514d5dd95748dcf
service:
port: 8443
env:
STELLAOPS_UI__BACKEND__BASEURL: "https://stellaops-scanner-web:8444"
# Infrastructure services
postgres:
class: infrastructure
image: docker.io/library/postgres@sha256:8e97b8526ed19304b144f7478bc9201646acf0723cdc6e4b19bc9eb34879a27e
service:
port: 5432
env:
POSTGRES_USER: stellaops
POSTGRES_PASSWORD: stellaops
POSTGRES_DB: stellaops
volumeMounts:
- name: postgres-data
mountPath: /var/lib/postgresql/data
volumes:
- name: postgres-data
emptyDir: {}
valkey:
class: infrastructure
image: docker.io/valkey/valkey:9.0.1-alpine
service:
port: 6379
command:
- valkey-server
- --appendonly
- "yes"
volumeMounts:
- name: valkey-data
mountPath: /data
volumes:
- name: valkey-data
emptyDir: {}
rustfs:
class: infrastructure
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
service:
port: 8080
env:
RUSTFS__LOG__LEVEL: info
RUSTFS__STORAGE__PATH: /data
volumeMounts:
- name: rustfs-data
mountPath: /data
volumes:
- name: rustfs-data
emptyDir: {}
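# For quick local checks against this dev profile, the UI and scanner API can be
# reached via port-forward. The service names follow the stellaops-<service> pattern
# used in the env vars above and are an assumption about the rendered chart:
#   kubectl -n stellaops port-forward svc/stellaops-web-ui 8443:8443
#   kubectl -n stellaops port-forward svc/stellaops-scanner-web 8444:8444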

View File

@@ -1,14 +0,0 @@
exportcenter:
image:
repository: registry.stella-ops.org/export-center
tag: latest
objectStorage:
endpoint: http://rustfs:8080
bucket: export-prod
accessKeySecret: exportcenter-rustfs
secretKeySecret: exportcenter-rustfs
signing:
kmsKey: exportcenter-kms
kmsRegion: us-east-1
dsse:
enabled: true

View File

@@ -1,58 +0,0 @@
# Exporter (Export Center) values overlay
# Use: helm install stellaops . -f values-exporter.yaml
exporter:
enabled: true
image: registry.stella-ops.org/stellaops/exporter:2025.10.0-edge
replicas: 1
port: 8080
# Export configuration
storage:
# Object store for export artifacts
endpoint: ""
bucket: "stellaops-exports"
region: ""
# Retention policy
retention:
defaultDays: 30
maxDays: 365
resources:
limits:
cpu: "500m"
memory: "512Mi"
requests:
cpu: "100m"
memory: "256Mi"
service:
type: ClusterIP
port: 8080
livenessProbe:
httpGet:
path: /health/liveness
port: 8080
initialDelaySeconds: 10
periodSeconds: 30
readinessProbe:
httpGet:
path: /health/readiness
port: 8080
initialDelaySeconds: 5
periodSeconds: 10
securityContext:
runAsNonRoot: true
runAsUser: 10001
runAsGroup: 10001
containerSecurityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL

View File

@@ -1,59 +0,0 @@
# Ledger (Findings Ledger) values overlay
# Use: helm install stellaops . -f values-ledger.yaml
ledger:
enabled: true
image: registry.stella-ops.org/stellaops/findings-ledger:2025.10.0-edge
replicas: 1
port: 8080
# Database configuration
postgres:
host: ""
port: 5432
database: "stellaops_ledger"
schema: "findings"
# Connection string override (takes precedence)
connectionString: ""
# Tenant isolation
multiTenant: true
defaultTenant: "default"
resources:
limits:
cpu: "1000m"
memory: "1Gi"
requests:
cpu: "200m"
memory: "512Mi"
service:
type: ClusterIP
port: 8080
livenessProbe:
httpGet:
path: /health/liveness
port: 8080
initialDelaySeconds: 15
periodSeconds: 30
readinessProbe:
httpGet:
path: /health/readiness
port: 8080
initialDelaySeconds: 10
periodSeconds: 10
securityContext:
runAsNonRoot: true
runAsUser: 10001
runAsGroup: 10001
containerSecurityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL

View File

@@ -1,305 +0,0 @@
global:
profile: mirror-managed
release:
version: "2025.10.0-edge"
channel: edge
manifestSha256: "822f82987529ea38d2321dbdd2ef6874a4062a117116a20861c26a8df1807beb"
image:
pullPolicy: IfNotPresent
labels:
stellaops.io/channel: edge
configMaps:
mirror-gateway:
data:
mirror.conf: |
proxy_cache_path /var/cache/nginx/mirror levels=1:2 keys_zone=mirror_cache:100m max_size=10g inactive=12h use_temp_path=off;
map $request_uri $mirror_cache_key {
default $scheme$request_method$host$request_uri;
}
upstream concelier_backend {
server stellaops-concelier:8445;
keepalive 32;
}
upstream excititor_backend {
server stellaops-excititor:8448;
keepalive 32;
}
server {
listen 80;
server_name _;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl http2;
server_name mirror-primary.stella-ops.org;
ssl_certificate /etc/nginx/tls/mirror-primary.crt;
ssl_certificate_key /etc/nginx/tls/mirror-primary.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
auth_basic "StellaOps Mirror primary";
auth_basic_user_file /etc/nginx/secrets/mirror-primary.htpasswd;
include /etc/nginx/conf.d/mirror-locations.conf;
}
server {
listen 443 ssl http2;
server_name mirror-community.stella-ops.org;
ssl_certificate /etc/nginx/tls/mirror-community.crt;
ssl_certificate_key /etc/nginx/tls/mirror-community.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
auth_basic "StellaOps Mirror community";
auth_basic_user_file /etc/nginx/secrets/mirror-community.htpasswd;
include /etc/nginx/conf.d/mirror-locations.conf;
}
mirror-locations.conf: |
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_redirect off;
add_header X-Cache-Status $upstream_cache_status always;
location = /healthz {
default_type application/json;
return 200 '{"status":"ok"}';
}
location /concelier/exports/ {
proxy_pass http://concelier_backend/concelier/exports/;
proxy_cache mirror_cache;
proxy_cache_key $mirror_cache_key;
proxy_cache_valid 200 5m;
proxy_cache_valid 404 1m;
add_header Cache-Control "public, max-age=300, immutable" always;
}
location /concelier/ {
proxy_pass http://concelier_backend/concelier/;
proxy_cache off;
}
location /excititor/mirror/ {
proxy_pass http://excititor_backend/excititor/mirror/;
proxy_cache mirror_cache;
proxy_cache_key $mirror_cache_key;
proxy_cache_valid 200 5m;
proxy_cache_valid 404 1m;
add_header Cache-Control "public, max-age=300, immutable" always;
}
location /excititor/ {
proxy_pass http://excititor_backend/excititor/;
proxy_cache off;
}
location / {
return 404;
}
policy-engine-activation:
data:
STELLAOPS_POLICY_ENGINE__ACTIVATION__FORCETWOPERSONAPPROVAL: "true"
STELLAOPS_POLICY_ENGINE__ACTIVATION__DEFAULTREQUIRESTWOPERSONAPPROVAL: "true"
STELLAOPS_POLICY_ENGINE__ACTIVATION__EMITAUDITLOGS: "true"
services:
concelier:
image: registry.stella-ops.org/stellaops/concelier@sha256:dafef3954eb4b837e2c424dd2d23e1e4d60fa83794840fac9cd3dea1d43bd085
service:
port: 8445
env:
ASPNETCORE_URLS: "http://+:8445"
CONCELIER__STORAGE__DRIVER: "postgres"
CONCELIER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=concelier;Username=stellaops;Password=stellaops"
CONCELIER__STORAGE__S3__ENDPOINT: "http://stellaops-rustfs:8080"
CONCELIER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
CONCELIER__TELEMETRY__SERVICENAME: "stellaops-concelier-mirror"
CONCELIER__MIRROR__ENABLED: "true"
CONCELIER__MIRROR__EXPORTROOT: "/exports/json"
CONCELIER__MIRROR__LATESTDIRECTORYNAME: "latest"
CONCELIER__MIRROR__MIRRORDIRECTORYNAME: "mirror"
CONCELIER__MIRROR__REQUIREAUTHENTICATION: "true"
CONCELIER__MIRROR__MAXINDEXREQUESTSPERHOUR: "600"
CONCELIER__MIRROR__DOMAINS__0__ID: "primary"
CONCELIER__MIRROR__DOMAINS__0__DISPLAYNAME: "Primary Mirror"
CONCELIER__MIRROR__DOMAINS__0__REQUIREAUTHENTICATION: "true"
CONCELIER__MIRROR__DOMAINS__0__MAXDOWNLOADREQUESTSPERHOUR: "3600"
CONCELIER__MIRROR__DOMAINS__1__ID: "community"
CONCELIER__MIRROR__DOMAINS__1__DISPLAYNAME: "Community Mirror"
CONCELIER__MIRROR__DOMAINS__1__REQUIREAUTHENTICATION: "false"
CONCELIER__MIRROR__DOMAINS__1__MAXDOWNLOADREQUESTSPERHOUR: "1800"
CONCELIER__AUTHORITY__ENABLED: "true"
CONCELIER__AUTHORITY__ALLOWANONYMOUSFALLBACK: "false"
CONCELIER__AUTHORITY__ISSUER: "https://authority.stella-ops.org"
CONCELIER__AUTHORITY__METADATAADDRESS: ""
CONCELIER__AUTHORITY__CLIENTID: "stellaops-concelier-mirror"
CONCELIER__AUTHORITY__CLIENTSECRETFILE: "/run/secrets/concelier-authority-client"
CONCELIER__AUTHORITY__CLIENTSCOPES__0: "concelier.mirror.read"
CONCELIER__AUTHORITY__AUDIENCES__0: "api://concelier.mirror"
CONCELIER__AUTHORITY__BYPASSNETWORKS__0: "10.0.0.0/8"
CONCELIER__AUTHORITY__BYPASSNETWORKS__1: "127.0.0.1/32"
CONCELIER__AUTHORITY__BYPASSNETWORKS__2: "::1/128"
CONCELIER__AUTHORITY__RESILIENCE__ENABLERETRIES: "true"
CONCELIER__AUTHORITY__RESILIENCE__RETRYDELAYS__0: "00:00:01"
CONCELIER__AUTHORITY__RESILIENCE__RETRYDELAYS__1: "00:00:02"
CONCELIER__AUTHORITY__RESILIENCE__RETRYDELAYS__2: "00:00:05"
CONCELIER__AUTHORITY__RESILIENCE__ALLOWOFFLINECACHEFALLBACK: "true"
CONCELIER__AUTHORITY__RESILIENCE__OFFLINECACHETOLERANCE: "00:10:00"
volumeMounts:
- name: concelier-jobs
mountPath: /var/lib/concelier/jobs
- name: concelier-exports
mountPath: /exports/json
- name: concelier-secrets
mountPath: /run/secrets
readOnly: true
volumes:
- name: concelier-jobs
persistentVolumeClaim:
claimName: concelier-mirror-jobs
- name: concelier-exports
persistentVolumeClaim:
claimName: concelier-mirror-exports
- name: concelier-secrets
secret:
secretName: concelier-mirror-auth
excititor:
image: registry.stella-ops.org/stellaops/excititor@sha256:d9bd5cadf1eab427447ce3df7302c30ded837239771cc6433b9befb895054285
env:
ASPNETCORE_URLS: "http://+:8448"
EXCITITOR__STORAGE__DRIVER: "postgres"
EXCITITOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=excititor;Username=stellaops;Password=stellaops"
EXCITITOR__ARTIFACTS__FILESYSTEM__ROOT: "/exports"
EXCITITOR__ARTIFACTS__FILESYSTEM__OVERWRITEEXISTING: "false"
EXCITITOR__MIRROR__DOMAINS__0__ID: "primary"
EXCITITOR__MIRROR__DOMAINS__0__DISPLAYNAME: "Primary Mirror"
EXCITITOR__MIRROR__DOMAINS__0__REQUIREAUTHENTICATION: "true"
EXCITITOR__MIRROR__DOMAINS__0__MAXINDEXREQUESTSPERHOUR: "300"
EXCITITOR__MIRROR__DOMAINS__0__MAXDOWNLOADREQUESTSPERHOUR: "2400"
EXCITITOR__MIRROR__DOMAINS__0__EXPORTS__0__KEY: "consensus-json"
EXCITITOR__MIRROR__DOMAINS__0__EXPORTS__0__FORMAT: "json"
EXCITITOR__MIRROR__DOMAINS__0__EXPORTS__0__VIEW: "consensus"
EXCITITOR__MIRROR__DOMAINS__0__EXPORTS__1__KEY: "consensus-openvex"
EXCITITOR__MIRROR__DOMAINS__0__EXPORTS__1__FORMAT: "openvex"
EXCITITOR__MIRROR__DOMAINS__0__EXPORTS__1__VIEW: "consensus"
EXCITITOR__MIRROR__DOMAINS__1__ID: "community"
EXCITITOR__MIRROR__DOMAINS__1__DISPLAYNAME: "Community Mirror"
EXCITITOR__MIRROR__DOMAINS__1__REQUIREAUTHENTICATION: "false"
EXCITITOR__MIRROR__DOMAINS__1__MAXINDEXREQUESTSPERHOUR: "120"
EXCITITOR__MIRROR__DOMAINS__1__MAXDOWNLOADREQUESTSPERHOUR: "600"
EXCITITOR__MIRROR__DOMAINS__1__EXPORTS__0__KEY: "community-consensus"
EXCITITOR__MIRROR__DOMAINS__1__EXPORTS__0__FORMAT: "json"
EXCITITOR__MIRROR__DOMAINS__1__EXPORTS__0__VIEW: "consensus"
volumeMounts:
- name: excititor-exports
mountPath: /exports
- name: excititor-secrets
mountPath: /run/secrets
readOnly: true
volumes:
- name: excititor-exports
persistentVolumeClaim:
claimName: excititor-mirror-exports
- name: excititor-secrets
secret:
secretName: excititor-mirror-auth
# Infrastructure services
postgres:
class: infrastructure
image: docker.io/library/postgres@sha256:8e97b8526ed19304b144f7478bc9201646acf0723cdc6e4b19bc9eb34879a27e
service:
port: 5432
env:
POSTGRES_USER: stellaops
POSTGRES_PASSWORD: stellaops
POSTGRES_DB: stellaops
volumeMounts:
- name: postgres-data
mountPath: /var/lib/postgresql/data
volumeClaims:
- name: postgres-data
claimName: mirror-postgres-data
valkey:
class: infrastructure
image: docker.io/valkey/valkey:9.0.1-alpine
service:
port: 6379
command:
- valkey-server
- --appendonly
- "yes"
volumeMounts:
- name: valkey-data
mountPath: /data
volumeClaims:
- name: valkey-data
claimName: mirror-valkey-data
rustfs:
class: infrastructure
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
service:
port: 8080
command:
- serve
- --listen
- 0.0.0.0:8080
- --root
- /data
env:
RUSTFS__LOG__LEVEL: info
RUSTFS__STORAGE__PATH: /data
volumeMounts:
- name: rustfs-data
mountPath: /data
volumeClaims:
- name: rustfs-data
claimName: mirror-rustfs-data
mirror-gateway:
image: docker.io/library/nginx@sha256:208b70eefac13ee9be00e486f79c695b15cef861c680527171a27d253d834be9
service:
type: LoadBalancer
port: 443
portName: https
targetPort: 443
configMounts:
- name: mirror-gateway-conf
mountPath: /etc/nginx/conf.d
configMap: mirror-gateway
volumeMounts:
- name: mirror-gateway-tls
mountPath: /etc/nginx/tls
readOnly: true
- name: mirror-gateway-secrets
mountPath: /etc/nginx/secrets
readOnly: true
- name: mirror-cache
mountPath: /var/cache/nginx
volumes:
- name: mirror-gateway-tls
secret:
secretName: mirror-gateway-tls
- name: mirror-gateway-secrets
secret:
secretName: mirror-gateway-htpasswd
- name: mirror-cache
emptyDir: {}
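# The gateway mounts two pre-created secrets; the expected file names follow the nginx
# config above, while the namespace and user names are illustrative. Minimal sketch:
#   htpasswd -Bc mirror-primary.htpasswd mirror-user
#   htpasswd -Bc mirror-community.htpasswd mirror-user
#   kubectl -n stellaops create secret generic mirror-gateway-htpasswd \
#     --from-file=mirror-primary.htpasswd --from-file=mirror-community.htpasswd
#   kubectl -n stellaops create secret generic mirror-gateway-tls \
#     --from-file=mirror-primary.crt --from-file=mirror-primary.key \
#     --from-file=mirror-community.crt --from-file=mirror-community.key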

View File

@@ -1,18 +0,0 @@
mock:
enabled: true
orchestrator:
image: registry.stella-ops.org/stellaops/orchestrator@sha256:97f12856ce870bafd3328bda86833bcccbf56d255941d804966b5557f6610119
policyRegistry:
image: registry.stella-ops.org/stellaops/policy-registry@sha256:c6cad8055e9827ebcbebb6ad4d6866dce4b83a0a49b0a8a6500b736a5cb26fa7
packsRegistry:
image: registry.stella-ops.org/stellaops/packs-registry@sha256:1f5e9416c4dc608594ad6fad87c24d72134427f899c192b494e22b268499c791
taskRunner:
image: registry.stella-ops.org/stellaops/task-runner@sha256:eb5ad992b49a41554f41516be1a6afcfa6522faf2111c08ff2b3664ad2fc954b
vexLens:
image: registry.stella-ops.org/stellaops/vex-lens@sha256:b44e63ecfeebc345a70c073c1ce5ace709c58be0ffaad0e2862758aeee3092fb
issuerDirectory:
image: registry.stella-ops.org/stellaops/issuer-directory@sha256:67e8ef02c97d3156741e857756994888f30c373ace8e84886762edba9dc51914
findingsLedger:
image: registry.stella-ops.org/stellaops/findings-ledger@sha256:71d4c361ba8b2f8b69d652597bc3f2efc8a64f93fab854ce25272a88506df49c
vulnExplorerApi:
image: registry.stella-ops.org/stellaops/vuln-explorer-api@sha256:7fc7e43a05cbeb0106ce7d4d634612e83de6fdc119aaab754a71c1d60b82841d

View File

@@ -1,15 +0,0 @@
notify:
image:
repository: registry.stella-ops.org/notify
tag: latest
smtp:
host: smtp.example.com
port: 587
usernameSecret: notify-smtp
passwordSecret: notify-smtp
webhook:
allowedHosts: ["https://hooks.slack.com"]
chat:
webhookSecret: notify-chat
tls:
secretName: notify-tls
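# The secrets referenced above must exist before install. A hedged sketch; the key
# names inside each secret are assumptions, not defined by this overlay:
#   kubectl create secret generic notify-smtp \
#     --from-literal=username=<smtp-user> --from-literal=password=<smtp-pass>
#   kubectl create secret generic notify-chat --from-literal=webhook=<chat-webhook-url>
#   kubectl create secret tls notify-tls --cert=notify.crt --key=notify.key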

View File

@@ -1,209 +0,0 @@
# Orchestrator Service Helm Values Overlay
# Enables job scheduling, DAG planning, and worker coordination.
#
# Usage:
# helm upgrade stellaops ./stellaops -f values.yaml -f values-orchestrator.yaml
global:
labels:
stellaops.io/component: orchestrator
# Orchestrator-specific ConfigMaps
configMaps:
orchestrator-config:
data:
orchestrator.yaml: |
Orchestrator:
# Telemetry configuration
telemetry:
minimumLogLevel: Information
enableRequestLogging: true
otelEndpoint: ""
# Authority integration (disable for standalone testing)
authority:
enabled: true
issuer: https://authority.svc.cluster.local/realms/stellaops
requireHttpsMetadata: true
audiences:
- stellaops-platform
readScope: orchestrator:read
writeScope: orchestrator:write
adminScope: orchestrator:admin
# Tenant resolution
tenantHeader: X-StellaOps-Tenant
# PostgreSQL connection
storage:
connectionString: "Host=orchestrator-postgres;Database=stellaops_orchestrator;Username=orchestrator;Password=${POSTGRES_PASSWORD}"
commandTimeoutSeconds: 60
enableSensitiveDataLogging: false
# Scheduler configuration
scheduler:
# Maximum concurrent jobs per tenant
defaultConcurrencyLimit: 100
# Default rate limit (requests per second)
defaultRateLimit: 50
# Job claim timeout before re-queue
claimTimeoutMinutes: 30
# Heartbeat interval for active jobs
heartbeatIntervalSeconds: 30
# Maximum heartbeat misses before job marked stale
maxHeartbeatMisses: 3
# Autoscaling configuration
autoscaling:
# Enable autoscaling metrics endpoint
enabled: true
# Queue depth threshold for scale-up signal
queueDepthThreshold: 10000
# Dispatch latency P95 threshold (ms)
latencyP95ThresholdMs: 150
# Scale-up cooldown period
scaleUpCooldownSeconds: 60
# Scale-down cooldown period
scaleDownCooldownSeconds: 300
# Load shedding configuration
loadShedding:
enabled: true
# Warning threshold (load factor)
warningThreshold: 0.8
# Critical threshold (load factor)
criticalThreshold: 1.0
# Emergency threshold (load factor)
emergencyThreshold: 1.5
# Recovery cooldown
recoveryCooldownSeconds: 30
# Dead letter configuration
deadLetter:
# Maximum replay attempts
maxReplayAttempts: 3
# Entry expiration (days)
expirationDays: 30
# Purge interval
purgeIntervalHours: 24
# Backfill configuration
backfill:
# Maximum concurrent backfill requests
maxConcurrentRequests: 5
# Default batch size
defaultBatchSize: 1000
# Maximum retention lookback (days)
maxRetentionDays: 90
# Service definitions
services:
orchestrator-web:
image: registry.stella-ops.org/stellaops/orchestrator-web:2025.10.0-edge
replicas: 2
service:
port: 8080
configMounts:
- name: orchestrator-config
configMap: orchestrator-config
mountPath: /app/etc/orchestrator.yaml
subPath: orchestrator.yaml
envFrom:
- secretRef:
name: orchestrator-secrets
env:
ASPNETCORE_ENVIRONMENT: Production
ORCHESTRATOR__CONFIG: /app/etc/orchestrator.yaml
ports:
- containerPort: 8080
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "1Gi"
cpu: "1000m"
readinessProbe:
httpGet:
path: /readyz
port: 8080
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
livenessProbe:
httpGet:
path: /livez
port: 8080
initialDelaySeconds: 10
periodSeconds: 20
timeoutSeconds: 5
failureThreshold: 3
startupProbe:
httpGet:
path: /startupz
port: 8080
initialDelaySeconds: 3
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 30
orchestrator-worker:
image: registry.stella-ops.org/stellaops/orchestrator-worker:2025.10.0-edge
replicas: 1
configMounts:
- name: orchestrator-config
configMap: orchestrator-config
mountPath: /app/etc/orchestrator.yaml
subPath: orchestrator.yaml
envFrom:
- secretRef:
name: orchestrator-secrets
env:
DOTNET_ENVIRONMENT: Production
ORCHESTRATOR__CONFIG: /app/etc/orchestrator.yaml
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "500m"
orchestrator-postgres:
class: infrastructure
image: docker.io/library/postgres:16-alpine
service:
port: 5432
envFrom:
- secretRef:
name: orchestrator-postgres-secrets
env:
POSTGRES_DB: stellaops_orchestrator
POSTGRES_USER: orchestrator
volumeMounts:
- name: postgres-data
mountPath: /var/lib/postgresql/data
volumeClaims:
- name: postgres-data
claimName: orchestrator-postgres-data
readinessProbe:
exec:
command:
- pg_isready
- -U
- orchestrator
- -d
- stellaops_orchestrator
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
exec:
command:
- pg_isready
- -U
- orchestrator
- -d
- stellaops_orchestrator
initialDelaySeconds: 15
periodSeconds: 30
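# Both secretRefs above must exist before install. The key name is an assumption based
# on the ${POSTGRES_PASSWORD} placeholder in the orchestrator connection string:
#   kubectl create secret generic orchestrator-postgres-secrets \
#     --from-literal=POSTGRES_PASSWORD=<db-password>
#   kubectl create secret generic orchestrator-secrets \
#     --from-literal=POSTGRES_PASSWORD=<db-password>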

View File

@@ -1,356 +0,0 @@
global:
profile: prod
release:
version: "2025.09.2"
channel: stable
manifestSha256: "dc3c8fe1ab83941c838ccc5a8a5862f7ddfa38c2078e580b5649db26554565b7"
image:
pullPolicy: IfNotPresent
labels:
stellaops.io/channel: stable
stellaops.io/profile: prod
# Migration jobs for controlled rollouts (disabled by default)
migrations:
enabled: false
jobs: []
networkPolicy:
enabled: true
ingressPort: 8443
egressPort: 443
ingressNamespaces:
kubernetes.io/metadata.name: stellaops
egressNamespaces:
kubernetes.io/metadata.name: stellaops
ingress:
enabled: true
className: nginx
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "50m"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
hosts:
- host: gateway.prod.stella-ops.org
path: /
servicePort: 80
tls:
- secretName: stellaops-prod-tls
hosts:
- gateway.prod.stella-ops.org
externalSecrets:
enabled: true
secrets:
- name: core-secrets
storeRef:
name: stellaops-secret-store
kind: ClusterSecretStore
target:
name: stellaops-prod-core
data:
- key: STELLAOPS_AUTHORITY__JWT__SIGNINGKEY
remoteKey: prod/authority/jwt-signing-key
- key: STELLAOPS_SECRETS_ENCRYPTION_KEY
remoteKey: prod/core/secrets-encryption-key
prometheus:
enabled: true
path: /metrics
port: 8080
scheme: http
hpa:
enabled: true
minReplicas: 2
maxReplicas: 6
cpu:
targetPercentage: 70
memory:
targetPercentage: 75
configMaps:
notify-config:
data:
notify.yaml: |
storage:
driver: postgres
connectionString: "Host=stellaops-postgres;Port=5432;Database=notify;Username=stellaops;Password=stellaops"
commandTimeoutSeconds: 45
authority:
enabled: true
issuer: "https://authority.prod.stella-ops.org"
metadataAddress: "https://authority.prod.stella-ops.org/.well-known/openid-configuration"
requireHttpsMetadata: true
allowAnonymousFallback: false
backchannelTimeoutSeconds: 30
tokenClockSkewSeconds: 60
audiences:
- notify
readScope: notify.read
adminScope: notify.admin
api:
basePath: "/api/v1/notify"
internalBasePath: "/internal/notify"
tenantHeader: "X-StellaOps-Tenant"
plugins:
baseDirectory: "/opt/stellaops"
directory: "plugins/notify"
searchPatterns:
- "StellaOps.Notify.Connectors.*.dll"
orderedPlugins:
- StellaOps.Notify.Connectors.Slack
- StellaOps.Notify.Connectors.Teams
- StellaOps.Notify.Connectors.Email
- StellaOps.Notify.Connectors.Webhook
telemetry:
enableRequestLogging: true
minimumLogLevel: Information
policy-engine-activation:
data:
STELLAOPS_POLICY_ENGINE__ACTIVATION__FORCETWOPERSONAPPROVAL: "true"
STELLAOPS_POLICY_ENGINE__ACTIVATION__DEFAULTREQUIRESTWOPERSONAPPROVAL: "true"
STELLAOPS_POLICY_ENGINE__ACTIVATION__EMITAUDITLOGS: "true"
services:
authority:
image: registry.stella-ops.org/stellaops/authority@sha256:b0348bad1d0b401cc3c71cb40ba034c8043b6c8874546f90d4783c9dbfcc0bf5
service:
port: 8440
env:
STELLAOPS_AUTHORITY__ISSUER: "https://authority.prod.stella-ops.org"
STELLAOPS_AUTHORITY__STORAGE__DRIVER: "postgres"
STELLAOPS_AUTHORITY__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=authority;Username=stellaops;Password=stellaops"
STELLAOPS_AUTHORITY__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins"
STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins"
envFrom:
- secretRef:
name: stellaops-prod-core
signer:
image: registry.stella-ops.org/stellaops/signer@sha256:8ad574e61f3a9e9bda8a58eb2700ae46813284e35a150b1137bc7c2b92ac0f2e
service:
port: 8441
env:
SIGNER__AUTHORITY__BASEURL: "https://stellaops-authority:8440"
SIGNER__POE__INTROSPECTURL: "https://licensing.prod.stella-ops.org/introspect"
SIGNER__STORAGE__DRIVER: "postgres"
SIGNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=signer;Username=stellaops;Password=stellaops"
SIGNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
envFrom:
- secretRef:
name: stellaops-prod-core
attestor:
image: registry.stella-ops.org/stellaops/attestor@sha256:0534985f978b0b5d220d73c96fddd962cd9135f616811cbe3bff4666c5af568f
service:
port: 8442
env:
ATTESTOR__SIGNER__BASEURL: "https://stellaops-signer:8441"
ATTESTOR__STORAGE__DRIVER: "postgres"
ATTESTOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=attestor;Username=stellaops;Password=stellaops"
ATTESTOR__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
envFrom:
- secretRef:
name: stellaops-prod-core
concelier:
image: registry.stella-ops.org/stellaops/concelier@sha256:c58cdcaee1d266d68d498e41110a589dd204b487d37381096bd61ab345a867c5
service:
port: 8445
env:
CONCELIER__STORAGE__DRIVER: "postgres"
CONCELIER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=concelier;Username=stellaops;Password=stellaops"
CONCELIER__STORAGE__S3__ENDPOINT: "http://stellaops-rustfs:8080"
CONCELIER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
CONCELIER__AUTHORITY__BASEURL: "https://stellaops-authority:8440"
envFrom:
- secretRef:
name: stellaops-prod-core
volumeMounts:
- name: concelier-jobs
mountPath: /var/lib/concelier/jobs
volumeClaims:
- name: concelier-jobs
claimName: stellaops-concelier-jobs
scanner-web:
image: registry.stella-ops.org/stellaops/scanner-web@sha256:14b23448c3f9586a9156370b3e8c1991b61907efa666ca37dd3aaed1e79fe3b7
service:
port: 8444
env:
SCANNER__STORAGE__DRIVER: "postgres"
SCANNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=scanner;Username=stellaops;Password=stellaops"
SCANNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
SCANNER__QUEUE__BROKER: "valkey://stellaops-valkey:6379"
SCANNER__EVENTS__ENABLED: "true"
SCANNER__EVENTS__DRIVER: "valkey"
SCANNER__EVENTS__DSN: "stellaops-valkey:6379"
SCANNER__EVENTS__STREAM: "stella.events"
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "5"
SCANNER__EVENTS__MAXSTREAMLENGTH: "10000"
SCANNER__OFFLINEKIT__ENABLED: "false"
SCANNER__OFFLINEKIT__REQUIREDSSE: "true"
SCANNER__OFFLINEKIT__REKOROFFLINEMODE: "true"
SCANNER__OFFLINEKIT__TRUSTROOTDIRECTORY: "/etc/stellaops/trust-roots"
SCANNER__OFFLINEKIT__REKORSNAPSHOTDIRECTORY: "/var/lib/stellaops/rekor-snapshot"
SCANNER_SURFACE_FS_ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER_SURFACE_CACHE_ROOT: "/var/lib/stellaops/surface"
SCANNER_SURFACE_SECRETS_PROVIDER: "kubernetes"
SCANNER_SURFACE_SECRETS_ROOT: "stellaops/scanner"
envFrom:
- secretRef:
name: stellaops-prod-core
scanner-worker:
image: registry.stella-ops.org/stellaops/scanner-worker@sha256:32e25e76386eb9ea8bee0a1ad546775db9a2df989fab61ac877e351881960dab
replicas: 3
env:
SCANNER__STORAGE__DRIVER: "postgres"
SCANNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=scanner;Username=stellaops;Password=stellaops"
SCANNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
SCANNER__QUEUE__BROKER: "valkey://stellaops-valkey:6379"
SCANNER__EVENTS__ENABLED: "true"
SCANNER__EVENTS__DRIVER: "valkey"
SCANNER__EVENTS__DSN: "stellaops-valkey:6379"
SCANNER__EVENTS__STREAM: "stella.events"
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "5"
SCANNER__EVENTS__MAXSTREAMLENGTH: "10000"
SCANNER_SURFACE_FS_ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER_SURFACE_CACHE_ROOT: "/var/lib/stellaops/surface"
SCANNER_SURFACE_SECRETS_PROVIDER: "kubernetes"
SCANNER_SURFACE_SECRETS_ROOT: "stellaops/scanner"
envFrom:
- secretRef:
name: stellaops-prod-core
notify-web:
image: registry.stella-ops.org/stellaops/notify-web:2025.09.2
service:
port: 8446
env:
DOTNET_ENVIRONMENT: Production
NOTIFY__QUEUE__DRIVER: "valkey"
NOTIFY__QUEUE__VALKEY__URL: "stellaops-valkey:6379"
envFrom:
- secretRef:
name: stellaops-prod-notify
configMounts:
- name: notify-config
mountPath: /app/etc/notify.yaml
subPath: notify.yaml
configMap: notify-config
excititor:
image: registry.stella-ops.org/stellaops/excititor@sha256:59022e2016aebcef5c856d163ae705755d3f81949d41195256e935ef40a627fa
env:
EXCITITOR__CONCELIER__BASEURL: "https://stellaops-concelier:8445"
EXCITITOR__STORAGE__DRIVER: "postgres"
EXCITITOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=excititor;Username=stellaops;Password=stellaops"
envFrom:
- secretRef:
name: stellaops-prod-core
advisory-ai-web:
image: registry.stella-ops.org/stellaops/advisory-ai-web:2025.09.2
service:
port: 8448
env:
ADVISORYAI__AdvisoryAI__SbomBaseAddress: https://stellaops-scanner-web:8444
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: /var/lib/advisory-ai/queue
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: /var/lib/advisory-ai/plans
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: /var/lib/advisory-ai/outputs
ADVISORYAI__AdvisoryAI__Inference__Mode: Local
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: ""
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: ""
envFrom:
- secretRef:
name: stellaops-prod-core
volumeMounts:
- name: advisory-ai-data
mountPath: /var/lib/advisory-ai
volumeClaims:
- name: advisory-ai-data
claimName: stellaops-advisory-ai-data
advisory-ai-worker:
image: registry.stella-ops.org/stellaops/advisory-ai-worker:2025.09.2
env:
ADVISORYAI__AdvisoryAI__SbomBaseAddress: https://stellaops-scanner-web:8444
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: /var/lib/advisory-ai/queue
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: /var/lib/advisory-ai/plans
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: /var/lib/advisory-ai/outputs
ADVISORYAI__AdvisoryAI__Inference__Mode: Local
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: ""
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: ""
envFrom:
- secretRef:
name: stellaops-prod-core
volumeMounts:
- name: advisory-ai-data
mountPath: /var/lib/advisory-ai
volumeClaims:
- name: advisory-ai-data
claimName: stellaops-advisory-ai-data
web-ui:
image: registry.stella-ops.org/stellaops/web-ui@sha256:10d924808c48e4353e3a241da62eb7aefe727a1d6dc830eb23a8e181013b3a23
service:
port: 8443
env:
STELLAOPS_UI__BACKEND__BASEURL: "https://stellaops-scanner-web:8444"
# Infrastructure services
postgres:
class: infrastructure
image: docker.io/library/postgres@sha256:8e97b8526ed19304b144f7478bc9201646acf0723cdc6e4b19bc9eb34879a27e
service:
port: 5432
env:
POSTGRES_USER: stellaops
POSTGRES_PASSWORD: stellaops
POSTGRES_DB: stellaops
volumeMounts:
- name: postgres-data
mountPath: /var/lib/postgresql/data
volumeClaims:
- name: postgres-data
claimName: stellaops-postgres-data
valkey:
class: infrastructure
image: docker.io/valkey/valkey:9.0.1-alpine
service:
port: 6379
command:
- valkey-server
- --appendonly
- "yes"
volumeMounts:
- name: valkey-data
mountPath: /data
volumeClaims:
- name: valkey-data
claimName: stellaops-valkey-data
rustfs:
class: infrastructure
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
service:
port: 8080
command:
- serve
- --listen
- 0.0.0.0:8080
- --root
- /data
env:
RUSTFS__LOG__LEVEL: info
RUSTFS__STORAGE__PATH: /data
volumeMounts:
- name: rustfs-data
mountPath: /data
volumeClaims:
- name: rustfs-data
claimName: stellaops-rustfs-data

View File

@@ -1,238 +0,0 @@
global:
profile: stage
release:
version: "2025.09.2"
channel: stable
manifestSha256: "dc3c8fe1ab83941c838ccc5a8a5862f7ddfa38c2078e580b5649db26554565b7"
image:
pullPolicy: IfNotPresent
labels:
stellaops.io/channel: stable
telemetry:
collector:
enabled: true
defaultTenant: stage
tls:
secretName: stellaops-otel-tls-stage
configMaps:
notify-config:
data:
notify.yaml: |
storage:
driver: postgres
connectionString: "Host=stellaops-postgres;Port=5432;Database=notify;Username=stellaops;Password=stellaops"
commandTimeoutSeconds: 45
authority:
enabled: true
issuer: "https://authority.stage.stella-ops.org"
metadataAddress: "https://authority.stage.stella-ops.org/.well-known/openid-configuration"
requireHttpsMetadata: true
allowAnonymousFallback: false
backchannelTimeoutSeconds: 30
tokenClockSkewSeconds: 60
audiences:
- notify
readScope: notify.read
adminScope: notify.admin
api:
basePath: "/api/v1/notify"
internalBasePath: "/internal/notify"
tenantHeader: "X-StellaOps-Tenant"
plugins:
baseDirectory: "/opt/stellaops"
directory: "plugins/notify"
searchPatterns:
- "StellaOps.Notify.Connectors.*.dll"
orderedPlugins:
- StellaOps.Notify.Connectors.Slack
- StellaOps.Notify.Connectors.Teams
- StellaOps.Notify.Connectors.Email
- StellaOps.Notify.Connectors.Webhook
telemetry:
enableRequestLogging: true
minimumLogLevel: Information
policy-engine-activation:
data:
STELLAOPS_POLICY_ENGINE__ACTIVATION__FORCETWOPERSONAPPROVAL: "true"
STELLAOPS_POLICY_ENGINE__ACTIVATION__DEFAULTREQUIRESTWOPERSONAPPROVAL: "true"
STELLAOPS_POLICY_ENGINE__ACTIVATION__EMITAUDITLOGS: "true"
services:
authority:
image: registry.stella-ops.org/stellaops/authority@sha256:b0348bad1d0b401cc3c71cb40ba034c8043b6c8874546f90d4783c9dbfcc0bf5
service:
port: 8440
env:
STELLAOPS_AUTHORITY__ISSUER: "https://stellaops-authority:8440"
STELLAOPS_AUTHORITY__STORAGE__DRIVER: "postgres"
STELLAOPS_AUTHORITY__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=authority;Username=stellaops;Password=stellaops"
STELLAOPS_AUTHORITY__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins"
STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins"
signer:
image: registry.stella-ops.org/stellaops/signer@sha256:8ad574e61f3a9e9bda8a58eb2700ae46813284e35a150b1137bc7c2b92ac0f2e
service:
port: 8441
env:
SIGNER__AUTHORITY__BASEURL: "https://stellaops-authority:8440"
SIGNER__POE__INTROSPECTURL: "https://licensing.stage.stella-ops.internal/introspect"
SIGNER__STORAGE__DRIVER: "postgres"
SIGNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=signer;Username=stellaops;Password=stellaops"
SIGNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
attestor:
image: registry.stella-ops.org/stellaops/attestor@sha256:0534985f978b0b5d220d73c96fddd962cd9135f616811cbe3bff4666c5af568f
service:
port: 8442
env:
ATTESTOR__SIGNER__BASEURL: "https://stellaops-signer:8441"
ATTESTOR__STORAGE__DRIVER: "postgres"
ATTESTOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=attestor;Username=stellaops;Password=stellaops"
ATTESTOR__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
concelier:
image: registry.stella-ops.org/stellaops/concelier@sha256:c58cdcaee1d266d68d498e41110a589dd204b487d37381096bd61ab345a867c5
service:
port: 8445
env:
CONCELIER__STORAGE__DRIVER: "postgres"
CONCELIER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=concelier;Username=stellaops;Password=stellaops"
CONCELIER__STORAGE__S3__ENDPOINT: "http://stellaops-rustfs:8080"
CONCELIER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
CONCELIER__AUTHORITY__BASEURL: "https://stellaops-authority:8440"
volumeMounts:
- name: concelier-jobs
mountPath: /var/lib/concelier/jobs
volumeClaims:
- name: concelier-jobs
claimName: stellaops-concelier-jobs
scanner-web:
image: registry.stella-ops.org/stellaops/scanner-web@sha256:14b23448c3f9586a9156370b3e8c1991b61907efa666ca37dd3aaed1e79fe3b7
service:
port: 8444
env:
SCANNER__STORAGE__DRIVER: "postgres"
SCANNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=scanner;Username=stellaops;Password=stellaops"
SCANNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
SCANNER__QUEUE__BROKER: "valkey://stellaops-valkey:6379"
SCANNER__EVENTS__ENABLED: "false"
SCANNER__EVENTS__DRIVER: "valkey"
SCANNER__EVENTS__DSN: "stellaops-valkey:6379"
SCANNER__EVENTS__STREAM: "stella.events"
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "5"
SCANNER__EVENTS__MAXSTREAMLENGTH: "10000"
SCANNER__OFFLINEKIT__ENABLED: "false"
SCANNER__OFFLINEKIT__REQUIREDSSE: "true"
SCANNER__OFFLINEKIT__REKOROFFLINEMODE: "true"
SCANNER__OFFLINEKIT__TRUSTROOTDIRECTORY: "/etc/stellaops/trust-roots"
SCANNER__OFFLINEKIT__REKORSNAPSHOTDIRECTORY: "/var/lib/stellaops/rekor-snapshot"
SCANNER_SURFACE_FS_ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER_SURFACE_CACHE_ROOT: "/var/lib/stellaops/surface"
SCANNER_SURFACE_SECRETS_PROVIDER: "kubernetes"
SCANNER_SURFACE_SECRETS_ROOT: "stellaops/scanner"
scanner-worker:
image: registry.stella-ops.org/stellaops/scanner-worker@sha256:32e25e76386eb9ea8bee0a1ad546775db9a2df989fab61ac877e351881960dab
replicas: 2
env:
SCANNER__STORAGE__DRIVER: "postgres"
SCANNER__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=scanner;Username=stellaops;Password=stellaops"
SCANNER__CACHE__REDIS__CONNECTIONSTRING: "stellaops-valkey:6379"
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
SCANNER__QUEUE__BROKER: "valkey://stellaops-valkey:6379"
SCANNER__EVENTS__ENABLED: "false"
SCANNER__EVENTS__DRIVER: "valkey"
SCANNER__EVENTS__DSN: "stellaops-valkey:6379"
SCANNER__EVENTS__STREAM: "stella.events"
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "5"
SCANNER__EVENTS__MAXSTREAMLENGTH: "10000"
SCANNER_SURFACE_FS_ENDPOINT: "http://stellaops-rustfs:8080/api/v1"
SCANNER_SURFACE_CACHE_ROOT: "/var/lib/stellaops/surface"
SCANNER_SURFACE_SECRETS_PROVIDER: "kubernetes"
SCANNER_SURFACE_SECRETS_ROOT: "stellaops/scanner"
notify-web:
image: registry.stella-ops.org/stellaops/notify-web:2025.09.2
service:
port: 8446
env:
DOTNET_ENVIRONMENT: Production
NOTIFY__QUEUE__DRIVER: "valkey"
NOTIFY__QUEUE__VALKEY__URL: "stellaops-valkey:6379"
configMounts:
- name: notify-config
mountPath: /app/etc/notify.yaml
subPath: notify.yaml
configMap: notify-config
excititor:
image: registry.stella-ops.org/stellaops/excititor@sha256:59022e2016aebcef5c856d163ae705755d3f81949d41195256e935ef40a627fa
env:
EXCITITOR__CONCELIER__BASEURL: "https://stellaops-concelier:8445"
EXCITITOR__STORAGE__DRIVER: "postgres"
EXCITITOR__STORAGE__POSTGRES__CONNECTIONSTRING: "Host=stellaops-postgres;Port=5432;Database=excititor;Username=stellaops;Password=stellaops"
web-ui:
image: registry.stella-ops.org/stellaops/web-ui@sha256:10d924808c48e4353e3a241da62eb7aefe727a1d6dc830eb23a8e181013b3a23
service:
port: 8443
env:
STELLAOPS_UI__BACKEND__BASEURL: "https://stellaops-scanner-web:8444"
# Infrastructure services
postgres:
class: infrastructure
image: docker.io/library/postgres@sha256:8e97b8526ed19304b144f7478bc9201646acf0723cdc6e4b19bc9eb34879a27e
service:
port: 5432
env:
POSTGRES_USER: stellaops
POSTGRES_PASSWORD: stellaops
POSTGRES_DB: stellaops
volumeMounts:
- name: postgres-data
mountPath: /var/lib/postgresql/data
volumeClaims:
- name: postgres-data
claimName: stellaops-postgres-data
valkey:
class: infrastructure
image: docker.io/valkey/valkey:9.0.1-alpine
service:
port: 6379
command:
- valkey-server
- --appendonly
- "yes"
volumeMounts:
- name: valkey-data
mountPath: /data
volumeClaims:
- name: valkey-data
claimName: stellaops-valkey-data
rustfs:
class: infrastructure
image: registry.stella-ops.org/stellaops/rustfs:2025.09.2
service:
port: 8080
command:
- serve
- --listen
- 0.0.0.0:8080
- --root
- /data
env:
RUSTFS__LOG__LEVEL: info
RUSTFS__STORAGE__PATH: /data
volumeMounts:
- name: rustfs-data
mountPath: /data
volumeClaims:
- name: rustfs-data
claimName: stellaops-rustfs-data

View File

@@ -1,281 +0,0 @@
global:
release:
version: ""
channel: ""
manifestSha256: ""
profile: ""
image:
pullPolicy: IfNotPresent
labels: {}
migrations:
enabled: false
jobs: []
networkPolicy:
enabled: false
ingressPort: 80
egressPort: 443
ingressNamespaces: {}
ingressPods: {}
egressNamespaces: {}
egressPods: {}
ingress:
enabled: false
className: nginx
annotations: {}
hosts: []
tls: []
externalSecrets:
enabled: false
secrets: []
prometheus:
enabled: false
path: /metrics
port: 8080
scheme: http
hpa:
enabled: false
minReplicas: 1
maxReplicas: 3
cpu:
targetPercentage: 75
memory:
targetPercentage: null
# Surface.Env configuration for Scanner/Zastava components
# See docs/modules/scanner/design/surface-env.md for details
surface:
# Surface.FS storage configuration
fs:
# Base URI for Surface.FS / RustFS / S3-compatible store (required)
endpoint: ""
# Bucket/container for manifests and artefacts
bucket: "surface-cache"
# Optional region for S3-compatible stores (AWS/GCS)
region: ""
# Local cache configuration
cache:
# Local directory for warm caches
root: "/var/lib/stellaops/surface"
# Soft limit for on-disk cache usage in MB (64-262144)
quotaMb: 4096
# Enable manifest prefetch threads
prefetchEnabled: false
# Tenant configuration
tenant: "default"
# Comma-separated feature switches
features: ""
# TLS configuration for client authentication
tls:
# Path to PEM/PKCS#12 certificate file
certPath: ""
# Optional private key path when cert/key stored separately
keyPath: ""
# Secret name containing TLS cert/key
secretName: ""
# Secrets provider configuration
secrets:
# Provider ID: kubernetes, file, inline
provider: "kubernetes"
# Kubernetes namespace for secrets provider
namespace: ""
# Path or base for file provider
root: ""
# Optional fallback provider ID
fallbackProvider: ""
# Allow inline secrets (disable in production)
allowInline: false
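# Hedged example of overriding the Surface.Env block at upgrade time; the endpoint
# mirrors the dev/prod overlays and the release/namespace names are illustrative:
#   helm upgrade stellaops deploy/helm/stellaops --reuse-values \
#     --set surface.fs.endpoint=http://stellaops-rustfs:8080/api/v1 \
#     --set surface.secrets.provider=kubernetes \
#     --set surface.secrets.namespace=stellaops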
telemetry:
collector:
enabled: false
replicas: 1
image: otel/opentelemetry-collector:0.105.0
requireClientCert: true
defaultTenant: unknown
logLevel: info
tls:
secretName: ""
certPath: /etc/otel/tls/tls.crt
keyPath: /etc/otel/tls/tls.key
caPath: /etc/otel/tls/ca.crt
items:
- key: tls.crt
path: tls.crt
- key: tls.key
path: tls.key
- key: ca.crt
path: ca.crt
service:
grpcPort: 4317
httpPort: 4318
metricsPort: 9464
resources: {}
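# When telemetry.collector.tls.secretName is set, the secret must carry the three keys
# listed under items. Minimal sketch (secret name follows the dev overlay; local file
# paths are illustrative):
#   kubectl -n stellaops create secret generic stellaops-otel-tls \
#     --from-file=tls.crt=collector.crt \
#     --from-file=tls.key=collector.key \
#     --from-file=ca.crt=ca.crt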
configMaps:
# Surface.Env environment variables for Scanner/Zastava components
surface-env:
data:
SCANNER_SURFACE_FS_ENDPOINT: "{{ .Values.surface.fs.endpoint }}"
SCANNER_SURFACE_FS_BUCKET: "{{ .Values.surface.fs.bucket }}"
SCANNER_SURFACE_FS_REGION: "{{ .Values.surface.fs.region }}"
SCANNER_SURFACE_CACHE_ROOT: "{{ .Values.surface.cache.root }}"
SCANNER_SURFACE_CACHE_QUOTA_MB: "{{ .Values.surface.cache.quotaMb }}"
SCANNER_SURFACE_PREFETCH_ENABLED: "{{ .Values.surface.cache.prefetchEnabled }}"
SCANNER_SURFACE_TENANT: "{{ .Values.surface.tenant }}"
SCANNER_SURFACE_FEATURES: "{{ .Values.surface.features }}"
SCANNER_SURFACE_TLS_CERT_PATH: "{{ .Values.surface.tls.certPath }}"
SCANNER_SURFACE_TLS_KEY_PATH: "{{ .Values.surface.tls.keyPath }}"
SCANNER_SURFACE_SECRETS_PROVIDER: "{{ .Values.surface.secrets.provider }}"
SCANNER_SURFACE_SECRETS_NAMESPACE: "{{ .Values.surface.secrets.namespace }}"
SCANNER_SURFACE_SECRETS_ROOT: "{{ .Values.surface.secrets.root }}"
SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER: "{{ .Values.surface.secrets.fallbackProvider }}"
SCANNER_SURFACE_SECRETS_ALLOW_INLINE: "{{ .Values.surface.secrets.allowInline }}"
# Zastava consumers inherit Scanner defaults but can be overridden via ZASTAVA_* envs
ZASTAVA_SURFACE_FS_ENDPOINT: "{{ .Values.surface.fs.endpoint }}"
ZASTAVA_SURFACE_FS_BUCKET: "{{ .Values.surface.fs.bucket }}"
ZASTAVA_SURFACE_FS_REGION: "{{ .Values.surface.fs.region }}"
ZASTAVA_SURFACE_CACHE_ROOT: "{{ .Values.surface.cache.root }}"
ZASTAVA_SURFACE_CACHE_QUOTA_MB: "{{ .Values.surface.cache.quotaMb }}"
ZASTAVA_SURFACE_PREFETCH_ENABLED: "{{ .Values.surface.cache.prefetchEnabled }}"
ZASTAVA_SURFACE_TENANT: "{{ .Values.surface.tenant }}"
ZASTAVA_SURFACE_FEATURES: "{{ .Values.surface.features }}"
ZASTAVA_SURFACE_TLS_CERT_PATH: "{{ .Values.surface.tls.certPath }}"
ZASTAVA_SURFACE_TLS_KEY_PATH: "{{ .Values.surface.tls.keyPath }}"
ZASTAVA_SURFACE_SECRETS_PROVIDER: "{{ .Values.surface.secrets.provider }}"
ZASTAVA_SURFACE_SECRETS_NAMESPACE: "{{ .Values.surface.secrets.namespace }}"
ZASTAVA_SURFACE_SECRETS_ROOT: "{{ .Values.surface.secrets.root }}"
ZASTAVA_SURFACE_SECRETS_FALLBACK_PROVIDER: "{{ .Values.surface.secrets.fallbackProvider }}"
ZASTAVA_SURFACE_SECRETS_ALLOW_INLINE: "{{ .Values.surface.secrets.allowInline }}"
issuer-directory-config:
data:
issuer-directory.yaml: |
IssuerDirectory:
telemetry:
minimumLogLevel: Information
authority:
enabled: true
issuer: https://authority.svc.cluster.local/realms/stellaops
requireHttpsMetadata: true
audiences:
- stellaops-platform
readScope: issuer-directory:read
writeScope: issuer-directory:write
adminScope: issuer-directory:admin
tenantHeader: X-StellaOps-Tenant
seedCsafPublishers: true
csafSeedPath: data/csaf-publishers.json
Storage:
Driver: postgres
Postgres:
ConnectionString: Host=postgres;Port=5432;Database=issuer_directory;Username=stellaops;Password=stellaops
policy-engine-activation:
data:
STELLAOPS_POLICY_ENGINE__ACTIVATION__FORCETWOPERSONAPPROVAL: "false"
STELLAOPS_POLICY_ENGINE__ACTIVATION__DEFAULTREQUIRESTWOPERSONAPPROVAL: "false"
STELLAOPS_POLICY_ENGINE__ACTIVATION__EMITAUDITLOGS: "true"
services:
issuer-directory:
image: registry.stella-ops.org/stellaops/issuer-directory-web:2025.10.0-edge
replicas: 1
configMounts:
- name: issuer-directory-config
configMap: issuer-directory-config
mountPath: /etc/issuer-directory.yaml
subPath: issuer-directory.yaml
envFrom:
- secretRef:
name: issuer-directory-secrets
env:
ISSUERDIRECTORY__CONFIG: /etc/issuer-directory.yaml
ISSUERDIRECTORY__AUTHORITY__BASEURL: https://authority:8440
ISSUERDIRECTORY__SEEDCSAFPUBLISHERS: "true"
ports:
- containerPort: 8080
service:
port: 8080
readinessProbe:
httpGet:
path: /health/live
port: 8080
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /health/live
port: 8080
initialDelaySeconds: 10
periodSeconds: 20
scheduler-worker:
image: registry.stella-ops.org/stellaops/scheduler-worker:2025.10.0-edge
replicas: 1
command:
- dotnet
- StellaOps.Scheduler.Worker.Host.dll
env:
SCHEDULER__QUEUE__KIND: Valkey
SCHEDULER__QUEUE__VALKEY__URL: valkey:6379
SCHEDULER__STORAGE__DRIVER: postgres
SCHEDULER__STORAGE__POSTGRES__CONNECTIONSTRING: Host=postgres;Port=5432;Database=scheduler;Username=stellaops;Password=stellaops
SCHEDULER__WORKER__RUNNER__SCANNER__BASEADDRESS: http://scanner-web:8444
advisory-ai-web:
image: registry.stella-ops.org/stellaops/advisory-ai-web:2025.10.0-edge
service:
port: 8448
env:
ADVISORYAI__AdvisoryAI__SbomBaseAddress: http://scanner-web:8444
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: /var/lib/advisory-ai/queue
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: /var/lib/advisory-ai/plans
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: /var/lib/advisory-ai/outputs
ADVISORYAI__AdvisoryAI__Inference__Mode: Local
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: ""
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: ""
volumeMounts:
- name: advisory-ai-data
mountPath: /var/lib/advisory-ai
volumeClaims:
- name: advisory-ai-data
claimName: stellaops-advisory-ai-data
advisory-ai-worker:
image: registry.stella-ops.org/stellaops/advisory-ai-worker:2025.10.0-edge
env:
ADVISORYAI__AdvisoryAI__SbomBaseAddress: http://scanner-web:8444
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: /var/lib/advisory-ai/queue
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: /var/lib/advisory-ai/plans
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: /var/lib/advisory-ai/outputs
ADVISORYAI__AdvisoryAI__Inference__Mode: Local
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: ""
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: ""
volumeMounts:
- name: advisory-ai-data
mountPath: /var/lib/advisory-ai
volumeClaims:
- name: advisory-ai-data
claimName: stellaops-advisory-ai-data
mock:
enabled: false
orchestrator:
image: registry.stella-ops.org/stellaops/orchestrator@sha256:97f12856ce870bafd3328bda86833bcccbf56d255941d804966b5557f6610119
policyRegistry:
image: registry.stella-ops.org/stellaops/policy-registry@sha256:c6cad8055e9827ebcbebb6ad4d6866dce4b83a0a49b0a8a6500b736a5cb26fa7
packsRegistry:
image: registry.stella-ops.org/stellaops/packs-registry@sha256:1f5e9416c4dc608594ad6fad87c24d72134427f899c192b494e22b268499c791
taskRunner:
image: registry.stella-ops.org/stellaops/task-runner@sha256:eb5ad992b49a41554f41516be1a6afcfa6522faf2111c08ff2b3664ad2fc954b
vexLens:
image: registry.stella-ops.org/stellaops/vex-lens@sha256:b44e63ecfeebc345a70c073c1ce5ace709c58be0ffaad0e2862758aeee3092fb
issuerDirectory:
image: registry.stella-ops.org/stellaops/issuer-directory@sha256:67e8ef02c97d3156741e857756994888f30c373ace8e84886762edba9dc51914
findingsLedger:
image: registry.stella-ops.org/stellaops/findings-ledger@sha256:71d4c361ba8b2f8b69d652597bc3f2efc8a64f93fab854ce25272a88506df49c
vulnExplorerApi:
image: registry.stella-ops.org/stellaops/vuln-explorer-api@sha256:7fc7e43a05cbeb0106ce7d4d634612e83de6fdc119aaab754a71c1d60b82841d

View File

@@ -1,22 +0,0 @@
# Air-gap Egress Guard Rails
Artifacts supporting `DEVOPS-AIRGAP-56-001` and the related air-gap tasks (`DEVOPS-AIRGAP-56-003`, `DEVOPS-AIRGAP-58-002`):
- `k8s-deny-egress.yaml` — NetworkPolicy template that denies all egress for pods labeled `sealed=true`, except optional in-cluster DNS when enabled.
- `compose-egress-guard.sh` — Idempotent iptables guard for Docker/compose using the `DOCKER-USER` chain to drop all outbound traffic from a compose project network while allowing loopback and RFC1918 intra-cluster ranges.
- `verify-egress-block.sh` — Verification harness that runs curl probes from Docker or Kubernetes and reports JSON results; exits non-zero if any target is reachable.
- `bundle_stage_import.py` — Deterministic bundle staging helper: validates sha256 manifest, copies bundles to staging dir as `<sha256>-<basename>`, emits `staging-report.json` for evidence.
- `stage-bundle.sh` — Thin wrapper around `bundle_stage_import.py` with positional args.
- `build_bootstrap_pack.py` — Builds a Bootstrap Pack from images/charts/extras listed in a JSON config, writing `bootstrap-manifest.json` + `checksums.sha256` deterministically.
- `build_bootstrap_pack.sh` — Wrapper for the bootstrap pack builder.
- `build_mirror_bundle.py` — Generates mirror bundle manifest + checksums with dual-control approvals; optional cosign signing. Outputs `mirror-bundle-manifest.json`, `checksums.sha256`, and optional signature/cert.
- `compose-syslog-smtp.yaml` + `syslog-ng.conf` — Local SMTP (MailHog) + syslog-ng stack for sealed-mode notifications; run via `scripts/devops/run-smtp-syslog.sh`.
- `health_syslog_smtp.sh` — Brings up the syslog/SMTP stack via docker compose and performs health checks (MailHog API + syslog logger).
- `compose-observability.yaml` — Sealed-mode observability stack (Prometheus, Grafana, Tempo, Loki) with offline configs and healthchecks.
- `health_observability.sh` — Starts the observability stack and probes Prometheus/Grafana/Tempo/Loki readiness.
- `observability-offline-compose.yml` + `otel-offline.yaml` + `promtail-config.yaml` — Sealed-mode observability stack (Loki, Promtail, OTEL collector with file exporters) to satisfy DEVOPS-AIRGAP-58-002.
See also `ops/devops/sealed-mode-ci/` for the full sealed-mode compose harness and `egress_probe.py`, which `verify-egress-block.sh` wraps.
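As a rough orientation for what `verify-egress-block.sh` / `egress_probe.py` are described to do above, the check can be sketched in a few lines of Python. This is a hedged, self-contained illustration, not the repository's implementation; the target URLs are arbitrary placeholders.

```python
#!/usr/bin/env python3
"""Sketch of the egress probe: succeed only when every target is unreachable."""
import json
import sys
import urllib.error
import urllib.request

# Placeholder targets; a real run would probe endpoints relevant to the deployment.
TARGETS = ["https://example.com", "https://registry-1.docker.io/v2/"]


def reachable(url: str, timeout: float = 5.0) -> bool:
    try:
        urllib.request.urlopen(url, timeout=timeout)
        return True
    except urllib.error.HTTPError:
        # The host answered with an HTTP error, so egress is still possible.
        return True
    except Exception:
        # DNS failure, timeout, or connection refused: treated as blocked.
        return False


results = [{"target": t, "reachable": reachable(t)} for t in TARGETS]
print(json.dumps(results, indent=2))
# Non-zero exit if anything was reachable, mirroring the behaviour described for verify-egress-block.sh.
sys.exit(1 if any(r["reachable"] for r in results) else 0)
```

In a sealed environment the expected outcome is exit code 0 with every probe reported as unreachable.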

View File

@@ -1,174 +0,0 @@
#!/usr/bin/env python3
"""Build a deterministic Bootstrap Pack bundle for sealed/offline transfer.

- Reads a JSON config listing artefacts to include (images, Helm charts, extras).
- Copies artefacts into an output directory with preserved basenames.
- Generates `bootstrap-manifest.json` and `checksums.sha256` with sha256 hashes
  and sizes for evidence/verification.
- Intended to satisfy DEVOPS-AIRGAP-56-003.

Config schema (JSON):
{
  "name": "bootstrap-pack",
  "images": ["release/containers/taskrunner.tar", "release/containers/orchestrator.tar"],
  "charts": ["deploy/helm/stella.tgz"],
  "extras": ["docs/24_OFFLINE_KIT.md"]
}

Usage:
  build_bootstrap_pack.py --config bootstrap.json --output out/bootstrap-pack
  build_bootstrap_pack.py --self-test
"""
from __future__ import annotations

import argparse
import hashlib
import json
import os
import shutil
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Tuple

DEFAULT_NAME = "bootstrap-pack"


def sha256_file(path: Path) -> Tuple[str, int]:
    h = hashlib.sha256()
    size = 0
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest(), size


def load_config(path: Path) -> Dict:
    with path.open("r", encoding="utf-8") as handle:
        cfg = json.load(handle)
    if not isinstance(cfg, dict):
        raise ValueError("config must be a JSON object")
    return cfg


def ensure_list(cfg: Dict, key: str) -> List[str]:
    value = cfg.get(key, [])
    if value is None:
        return []
    if not isinstance(value, list):
        raise ValueError(f"config.{key} must be a list")
    return [str(x) for x in value]


def copy_item(src: Path, dest_root: Path, rel_dir: str) -> Tuple[str, str, int]:
    dest_dir = dest_root / rel_dir
    dest_dir.mkdir(parents=True, exist_ok=True)
    dest_path = dest_dir / src.name
    shutil.copy2(src, dest_path)
    digest, size = sha256_file(dest_path)
    rel_path = dest_path.relative_to(dest_root).as_posix()
    return rel_path, digest, size


def build_pack(config_path: Path, output_dir: Path) -> Dict:
    cfg = load_config(config_path)
    name = cfg.get("name", DEFAULT_NAME)
    images = ensure_list(cfg, "images")
    charts = ensure_list(cfg, "charts")
    extras = ensure_list(cfg, "extras")
    output_dir.mkdir(parents=True, exist_ok=True)
    items = []

    def process_list(paths: List[str], kind: str, rel_dir: str):
        for raw in sorted(paths):
            src = Path(raw).expanduser().resolve()
            if not src.exists():
                items.append({
                    "type": kind,
                    "source": raw,
                    "status": "missing"
                })
                continue
            rel_path, digest, size = copy_item(src, output_dir, rel_dir)
            items.append({
                "type": kind,
                "source": raw,
                "path": rel_path,
                "sha256": digest,
                "size": size,
                "status": "ok",
            })

    process_list(images, "image", "images")
    process_list(charts, "chart", "charts")
    process_list(extras, "extra", "extras")

    manifest = {
        "name": name,
        "created": datetime.now(timezone.utc).isoformat(),
        "items": items,
    }
    # checksums file (only for ok items)
    checksum_lines = [f"{item['sha256']} {item['path']}" for item in items if item.get("status") == "ok"]
    (output_dir / "checksums.sha256").write_text("\n".join(checksum_lines) + ("\n" if checksum_lines else ""), encoding="utf-8")
    (output_dir / "bootstrap-manifest.json").write_text(json.dumps(manifest, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
    return manifest


def parse_args(argv: List[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--config", type=Path, help="Path to bootstrap pack config JSON")
    parser.add_argument("--output", type=Path, help="Output directory for the pack")
    parser.add_argument("--self-test", action="store_true", help="Run internal self-test and exit")
    return parser.parse_args(argv)


def self_test() -> int:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        files = []
        for name, content in [("img1.tar", b"image-one"), ("chart1.tgz", b"chart-one"), ("readme.txt", b"hello")]:
            p = tmpdir / name
            p.write_bytes(content)
            files.append(p)
        cfg = {
            "images": [str(files[0])],
            "charts": [str(files[1])],
            "extras": [str(files[2])],
        }
        cfg_path = tmpdir / "bootstrap.json"
        cfg_path.write_text(json.dumps(cfg), encoding="utf-8")
        outdir = tmpdir / "out"
        manifest = build_pack(cfg_path, outdir)
        assert all(item.get("status") == "ok" for item in manifest["items"]), manifest
        for rel in ["images/img1.tar", "charts/chart1.tgz", "extras/readme.txt", "checksums.sha256", "bootstrap-manifest.json"]:
            assert (outdir / rel).exists(), f"missing {rel}"
    print("self-test passed")
    return 0


def main(argv: List[str]) -> int:
    args = parse_args(argv)
    if args.self_test:
        return self_test()
    if not (args.config and args.output):
        print("--config and --output are required unless --self-test", file=sys.stderr)
        return 2
    manifest = build_pack(args.config, args.output)
    missing = [i for i in manifest["items"] if i.get("status") == "missing"]
    if missing:
        print("Pack built with missing items:")
        for item in missing:
            print(f"  - {item['source']}")
        return 1
    print(f"Bootstrap pack written to {args.output}")
    return 0


if __name__ == "__main__":  # pragma: no cover
    sys.exit(main(sys.argv[1:]))
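On the receiving (sealed) side, the `checksums.sha256` emitted above (one `<sha256> <relative path>` pair per line) can be re-verified before the pack is imported. The snippet below is a hedged sketch of that verification, not part of the repository tooling; the filename `verify_pack.py` and its CLI are hypothetical.

```python
#!/usr/bin/env python3
"""Sketch: re-verify a bootstrap pack against its checksums.sha256 before import."""
import hashlib
import sys
from pathlib import Path


def sha256_of(path: Path) -> str:
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            h.update(chunk)
    return h.hexdigest()


def verify_pack(pack_dir: Path) -> int:
    """Return the number of mismatched or missing entries."""
    failures = 0
    for line in (pack_dir / "checksums.sha256").read_text(encoding="utf-8").splitlines():
        if not line.strip():
            continue
        expected, rel_path = line.split(None, 1)  # tolerate one or more spaces as separator
        target = pack_dir / rel_path.strip()
        if not target.is_file() or sha256_of(target) != expected:
            print(f"FAILED: {rel_path.strip()}")
            failures += 1
    return failures


if __name__ == "__main__":
    sys.exit(1 if verify_pack(Path(sys.argv[1])) else 0)
```

Invoked as `python3 verify_pack.py out/bootstrap-pack`, it exits non-zero on any mismatch; `sha256sum --check` can do the same job when the line format matches its expectations.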

View File

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
# Thin wrapper for build_bootstrap_pack.py
# Usage: ./build_bootstrap_pack.sh config.json out/bootstrap-pack
set -euo pipefail
if [[ $# -lt 2 ]]; then
  echo "Usage: $0 <config.json> <output-dir>" >&2
  exit 2
fi
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
python3 "$SCRIPT_DIR/build_bootstrap_pack.py" --config "$1" --output "$2"

Some files were not shown because too many files have changed in this diff.