Initial commit (history squashed)
Some checks failed
Build Test Deploy / authority-container (push) Has been cancelled
Build Test Deploy / docs (push) Has been cancelled
Build Test Deploy / deploy (push) Has been cancelled
Build Test Deploy / build-test (push) Has been cancelled
Docs CI / lint-and-preview (push) Has been cancelled
Some checks failed
Build Test Deploy / authority-container (push) Has been cancelled
Build Test Deploy / docs (push) Has been cancelled
Build Test Deploy / deploy (push) Has been cancelled
Build Test Deploy / build-test (push) Has been cancelled
Docs CI / lint-and-preview (push) Has been cancelled
This commit is contained in:
29
.gitea/workflows/_deprecated-feedser-ci.yml.disabled
Normal file
29
.gitea/workflows/_deprecated-feedser-ci.yml.disabled
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
name: Feedser CI
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: ["main", "develop"]
|
||||||
|
pull_request:
|
||||||
|
branches: ["main", "develop"]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-and-test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Check out repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Setup .NET 10 preview
|
||||||
|
uses: actions/setup-dotnet@v4
|
||||||
|
with:
|
||||||
|
dotnet-version: 10.0.100-rc.1.25451.107
|
||||||
|
include-prerelease: true
|
||||||
|
|
||||||
|
- name: Restore dependencies
|
||||||
|
run: dotnet restore src/StellaOps.Feedser/StellaOps.Feedser.sln
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
run: dotnet build src/StellaOps.Feedser/StellaOps.Feedser.sln --configuration Release --no-restore -warnaserror
|
||||||
|
|
||||||
|
- name: Test
|
||||||
|
run: dotnet test src/StellaOps.Feedser/StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj --configuration Release --no-restore --logger "trx;LogFileName=feedser-tests.trx"
|
||||||
87
.gitea/workflows/_deprecated-feedser-tests.yml.disabled
Normal file
87
.gitea/workflows/_deprecated-feedser-tests.yml.disabled
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
name: Feedser Tests CI
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
paths:
|
||||||
|
- 'StellaOps.Feedser/**'
|
||||||
|
- '.gitea/workflows/feedser-tests.yml'
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- 'StellaOps.Feedser/**'
|
||||||
|
- '.gitea/workflows/feedser-tests.yml'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
advisory-store-performance:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up .NET SDK
|
||||||
|
uses: actions/setup-dotnet@v4
|
||||||
|
with:
|
||||||
|
dotnet-version: 10.0.100-rc.1
|
||||||
|
|
||||||
|
- name: Restore dependencies
|
||||||
|
working-directory: StellaOps.Feedser
|
||||||
|
run: dotnet restore StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj
|
||||||
|
|
||||||
|
- name: Run advisory store performance test
|
||||||
|
working-directory: StellaOps.Feedser
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
dotnet test \
|
||||||
|
StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj \
|
||||||
|
--filter "FullyQualifiedName~AdvisoryStorePerformanceTests" \
|
||||||
|
--logger:"console;verbosity=detailed" | tee performance.log
|
||||||
|
|
||||||
|
- name: Upload performance log
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: advisory-store-performance-log
|
||||||
|
path: StellaOps.Feedser/performance.log
|
||||||
|
|
||||||
|
full-test-suite:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up .NET SDK
|
||||||
|
uses: actions/setup-dotnet@v4
|
||||||
|
with:
|
||||||
|
dotnet-version: 10.0.100-rc.1
|
||||||
|
|
||||||
|
- name: Restore dependencies
|
||||||
|
working-directory: StellaOps.Feedser
|
||||||
|
run: dotnet restore StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj
|
||||||
|
|
||||||
|
- name: Run full test suite with baseline guard
|
||||||
|
working-directory: StellaOps.Feedser
|
||||||
|
env:
|
||||||
|
BASELINE_SECONDS: "19.8"
|
||||||
|
TOLERANCE_PERCENT: "25"
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
start=$(date +%s)
|
||||||
|
dotnet test StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj --no-build | tee full-tests.log
|
||||||
|
end=$(date +%s)
|
||||||
|
duration=$((end-start))
|
||||||
|
echo "Full test duration: ${duration}s"
|
||||||
|
export DURATION_SECONDS="$duration"
|
||||||
|
python - <<'PY'
|
||||||
|
import os, sys
|
||||||
|
duration = float(os.environ["DURATION_SECONDS"])
|
||||||
|
baseline = float(os.environ["BASELINE_SECONDS"])
|
||||||
|
tolerance = float(os.environ["TOLERANCE_PERCENT"])
|
||||||
|
threshold = baseline * (1 + tolerance / 100)
|
||||||
|
print(f"Baseline {baseline:.1f}s, threshold {threshold:.1f}s, observed {duration:.1f}s")
|
||||||
|
if duration > threshold:
|
||||||
|
sys.exit(f"Full test duration {duration:.1f}s exceeded threshold {threshold:.1f}s")
|
||||||
|
PY
|
||||||
|
|
||||||
|
- name: Upload full test log
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: full-test-suite-log
|
||||||
|
path: StellaOps.Feedser/full-tests.log
|
||||||
341
.gitea/workflows/build-test-deploy.yml
Normal file
341
.gitea/workflows/build-test-deploy.yml
Normal file
@@ -0,0 +1,341 @@
|
|||||||
|
# .gitea/workflows/build-test-deploy.yml
|
||||||
|
# Unified CI/CD workflow for git.stella-ops.org (Feedser monorepo)
|
||||||
|
|
||||||
|
name: Build Test Deploy
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ main ]
|
||||||
|
paths:
|
||||||
|
- 'src/**'
|
||||||
|
- 'docs/**'
|
||||||
|
- 'scripts/**'
|
||||||
|
- 'Directory.Build.props'
|
||||||
|
- 'Directory.Build.targets'
|
||||||
|
- 'global.json'
|
||||||
|
- '.gitea/workflows/**'
|
||||||
|
pull_request:
|
||||||
|
branches: [ main, develop ]
|
||||||
|
paths:
|
||||||
|
- 'src/**'
|
||||||
|
- 'docs/**'
|
||||||
|
- 'scripts/**'
|
||||||
|
- '.gitea/workflows/**'
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
force_deploy:
|
||||||
|
description: 'Ignore branch checks and run the deploy stage'
|
||||||
|
required: false
|
||||||
|
default: 'false'
|
||||||
|
type: boolean
|
||||||
|
|
||||||
|
env:
|
||||||
|
DOTNET_VERSION: '10.0.100-rc.1.25451.107'
|
||||||
|
BUILD_CONFIGURATION: Release
|
||||||
|
CI_CACHE_ROOT: /data/.cache/stella-ops/feedser
|
||||||
|
RUNNER_TOOL_CACHE: /toolcache
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-test:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
environment: ${{ github.event_name == 'pull_request' && 'preview' || 'staging' }}
|
||||||
|
env:
|
||||||
|
PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/webservice
|
||||||
|
AUTHORITY_PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/authority
|
||||||
|
TEST_RESULTS_DIR: ${{ github.workspace }}/artifacts/test-results
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup .NET ${{ env.DOTNET_VERSION }}
|
||||||
|
uses: actions/setup-dotnet@v4
|
||||||
|
with:
|
||||||
|
dotnet-version: ${{ env.DOTNET_VERSION }}
|
||||||
|
include-prerelease: true
|
||||||
|
|
||||||
|
- name: Restore dependencies
|
||||||
|
run: dotnet restore src/StellaOps.Feedser.sln
|
||||||
|
|
||||||
|
- name: Build solution (warnings as errors)
|
||||||
|
run: dotnet build src/StellaOps.Feedser.sln --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
|
||||||
|
|
||||||
|
- name: Run unit and integration tests
|
||||||
|
run: |
|
||||||
|
mkdir -p "$TEST_RESULTS_DIR"
|
||||||
|
dotnet test src/StellaOps.Feedser.sln \
|
||||||
|
--configuration $BUILD_CONFIGURATION \
|
||||||
|
--no-build \
|
||||||
|
--logger "trx;LogFileName=stellaops-feedser-tests.trx" \
|
||||||
|
--results-directory "$TEST_RESULTS_DIR"
|
||||||
|
|
||||||
|
- name: Publish Feedser web service
|
||||||
|
run: |
|
||||||
|
mkdir -p "$PUBLISH_DIR"
|
||||||
|
dotnet publish src/StellaOps.Feedser.WebService/StellaOps.Feedser.WebService.csproj \
|
||||||
|
--configuration $BUILD_CONFIGURATION \
|
||||||
|
--no-build \
|
||||||
|
--output "$PUBLISH_DIR"
|
||||||
|
|
||||||
|
- name: Upload published artifacts
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: feedser-publish
|
||||||
|
path: ${{ env.PUBLISH_DIR }}
|
||||||
|
if-no-files-found: error
|
||||||
|
retention-days: 7
|
||||||
|
|
||||||
|
- name: Restore Authority solution
|
||||||
|
run: dotnet restore src/StellaOps.Authority/StellaOps.Authority.sln
|
||||||
|
|
||||||
|
- name: Build Authority solution
|
||||||
|
run: dotnet build src/StellaOps.Authority/StellaOps.Authority.sln --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
|
||||||
|
|
||||||
|
- name: Run Authority tests
|
||||||
|
run: |
|
||||||
|
dotnet test src/StellaOps.Configuration.Tests/StellaOps.Configuration.Tests.csproj \
|
||||||
|
--configuration $BUILD_CONFIGURATION \
|
||||||
|
--no-build \
|
||||||
|
--logger "trx;LogFileName=stellaops-authority-tests.trx" \
|
||||||
|
--results-directory "$TEST_RESULTS_DIR"
|
||||||
|
|
||||||
|
- name: Publish Authority web service
|
||||||
|
run: |
|
||||||
|
mkdir -p "$AUTHORITY_PUBLISH_DIR"
|
||||||
|
dotnet publish src/StellaOps.Authority/StellaOps.Authority/StellaOps.Authority.csproj \
|
||||||
|
--configuration $BUILD_CONFIGURATION \
|
||||||
|
--no-build \
|
||||||
|
--output "$AUTHORITY_PUBLISH_DIR"
|
||||||
|
|
||||||
|
- name: Upload Authority artifacts
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: authority-publish
|
||||||
|
path: ${{ env.AUTHORITY_PUBLISH_DIR }}
|
||||||
|
if-no-files-found: error
|
||||||
|
retention-days: 7
|
||||||
|
|
||||||
|
- name: Upload test results
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: feedser-test-results
|
||||||
|
path: ${{ env.TEST_RESULTS_DIR }}
|
||||||
|
if-no-files-found: ignore
|
||||||
|
retention-days: 7
|
||||||
|
|
||||||
|
authority-container:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
needs: build-test
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Validate Authority compose file
|
||||||
|
run: docker compose -f ops/authority/docker-compose.authority.yaml config
|
||||||
|
|
||||||
|
- name: Build Authority container image
|
||||||
|
run: docker build -f ops/authority/Dockerfile -t stellaops-authority:ci .
|
||||||
|
|
||||||
|
docs:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
env:
|
||||||
|
DOCS_OUTPUT_DIR: ${{ github.workspace }}/artifacts/docs-site
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Setup Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.11'
|
||||||
|
|
||||||
|
- name: Install documentation dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
python -m pip install markdown pygments
|
||||||
|
|
||||||
|
- name: Render documentation bundle
|
||||||
|
run: |
|
||||||
|
python scripts/render_docs.py --source docs --output "$DOCS_OUTPUT_DIR" --clean
|
||||||
|
|
||||||
|
- name: Upload documentation artifact
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: feedser-docs-site
|
||||||
|
path: ${{ env.DOCS_OUTPUT_DIR }}
|
||||||
|
if-no-files-found: error
|
||||||
|
retention-days: 7
|
||||||
|
|
||||||
|
deploy:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
needs: [build-test, docs]
|
||||||
|
if: >-
|
||||||
|
needs.build-test.result == 'success' &&
|
||||||
|
needs.docs.result == 'success' &&
|
||||||
|
(
|
||||||
|
(github.event_name == 'push' && github.ref == 'refs/heads/main') ||
|
||||||
|
github.event_name == 'workflow_dispatch'
|
||||||
|
)
|
||||||
|
environment: staging
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
sparse-checkout: |
|
||||||
|
scripts
|
||||||
|
.gitea/workflows
|
||||||
|
sparse-checkout-cone-mode: true
|
||||||
|
|
||||||
|
- name: Check if deployment should proceed
|
||||||
|
id: check-deploy
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||||
|
if [ "${{ github.event.inputs.force_deploy }}" = "true" ]; then
|
||||||
|
echo "should-deploy=true" >> $GITHUB_OUTPUT
|
||||||
|
echo "✅ Manual deployment requested"
|
||||||
|
else
|
||||||
|
echo "should-deploy=false" >> $GITHUB_OUTPUT
|
||||||
|
echo "ℹ️ Manual dispatch without force_deploy=true — skipping"
|
||||||
|
fi
|
||||||
|
elif [ "${{ github.ref }}" = "refs/heads/main" ]; then
|
||||||
|
echo "should-deploy=true" >> $GITHUB_OUTPUT
|
||||||
|
echo "✅ Deploying latest main branch build"
|
||||||
|
else
|
||||||
|
echo "should-deploy=false" >> $GITHUB_OUTPUT
|
||||||
|
echo "ℹ️ Deployment restricted to main branch"
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Resolve deployment credentials
|
||||||
|
id: params
|
||||||
|
if: steps.check-deploy.outputs.should-deploy == 'true'
|
||||||
|
run: |
|
||||||
|
missing=()
|
||||||
|
|
||||||
|
host="${{ secrets.STAGING_DEPLOYMENT_HOST }}"
|
||||||
|
if [ -z "$host" ]; then host="${{ vars.STAGING_DEPLOYMENT_HOST }}"; fi
|
||||||
|
if [ -z "$host" ]; then host="${{ secrets.DEPLOYMENT_HOST }}"; fi
|
||||||
|
if [ -z "$host" ]; then host="${{ vars.DEPLOYMENT_HOST }}"; fi
|
||||||
|
if [ -z "$host" ]; then missing+=("STAGING_DEPLOYMENT_HOST"); fi
|
||||||
|
|
||||||
|
user="${{ secrets.STAGING_DEPLOYMENT_USERNAME }}"
|
||||||
|
if [ -z "$user" ]; then user="${{ vars.STAGING_DEPLOYMENT_USERNAME }}"; fi
|
||||||
|
if [ -z "$user" ]; then user="${{ secrets.DEPLOYMENT_USERNAME }}"; fi
|
||||||
|
if [ -z "$user" ]; then user="${{ vars.DEPLOYMENT_USERNAME }}"; fi
|
||||||
|
if [ -z "$user" ]; then missing+=("STAGING_DEPLOYMENT_USERNAME"); fi
|
||||||
|
|
||||||
|
path="${{ secrets.STAGING_DEPLOYMENT_PATH }}"
|
||||||
|
if [ -z "$path" ]; then path="${{ vars.STAGING_DEPLOYMENT_PATH }}"; fi
|
||||||
|
|
||||||
|
docs_path="${{ secrets.STAGING_DOCS_PATH }}"
|
||||||
|
if [ -z "$docs_path" ]; then docs_path="${{ vars.STAGING_DOCS_PATH }}"; fi
|
||||||
|
|
||||||
|
key="${{ secrets.STAGING_DEPLOYMENT_KEY }}"
|
||||||
|
if [ -z "$key" ]; then key="${{ secrets.DEPLOYMENT_KEY }}"; fi
|
||||||
|
if [ -z "$key" ]; then key="${{ vars.STAGING_DEPLOYMENT_KEY }}"; fi
|
||||||
|
if [ -z "$key" ]; then key="${{ vars.DEPLOYMENT_KEY }}"; fi
|
||||||
|
if [ -z "$key" ]; then missing+=("STAGING_DEPLOYMENT_KEY"); fi
|
||||||
|
|
||||||
|
if [ ${#missing[@]} -gt 0 ]; then
|
||||||
|
echo "❌ Missing deployment configuration: ${missing[*]}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
key_file="$RUNNER_TEMP/staging_deploy_key"
|
||||||
|
printf '%s\n' "$key" > "$key_file"
|
||||||
|
chmod 600 "$key_file"
|
||||||
|
|
||||||
|
echo "host=$host" >> $GITHUB_OUTPUT
|
||||||
|
echo "user=$user" >> $GITHUB_OUTPUT
|
||||||
|
echo "path=$path" >> $GITHUB_OUTPUT
|
||||||
|
echo "docs-path=$docs_path" >> $GITHUB_OUTPUT
|
||||||
|
echo "key-file=$key_file" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Download service artifact
|
||||||
|
if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs.path != ''
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: feedser-publish
|
||||||
|
path: artifacts/service
|
||||||
|
|
||||||
|
- name: Download documentation artifact
|
||||||
|
if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs['docs-path'] != ''
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: feedser-docs-site
|
||||||
|
path: artifacts/docs
|
||||||
|
|
||||||
|
- name: Install rsync
|
||||||
|
if: steps.check-deploy.outputs.should-deploy == 'true'
|
||||||
|
run: |
|
||||||
|
if command -v rsync >/dev/null 2>&1; then
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
CACHE_DIR="${CI_CACHE_ROOT:-/tmp}/apt"
|
||||||
|
mkdir -p "$CACHE_DIR"
|
||||||
|
KEY="rsync-$(lsb_release -rs 2>/dev/null || echo unknown)"
|
||||||
|
DEB_DIR="$CACHE_DIR/$KEY"
|
||||||
|
mkdir -p "$DEB_DIR"
|
||||||
|
if ls "$DEB_DIR"/rsync*.deb >/dev/null 2>&1; then
|
||||||
|
apt-get update
|
||||||
|
apt-get install -y --no-install-recommends "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb
|
||||||
|
else
|
||||||
|
apt-get update
|
||||||
|
apt-get download rsync libpopt0
|
||||||
|
mv rsync*.deb libpopt0*.deb "$DEB_DIR"/
|
||||||
|
dpkg -i "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb || apt-get install -f -y
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Deploy service bundle
|
||||||
|
if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs.path != ''
|
||||||
|
env:
|
||||||
|
HOST: ${{ steps.params.outputs.host }}
|
||||||
|
USER: ${{ steps.params.outputs.user }}
|
||||||
|
TARGET: ${{ steps.params.outputs.path }}
|
||||||
|
KEY_FILE: ${{ steps.params.outputs['key-file'] }}
|
||||||
|
run: |
|
||||||
|
SERVICE_DIR="artifacts/service/feedser-publish"
|
||||||
|
if [ ! -d "$SERVICE_DIR" ]; then
|
||||||
|
echo "❌ Service artifact directory missing ($SERVICE_DIR)"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "🚀 Deploying Feedser web service to $HOST:$TARGET"
|
||||||
|
rsync -az --delete \
|
||||||
|
-e "ssh -i $KEY_FILE -o StrictHostKeyChecking=no" \
|
||||||
|
"$SERVICE_DIR"/ \
|
||||||
|
"$USER@$HOST:$TARGET/"
|
||||||
|
|
||||||
|
- name: Deploy documentation bundle
|
||||||
|
if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs['docs-path'] != ''
|
||||||
|
env:
|
||||||
|
HOST: ${{ steps.params.outputs.host }}
|
||||||
|
USER: ${{ steps.params.outputs.user }}
|
||||||
|
DOCS_TARGET: ${{ steps.params.outputs['docs-path'] }}
|
||||||
|
KEY_FILE: ${{ steps.params.outputs['key-file'] }}
|
||||||
|
run: |
|
||||||
|
DOCS_DIR="artifacts/docs/feedser-docs-site"
|
||||||
|
if [ ! -d "$DOCS_DIR" ]; then
|
||||||
|
echo "❌ Documentation artifact directory missing ($DOCS_DIR)"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "📚 Deploying documentation bundle to $HOST:$DOCS_TARGET"
|
||||||
|
rsync -az --delete \
|
||||||
|
-e "ssh -i $KEY_FILE -o StrictHostKeyChecking=no" \
|
||||||
|
"$DOCS_DIR"/ \
|
||||||
|
"$USER@$HOST:$DOCS_TARGET/"
|
||||||
|
|
||||||
|
- name: Deployment summary
|
||||||
|
if: steps.check-deploy.outputs.should-deploy == 'true'
|
||||||
|
run: |
|
||||||
|
echo "✅ Deployment completed"
|
||||||
|
echo " Host: ${{ steps.params.outputs.host }}"
|
||||||
|
echo " Service path: ${{ steps.params.outputs.path || '(skipped)' }}"
|
||||||
|
echo " Docs path: ${{ steps.params.outputs['docs-path'] || '(skipped)' }}"
|
||||||
|
|
||||||
|
- name: Deployment skipped summary
|
||||||
|
if: steps.check-deploy.outputs.should-deploy != 'true'
|
||||||
|
run: |
|
||||||
|
echo "ℹ️ Deployment stage skipped"
|
||||||
|
echo " Event: ${{ github.event_name }}"
|
||||||
|
echo " Ref: ${{ github.ref }}"
|
||||||
70
.gitea/workflows/docs.yml
Executable file
70
.gitea/workflows/docs.yml
Executable file
@@ -0,0 +1,70 @@
|
|||||||
|
# .gitea/workflows/docs.yml
|
||||||
|
# Documentation quality checks and preview artefacts
|
||||||
|
|
||||||
|
name: Docs CI
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
paths:
|
||||||
|
- 'docs/**'
|
||||||
|
- 'scripts/render_docs.py'
|
||||||
|
- '.gitea/workflows/docs.yml'
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- 'docs/**'
|
||||||
|
- 'scripts/render_docs.py'
|
||||||
|
- '.gitea/workflows/docs.yml'
|
||||||
|
workflow_dispatch: {}
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODE_VERSION: '20'
|
||||||
|
PYTHON_VERSION: '3.11'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
lint-and-preview:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
env:
|
||||||
|
DOCS_OUTPUT_DIR: ${{ github.workspace }}/artifacts/docs-preview
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: ${{ env.NODE_VERSION }}
|
||||||
|
|
||||||
|
- name: Install markdown linters
|
||||||
|
run: |
|
||||||
|
npm install markdown-link-check remark-cli remark-preset-lint-recommended
|
||||||
|
|
||||||
|
- name: Link check
|
||||||
|
run: |
|
||||||
|
find docs -name '*.md' -print0 | \
|
||||||
|
xargs -0 -n1 -I{} npx markdown-link-check --quiet '{}'
|
||||||
|
|
||||||
|
- name: Remark lint
|
||||||
|
run: |
|
||||||
|
npx remark docs -qf
|
||||||
|
|
||||||
|
- name: Setup Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: ${{ env.PYTHON_VERSION }}
|
||||||
|
|
||||||
|
- name: Install documentation dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
python -m pip install markdown pygments
|
||||||
|
|
||||||
|
- name: Render documentation preview bundle
|
||||||
|
run: |
|
||||||
|
python scripts/render_docs.py --source docs --output "$DOCS_OUTPUT_DIR" --clean
|
||||||
|
|
||||||
|
- name: Upload documentation preview
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: feedser-docs-preview
|
||||||
|
path: ${{ env.DOCS_OUTPUT_DIR }}
|
||||||
|
retention-days: 7
|
||||||
206
.gitea/workflows/promote.yml
Normal file
206
.gitea/workflows/promote.yml
Normal file
@@ -0,0 +1,206 @@
|
|||||||
|
# .gitea/workflows/promote.yml
|
||||||
|
# Manual promotion workflow to copy staged artefacts to production
|
||||||
|
|
||||||
|
name: Promote Feedser (Manual)
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
include_docs:
|
||||||
|
description: 'Also promote the generated documentation bundle'
|
||||||
|
required: false
|
||||||
|
default: 'true'
|
||||||
|
type: boolean
|
||||||
|
tag:
|
||||||
|
description: 'Optional build identifier to record in the summary'
|
||||||
|
required: false
|
||||||
|
default: 'latest'
|
||||||
|
type: string
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
promote:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
environment: production
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Resolve staging credentials
|
||||||
|
id: staging
|
||||||
|
run: |
|
||||||
|
missing=()
|
||||||
|
|
||||||
|
host="${{ secrets.STAGING_DEPLOYMENT_HOST }}"
|
||||||
|
if [ -z "$host" ]; then host="${{ vars.STAGING_DEPLOYMENT_HOST }}"; fi
|
||||||
|
if [ -z "$host" ]; then host="${{ secrets.DEPLOYMENT_HOST }}"; fi
|
||||||
|
if [ -z "$host" ]; then host="${{ vars.DEPLOYMENT_HOST }}"; fi
|
||||||
|
if [ -z "$host" ]; then missing+=("STAGING_DEPLOYMENT_HOST"); fi
|
||||||
|
|
||||||
|
user="${{ secrets.STAGING_DEPLOYMENT_USERNAME }}"
|
||||||
|
if [ -z "$user" ]; then user="${{ vars.STAGING_DEPLOYMENT_USERNAME }}"; fi
|
||||||
|
if [ -z "$user" ]; then user="${{ secrets.DEPLOYMENT_USERNAME }}"; fi
|
||||||
|
if [ -z "$user" ]; then user="${{ vars.DEPLOYMENT_USERNAME }}"; fi
|
||||||
|
if [ -z "$user" ]; then missing+=("STAGING_DEPLOYMENT_USERNAME"); fi
|
||||||
|
|
||||||
|
path="${{ secrets.STAGING_DEPLOYMENT_PATH }}"
|
||||||
|
if [ -z "$path" ]; then path="${{ vars.STAGING_DEPLOYMENT_PATH }}"; fi
|
||||||
|
if [ -z "$path" ]; then missing+=("STAGING_DEPLOYMENT_PATH")
|
||||||
|
fi
|
||||||
|
|
||||||
|
docs_path="${{ secrets.STAGING_DOCS_PATH }}"
|
||||||
|
if [ -z "$docs_path" ]; then docs_path="${{ vars.STAGING_DOCS_PATH }}"; fi
|
||||||
|
|
||||||
|
key="${{ secrets.STAGING_DEPLOYMENT_KEY }}"
|
||||||
|
if [ -z "$key" ]; then key="${{ secrets.DEPLOYMENT_KEY }}"; fi
|
||||||
|
if [ -z "$key" ]; then key="${{ vars.STAGING_DEPLOYMENT_KEY }}"; fi
|
||||||
|
if [ -z "$key" ]; then key="${{ vars.DEPLOYMENT_KEY }}"; fi
|
||||||
|
if [ -z "$key" ]; then missing+=("STAGING_DEPLOYMENT_KEY"); fi
|
||||||
|
|
||||||
|
if [ ${#missing[@]} -gt 0 ]; then
|
||||||
|
echo "❌ Missing staging configuration: ${missing[*]}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
key_file="$RUNNER_TEMP/staging_key"
|
||||||
|
printf '%s\n' "$key" > "$key_file"
|
||||||
|
chmod 600 "$key_file"
|
||||||
|
|
||||||
|
echo "host=$host" >> $GITHUB_OUTPUT
|
||||||
|
echo "user=$user" >> $GITHUB_OUTPUT
|
||||||
|
echo "path=$path" >> $GITHUB_OUTPUT
|
||||||
|
echo "docs-path=$docs_path" >> $GITHUB_OUTPUT
|
||||||
|
echo "key-file=$key_file" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Resolve production credentials
|
||||||
|
id: production
|
||||||
|
run: |
|
||||||
|
missing=()
|
||||||
|
|
||||||
|
host="${{ secrets.PRODUCTION_DEPLOYMENT_HOST }}"
|
||||||
|
if [ -z "$host" ]; then host="${{ vars.PRODUCTION_DEPLOYMENT_HOST }}"; fi
|
||||||
|
if [ -z "$host" ]; then host="${{ secrets.DEPLOYMENT_HOST }}"; fi
|
||||||
|
if [ -z "$host" ]; then host="${{ vars.DEPLOYMENT_HOST }}"; fi
|
||||||
|
if [ -z "$host" ]; then missing+=("PRODUCTION_DEPLOYMENT_HOST"); fi
|
||||||
|
|
||||||
|
user="${{ secrets.PRODUCTION_DEPLOYMENT_USERNAME }}"
|
||||||
|
if [ -z "$user" ]; then user="${{ vars.PRODUCTION_DEPLOYMENT_USERNAME }}"; fi
|
||||||
|
if [ -z "$user" ]; then user="${{ secrets.DEPLOYMENT_USERNAME }}"; fi
|
||||||
|
if [ -z "$user" ]; then user="${{ vars.DEPLOYMENT_USERNAME }}"; fi
|
||||||
|
if [ -z "$user" ]; then missing+=("PRODUCTION_DEPLOYMENT_USERNAME"); fi
|
||||||
|
|
||||||
|
path="${{ secrets.PRODUCTION_DEPLOYMENT_PATH }}"
|
||||||
|
if [ -z "$path" ]; then path="${{ vars.PRODUCTION_DEPLOYMENT_PATH }}"; fi
|
||||||
|
if [ -z "$path" ]; then missing+=("PRODUCTION_DEPLOYMENT_PATH")
|
||||||
|
fi
|
||||||
|
|
||||||
|
docs_path="${{ secrets.PRODUCTION_DOCS_PATH }}"
|
||||||
|
if [ -z "$docs_path" ]; then docs_path="${{ vars.PRODUCTION_DOCS_PATH }}"; fi
|
||||||
|
|
||||||
|
key="${{ secrets.PRODUCTION_DEPLOYMENT_KEY }}"
|
||||||
|
if [ -z "$key" ]; then key="${{ secrets.DEPLOYMENT_KEY }}"; fi
|
||||||
|
if [ -z "$key" ]; then key="${{ vars.PRODUCTION_DEPLOYMENT_KEY }}"; fi
|
||||||
|
if [ -z "$key" ]; then key="${{ vars.DEPLOYMENT_KEY }}"; fi
|
||||||
|
if [ -z "$key" ]; then missing+=("PRODUCTION_DEPLOYMENT_KEY"); fi
|
||||||
|
|
||||||
|
if [ ${#missing[@]} -gt 0 ]; then
|
||||||
|
echo "❌ Missing production configuration: ${missing[*]}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
key_file="$RUNNER_TEMP/production_key"
|
||||||
|
printf '%s\n' "$key" > "$key_file"
|
||||||
|
chmod 600 "$key_file"
|
||||||
|
|
||||||
|
echo "host=$host" >> $GITHUB_OUTPUT
|
||||||
|
echo "user=$user" >> $GITHUB_OUTPUT
|
||||||
|
echo "path=$path" >> $GITHUB_OUTPUT
|
||||||
|
echo "docs-path=$docs_path" >> $GITHUB_OUTPUT
|
||||||
|
echo "key-file=$key_file" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Install rsync
|
||||||
|
run: |
|
||||||
|
if command -v rsync >/dev/null 2>&1; then
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
CACHE_DIR="${CI_CACHE_ROOT:-/tmp}/apt"
|
||||||
|
mkdir -p "$CACHE_DIR"
|
||||||
|
KEY="rsync-$(lsb_release -rs 2>/dev/null || echo unknown)"
|
||||||
|
DEB_DIR="$CACHE_DIR/$KEY"
|
||||||
|
mkdir -p "$DEB_DIR"
|
||||||
|
if ls "$DEB_DIR"/rsync*.deb >/dev/null 2>&1; then
|
||||||
|
apt-get update
|
||||||
|
apt-get install -y --no-install-recommends "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb
|
||||||
|
else
|
||||||
|
apt-get update
|
||||||
|
apt-get download rsync libpopt0
|
||||||
|
mv rsync*.deb libpopt0*.deb "$DEB_DIR"/
|
||||||
|
dpkg -i "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb || apt-get install -f -y
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Fetch staging artefacts
|
||||||
|
id: fetch
|
||||||
|
run: |
|
||||||
|
staging_root="${{ runner.temp }}/staging"
|
||||||
|
mkdir -p "$staging_root/service" "$staging_root/docs"
|
||||||
|
|
||||||
|
echo "📥 Copying service bundle from staging"
|
||||||
|
rsync -az --delete \
|
||||||
|
-e "ssh -i ${{ steps.staging.outputs['key-file'] }} -o StrictHostKeyChecking=no" \
|
||||||
|
"${{ steps.staging.outputs.user }}@${{ steps.staging.outputs.host }}:${{ steps.staging.outputs.path }}/" \
|
||||||
|
"$staging_root/service/"
|
||||||
|
|
||||||
|
if [ "${{ github.event.inputs.include_docs }}" = "true" ] && [ -n "${{ steps.staging.outputs['docs-path'] }}" ]; then
|
||||||
|
echo "📥 Copying documentation bundle from staging"
|
||||||
|
rsync -az --delete \
|
||||||
|
-e "ssh -i ${{ steps.staging.outputs['key-file'] }} -o StrictHostKeyChecking=no" \
|
||||||
|
"${{ steps.staging.outputs.user }}@${{ steps.staging.outputs.host }}:${{ steps.staging.outputs['docs-path'] }}/" \
|
||||||
|
"$staging_root/docs/"
|
||||||
|
else
|
||||||
|
echo "ℹ️ Documentation promotion skipped"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "service-dir=$staging_root/service" >> $GITHUB_OUTPUT
|
||||||
|
echo "docs-dir=$staging_root/docs" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Backup production service content
|
||||||
|
run: |
|
||||||
|
ssh -o StrictHostKeyChecking=no -i "${{ steps.production.outputs['key-file'] }}" \
|
||||||
|
"${{ steps.production.outputs.user }}@${{ steps.production.outputs.host }}" \
|
||||||
|
"set -e; TARGET='${{ steps.production.outputs.path }}'; \
|
||||||
|
if [ -d \"$TARGET\" ]; then \
|
||||||
|
parent=\$(dirname \"$TARGET\"); \
|
||||||
|
base=\$(basename \"$TARGET\"); \
|
||||||
|
backup=\"\$parent/\${base}.backup.\$(date +%Y%m%d_%H%M%S)\"; \
|
||||||
|
mkdir -p \"\$backup\"; \
|
||||||
|
rsync -a --delete \"$TARGET/\" \"\$backup/\"; \
|
||||||
|
ls -dt \"\$parent/\${base}.backup.*\" 2>/dev/null | tail -n +6 | xargs rm -rf || true; \
|
||||||
|
echo 'Backup created at ' \"\$backup\"; \
|
||||||
|
else \
|
||||||
|
echo 'Production service path missing; skipping backup'; \
|
||||||
|
fi"
|
||||||
|
|
||||||
|
- name: Publish service to production
|
||||||
|
run: |
|
||||||
|
rsync -az --delete \
|
||||||
|
-e "ssh -i ${{ steps.production.outputs['key-file'] }} -o StrictHostKeyChecking=no" \
|
||||||
|
"${{ steps.fetch.outputs['service-dir'] }}/" \
|
||||||
|
"${{ steps.production.outputs.user }}@${{ steps.production.outputs.host }}:${{ steps.production.outputs.path }}/"
|
||||||
|
|
||||||
|
- name: Promote documentation bundle
|
||||||
|
if: github.event.inputs.include_docs == 'true' && steps.production.outputs['docs-path'] != ''
|
||||||
|
run: |
|
||||||
|
rsync -az --delete \
|
||||||
|
-e "ssh -i ${{ steps.production.outputs['key-file'] }} -o StrictHostKeyChecking=no" \
|
||||||
|
"${{ steps.fetch.outputs['docs-dir'] }}/" \
|
||||||
|
"${{ steps.production.outputs.user }}@${{ steps.production.outputs.host }}:${{ steps.production.outputs['docs-path'] }}/"
|
||||||
|
|
||||||
|
- name: Promotion summary
|
||||||
|
run: |
|
||||||
|
echo "✅ Promotion completed"
|
||||||
|
echo " Tag: ${{ github.event.inputs.tag }}"
|
||||||
|
echo " Service: ${{ steps.staging.outputs.host }} → ${{ steps.production.outputs.host }}"
|
||||||
|
if [ "${{ github.event.inputs.include_docs }}" = "true" ]; then
|
||||||
|
echo " Docs: included"
|
||||||
|
else
|
||||||
|
echo " Docs: skipped"
|
||||||
|
fi
|
||||||
21
.gitignore
vendored
Normal file
21
.gitignore
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# Build outputs
|
||||||
|
bin/
|
||||||
|
obj/
|
||||||
|
*.pdb
|
||||||
|
*.dll
|
||||||
|
|
||||||
|
# IDE state
|
||||||
|
.vs/
|
||||||
|
*.user
|
||||||
|
*.suo
|
||||||
|
*.userprefs
|
||||||
|
|
||||||
|
# Rider/VSCode
|
||||||
|
.idea/
|
||||||
|
.vscode/
|
||||||
|
|
||||||
|
# Packages and logs
|
||||||
|
*.log
|
||||||
|
TestResults/
|
||||||
|
|
||||||
|
.dotnet
|
||||||
125
AGENTS.md
Normal file
125
AGENTS.md
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
# 1) What is StellaOps?
|
||||||
|
|
||||||
|
**StellaOps** is an open, sovereign, modular container-security toolkit built for high-speed, offline operation, released under AGPL-3.0-or-later.
|
||||||
|
|
||||||
|
It follows an SBOM-first model—analyzing each container layer or ingesting existing CycloneDX/SPDX SBOMs, then enriching them with vulnerability, licence, secret-leak, and misconfiguration data to produce cryptographically signed reports.
|
||||||
|
|
||||||
|
Vulnerability detection maps OS and language dependencies to sources such as NVD, GHSA, OSV, ENISA.
|
||||||
|
Secrets sweep flags exposed credentials or keys in files or environment variables.
|
||||||
|
Licence audit identifies potential conflicts, especially copyleft obligations.
|
||||||
|
Misconfiguration checks detect unsafe Dockerfile patterns (root user, latest tags, permissive modes).
|
||||||
|
Provenance features include in-toto/SLSA attestations signed with cosign for supply-chain trust.
|
||||||
|
|
||||||
|
| Guiding principle | What it means for Feedser |
|
||||||
|
|-------------------|---------------------------|
|
||||||
|
| **SBOM-first ingest** | Prefer signed SBOMs or reproducible layer diffs before falling back to raw scraping; connectors treat source docs as provenance, never as mutable truth. |
|
||||||
|
| **Deterministic outputs** | Same inputs yield identical canonical advisories and exported JSON/Trivy DB artefacts; merge hashes and export manifests are reproducible across machines. |
|
||||||
|
| **Restart-time plug-ins only** | Connector/exporter plug-ins load at service start, keeping runtime sandboxing simple and avoiding hot-patch attack surface. |
|
||||||
|
| **Sovereign/offline-first** | No mandatory outbound calls beyond allow-listed advisories; Offline Kit bundles Mongo snapshots and exporter artefacts for air-gapped installs. |
|
||||||
|
| **Operational transparency** | Every stage logs structured events (fetch, parse, merge, export) with correlation IDs so parallel agents can debug without shared state. |
|
||||||
|
|
||||||
|
Performance: warm scans < 5 s, cold scans < 30 s on a 4 vCPU runner.
|
||||||
|
Deployment: entirely SaaS-free, suitable for air-gapped or on-prem use through its Offline Kit.
|
||||||
|
Policy: anonymous users → 33 scans/day; verified → 333 /day; nearing 90 % quota triggers throttling but never full blocks.
|
||||||
|
|
||||||
|
More documentation is available in the ./docs/*.md files. Read `docs/README.md` to gather information about the available documentation. You can consult specific documents as your work requires.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# 3) Practices
|
||||||
|
|
||||||
|
## 3.1) Naming
|
||||||
|
All modules are .NET projects based on .NET 10 (preview). The exception is the UI, which is based on Angular.
|
||||||
|
Each module consists of one or more projects. Each project goes in its dedicated folder. Each project name starts with StellaOps.<ModuleName>. If it is common to all StellaOps modules, it is a library or plugin and is named StellaOps.<LibraryOrPlugin>.
|
||||||
|
|
||||||
|
## 3.2) Key technologies & integrations
|
||||||
|
|
||||||
|
- **Runtime**: .NET 10 (`net10.0`) preview SDK; C# latest preview features.
|
||||||
|
- **Data**: MongoDB (canonical store and job/export state).
|
||||||
|
- **Observability**: structured logs, counters, and (optional) OpenTelemetry traces.
|
||||||
|
- **Ops posture**: offline‑first, allowlist for remote hosts, strict schema validation, gated LLM fallback (only where explicitly configured).
|
||||||
|
|
||||||
|
# 4) Modules
|
||||||
|
StellaOps consists of different modules installable via Docker containers:
|
||||||
|
- Feedser. Responsible for aggregation and delivery of vulnerability database
|
||||||
|
- Cli. Command line tool to unlock full potential - request database operations, install scanner, request scan, configure backend
|
||||||
|
- Backend. Configures and Manages scans
|
||||||
|
- UI. UI to access the backend (and scanners)
|
||||||
|
- Agent. Installable daemon that does the scanning
|
||||||
|
- Zastava. Realtime monitor for allowed (verified) installations.
|
||||||
|
|
||||||
|
## 4.1) Feedser
|
||||||
|
It is a web-service-based module responsible for aggregating vulnerability information from various sources, parsing and normalizing it into a canonical shape, merging and deduplicating the results in one place, with export capabilities to JSON and TrivyDb. It supports init and resume for all of the sources, parse/normalize and merge/deduplication operations, plus export. Export supports delta exports—similarly to full and incremental database backups.
|
||||||
|
|
||||||
|
### 4.1.1) Usage
|
||||||
|
It supports operations to be started by cmd line:
|
||||||
|
# stella db [fetch|merge|export] [init|resume <point>]
|
||||||
|
or
|
||||||
|
api available on https://db.stella-ops.org
|
||||||
|
|
||||||
|
### 4.1.2) Data flow (end‑to‑end)
|
||||||
|
|
||||||
|
1. **Fetch**: connectors request source windows with retries/backoff, persist raw documents with SHA256/ETag metadata.
|
||||||
|
2. **Parse & Normalize**: validate to DTOs (schema-checked), quarantine failures, normalize to canonical advisories (aliases, affected ranges with NEVRA/EVR/SemVer, references, provenance).
|
||||||
|
3. **Merge & Deduplicate**: enforce precedence, build/maintain alias graphs, compute deterministic hashes, and eliminate duplicates before persisting to MongoDB.
|
||||||
|
4. **Export**: JSON tree and/or Trivy DB; package and (optionally) push; write export state.
|
||||||
|
|
||||||
|
### 4.1.3) Architecture
|
||||||
|
For more information of the architecture see `./docs/ARCHITECTURE_FEEDSER.md`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4.1.4) Glossary (quick)
|
||||||
|
|
||||||
|
- **OVAL** — Vendor/distro security definition format; authoritative for OS packages.
|
||||||
|
- **NEVRA / EVR** — RPM and Debian version semantics for OS packages.
|
||||||
|
- **PURL / SemVer** — Coordinates and version semantics for OSS ecosystems.
|
||||||
|
- **KEV** — Known Exploited Vulnerabilities (flag only).
|
||||||
|
|
||||||
|
---
|
||||||
|
# 5) Your role as StellaOps contributor
|
||||||
|
|
||||||
|
You are acting as an information technology engineer who will take on different roles with the goal of achieving the StellaOps production implementation.
|
||||||
|
In order to work, you must be supplied with a directory that contains `AGENTS.md` and `TASKS.md` files. There you will find more information about your role, the scope of your work, and the tasks you will have.
|
||||||
|
|
||||||
|
Boundaries:
|
||||||
|
- You operate only in the working directories given to you, unless a dependency requires you to work in a shared directory; in that case, ask for confirmation first.
|
||||||
|
|
||||||
|
Your main characteristics:
|
||||||
|
- Keep endpoints small, deterministic, and cancellation-aware.
|
||||||
|
- Improve logs/metrics as per tasks.
|
||||||
|
- Update `TASKS.md` when moving tasks forward.
|
||||||
|
- When you are done with all tasks, you state explicitly that you are done.
|
||||||
|
- Impersonate the role described on working directory `AGENTS.md` you will read, if role is not available - take role of the CTO of the StellaOps in early stages.
|
||||||
|
- You always strive for best practices
|
||||||
|
- You always strive for re-usability
|
||||||
|
- When in doubt of design decision - you ask then act
|
||||||
|
- You are autonomous — meaning you will work alone for long periods and achieve as much as possible without stopping for unnecessary questions.
|
||||||
|
- You operate on the same directory where other agents will work. In case you need to work on directory that is dependency on provided `AGENTS.md`,`TASKS.md` files you have to ask for confirmation first.
|
||||||
|
|
||||||
|
## 5.1) Type of contributions
|
||||||
|
|
||||||
|
- **BE‑Base (Platform & Pipeline)**
|
||||||
|
Owns DI, plugin host, job scheduler/coordinator, configuration binding, minimal API endpoints, and Mongo bootstrapping.
|
||||||
|
- **BE‑Conn‑X (Connectors)**
|
||||||
|
One agent per source family (NVD, Red Hat, Ubuntu, Debian, SUSE, GHSA, OSV, PSIRTs, CERTs, KEV, ICS). Implements fetch/parse/map with incremental watermarks.
|
||||||
|
- **BE‑Merge (Canonical Merge & Dedupe)**
|
||||||
|
Identity graph, precedence policies, canonical JSON serializer, and deterministic hashing (`merge_event`).
|
||||||
|
- **BE‑Export (JSON & Trivy DB)**
|
||||||
|
Deterministic export trees, Trivy DB packaging, optional ORAS push, and offline bundle.
|
||||||
|
- **QA (Validation & Observability)**
|
||||||
|
Schema tests, fixture goldens, determinism checks, metrics/logs/traces, e2e reproducibility runs.
|
||||||
|
- **DevEx/Docs**
|
||||||
|
Maintains this agent framework, templates, and per‑directory guides; assists parallelization and reviews.
|
||||||
|
|
||||||
|
|
||||||
|
## 5.2) Work-in-parallel rules (important)
|
||||||
|
|
||||||
|
- **Directory ownership**: Each agent works **only inside its module directory**. Cross‑module edits require a brief handshake in issues/PR description.
|
||||||
|
- **Scoping**: Use each module’s `AGENTS.md` and `TASKS.md` to plan; autonomous agents must read `src/AGENTS.md` and the module docs before acting.
|
||||||
|
- **Determinism**: Sort keys, normalize timestamps to UTC ISO‑8601, avoid non‑deterministic data in exports and tests.
|
||||||
|
- **Status tracking**: Update your module’s `TASKS.md` as you progress (TODO → DOING → DONE/BLOCKED). Before starting actual work, ensure you have set the task to DOING. When you complete or stop, update the status in the corresponding TASKS.md or in the ./SPRINTS.md file.
|
||||||
|
- **Tests**: Add/extend fixtures and unit tests per change; never regress determinism or precedence.
|
||||||
|
- **Test layout**: Use module-specific projects in `StellaOps.Feedser.<Component>.Tests`; shared fixtures/harnesses live in `StellaOps.Feedser.Testing`.
|
||||||
|
|
||||||
|
---
|
||||||
235
LICENSE
Executable file
235
LICENSE
Executable file
@@ -0,0 +1,235 @@
|
|||||||
|
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||||
|
Version 3, 19 November 2007
|
||||||
|
|
||||||
|
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||||
|
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
|
||||||
|
|
||||||
|
Preamble
|
||||||
|
|
||||||
|
The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software.
|
||||||
|
|
||||||
|
The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users.
|
||||||
|
|
||||||
|
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
|
||||||
|
|
||||||
|
Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software.
|
||||||
|
|
||||||
|
A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public.
|
||||||
|
|
||||||
|
The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version.
|
||||||
|
|
||||||
|
An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license.
|
||||||
|
|
||||||
|
The precise terms and conditions for copying, distribution and modification follow.
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
0. Definitions.
|
||||||
|
|
||||||
|
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||||
|
|
||||||
|
"Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
|
||||||
|
|
||||||
|
"The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations.
|
||||||
|
|
||||||
|
To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work.
|
||||||
|
|
||||||
|
A "covered work" means either the unmodified Program or a work based on the Program.
|
||||||
|
|
||||||
|
To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
|
||||||
|
|
||||||
|
To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
|
||||||
|
|
||||||
|
An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
|
||||||
|
|
||||||
|
1. Source Code.
|
||||||
|
The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work.
|
||||||
|
|
||||||
|
A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.
|
||||||
|
|
||||||
|
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.
|
||||||
|
|
||||||
|
The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those
|
||||||
|
subprograms and other parts of the work.
|
||||||
|
|
||||||
|
The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
|
||||||
|
|
||||||
|
The Corresponding Source for a work in source code form is that same work.
|
||||||
|
|
||||||
|
2. Basic Permissions.
|
||||||
|
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.
|
||||||
|
|
||||||
|
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.
|
||||||
|
|
||||||
|
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
|
||||||
|
|
||||||
|
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||||
|
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.
|
||||||
|
|
||||||
|
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.
|
||||||
|
|
||||||
|
4. Conveying Verbatim Copies.
|
||||||
|
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.
|
||||||
|
|
||||||
|
You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
|
||||||
|
|
||||||
|
5. Conveying Modified Source Versions.
|
||||||
|
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:
|
||||||
|
|
||||||
|
a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
|
||||||
|
|
||||||
|
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices".
|
||||||
|
|
||||||
|
c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
|
||||||
|
|
||||||
|
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
|
||||||
|
|
||||||
|
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
|
||||||
|
|
||||||
|
6. Conveying Non-Source Forms.
|
||||||
|
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:
|
||||||
|
|
||||||
|
a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
|
||||||
|
|
||||||
|
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
|
||||||
|
|
||||||
|
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
|
||||||
|
|
||||||
|
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
|
||||||
|
|
||||||
|
e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
|
||||||
|
|
||||||
|
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
|
||||||
|
|
||||||
|
A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.
|
||||||
|
|
||||||
|
"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.
|
||||||
|
|
||||||
|
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).
|
||||||
|
|
||||||
|
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
|
||||||
|
|
||||||
|
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.
|
||||||
|
|
||||||
|
7. Additional Terms.
|
||||||
|
"Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
|
||||||
|
|
||||||
|
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:
|
||||||
|
|
||||||
|
a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
|
||||||
|
|
||||||
|
b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
|
||||||
|
|
||||||
|
c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
|
||||||
|
|
||||||
|
d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
|
||||||
|
|
||||||
|
e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
|
||||||
|
|
||||||
|
f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
|
||||||
|
|
||||||
|
All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
|
||||||
|
|
||||||
|
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.
|
||||||
|
|
||||||
|
Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
|
||||||
|
|
||||||
|
8. Termination.
|
||||||
|
|
||||||
|
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).
|
||||||
|
|
||||||
|
However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
|
||||||
|
|
||||||
|
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
|
||||||
|
|
||||||
|
Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
|
||||||
|
|
||||||
|
9. Acceptance Not Required for Having Copies.
|
||||||
|
|
||||||
|
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
|
||||||
|
|
||||||
|
10. Automatic Licensing of Downstream Recipients.
|
||||||
|
|
||||||
|
Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.
|
||||||
|
|
||||||
|
An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
|
||||||
|
|
||||||
|
You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.
|
||||||
|
|
||||||
|
11. Patents.
|
||||||
|
|
||||||
|
A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version".
|
||||||
|
|
||||||
|
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
|
||||||
|
|
||||||
|
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.
|
||||||
|
|
||||||
|
In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
|
||||||
|
|
||||||
|
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.
|
||||||
|
|
||||||
|
If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.
|
||||||
|
|
||||||
|
A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
|
||||||
|
|
||||||
|
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
|
||||||
|
|
||||||
|
12. No Surrender of Others' Freedom.
|
||||||
|
|
||||||
|
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
|
||||||
|
|
||||||
|
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License.
|
||||||
|
|
||||||
|
14. Revised Versions of this License.
|
||||||
|
|
||||||
|
The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation.
|
||||||
|
|
||||||
|
If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.
|
||||||
|
|
||||||
|
Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
|
||||||
|
|
||||||
|
15. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||||
|
|
||||||
|
16. Limitation of Liability.
|
||||||
|
|
||||||
|
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
|
||||||
|
|
||||||
|
17. Interpretation of Sections 15 and 16.
|
||||||
|
|
||||||
|
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
How to Apply These Terms to Your New Programs
|
||||||
|
|
||||||
|
If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
|
||||||
|
|
||||||
|
To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
|
||||||
|
|
||||||
|
git.stella-ops.org
|
||||||
|
Copyright (C) 2025 stella-ops.org
|
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
Also add information on how to contact you by electronic and paper mail.
|
||||||
|
|
||||||
|
If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements.
|
||||||
|
|
||||||
|
You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <http://www.gnu.org/licenses/>.
|
||||||
28
README.md
Executable file
28
README.md
Executable file
@@ -0,0 +1,28 @@
|
|||||||
|
# StellaOps Feedser & CLI
|
||||||
|
|
||||||
|
This repository hosts the StellaOps Feedser service, its plug-in ecosystem, and the
|
||||||
|
first-party CLI (`stellaops-cli`). Feedser ingests vulnerability advisories from
|
||||||
|
authoritative sources, stores them in MongoDB, and exports deterministic JSON and
|
||||||
|
Trivy DB artefacts. The CLI drives scanner distribution, scan execution, and job
|
||||||
|
control against the Feedser API.
|
||||||
|
|
||||||
|
## Quickstart
|
||||||
|
|
||||||
|
1. Prepare a MongoDB instance and (optionally) install `trivy-db`/`oras`.
|
||||||
|
2. Copy `etc/feedser.yaml.sample` to `etc/feedser.yaml` and update the storage + telemetry
|
||||||
|
settings.
|
||||||
|
3. Copy `etc/authority.yaml.sample` to `etc/authority.yaml`, review the issuer, token
|
||||||
|
lifetimes, and plug-in descriptors, then edit the companion manifests under
|
||||||
|
`etc/authority.plugins/*.yaml` to match your deployment.
|
||||||
|
4. Start the web service with `dotnet run --project src/StellaOps.Feedser.WebService`.
|
||||||
|
5. Configure the CLI via environment variables (e.g. `STELLAOPS_BACKEND_URL`) and trigger
|
||||||
|
jobs with `dotnet run --project src/StellaOps.Cli -- db merge`.
|
||||||
|
|
||||||
|
Detailed operator guidance is available in `docs/10_FEEDSER_CLI_QUICKSTART.md`. API and
|
||||||
|
command reference material lives in `docs/09_API_CLI_REFERENCE.md`.
|
||||||
|
|
||||||
|
Pipeline note: deployment workflows should template `etc/feedser.yaml` during CI/CD,
|
||||||
|
injecting environment-specific Mongo credentials and telemetry endpoints. Upcoming
|
||||||
|
releases will add Microsoft OAuth (Entra ID) authentication support—track the quickstart
|
||||||
|
for integration steps once available.
|
||||||
|
|
||||||
95
SPRINTS.md
Normal file
95
SPRINTS.md
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
| Sprint | Theme | Tasks File Path | Status | Type of Specialist | Task ID | Task Description |
|
||||||
|
| --- | --- | --- | --- | --- | --- | --- |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Models/TASKS.md | — | Team Models & Merge Leads | FEEDMODELS-SCHEMA-01-001 | SemVer primitive range-style metadata<br>Instructions to work:<br>DONE Read ./AGENTS.md and src/StellaOps.Feedser.Models/AGENTS.md. This task lays the groundwork—complete the SemVer helper updates before teammates pick up FEEDMODELS-SCHEMA-01-002/003 and FEEDMODELS-SCHEMA-02-900. Use ./src/FASTER_MODELING_AND_NORMALIZATION.md for the target rule structure. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Models/TASKS.md | DONE (2025-10-11) | Team Models & Merge Leads | FEEDMODELS-SCHEMA-01-002 | Provenance decision rationale field<br>Instructions to work:<br>AdvisoryProvenance now carries `decisionReason` and docs/tests were updated. Connectors and merge tasks should populate the field when applying precedence/freshness/tie-breaker logic; see src/StellaOps.Feedser.Models/PROVENANCE_GUIDELINES.md for usage guidance. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Models/TASKS.md | DONE (2025-10-11) | Team Models & Merge Leads | FEEDMODELS-SCHEMA-01-003 | Normalized version rules collection<br>Instructions to work:<br>`AffectedPackage.NormalizedVersions` and supporting comparer/docs/tests shipped. Connector owners must emit rule arrays per ./src/FASTER_MODELING_AND_NORMALIZATION.md and report progress via FEEDMERGE-COORD-02-900 so merge/storage backfills can proceed. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Models/TASKS.md | — | Team Models & Merge Leads | FEEDMODELS-SCHEMA-02-900 | Range primitives for SemVer/EVR/NEVRA metadata<br>Instructions to work:<br>DONE Read ./AGENTS.md and src/StellaOps.Feedser.Models/AGENTS.md before resuming this stalled effort. Confirm helpers align with the new `NormalizedVersions` representation so connectors finishing in Sprint 2 can emit consistent metadata. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Normalization/TASKS.md | DONE (2025-10-11) | Team Normalization & Storage Backbone | FEEDNORM-NORM-02-001 | SemVer normalized rule emitter<br>Shared `SemVerRangeRuleBuilder` now outputs primitives + normalized rules per `FASTER_MODELING_AND_NORMALIZATION.md`; CVE/GHSA connectors consuming the API have verified fixtures. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | DONE (2025-10-11) | Team Normalization & Storage Backbone | FEEDSTORAGE-DATA-02-001 | Normalized range dual-write + backfill<br>AdvisoryStore dual-writes flattened `normalizedVersions` when `feedser.storage.enableSemVerStyle` is set; migration `20251011-semver-style-backfill` updates historical records and docs outline the rollout. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | DONE (2025-10-11) | Team Normalization & Storage Backbone | FEEDSTORAGE-DATA-02-002 | Provenance decision reason persistence<br>Storage now persists `provenance.decisionReason` for advisories and merge events; tests cover round-trips. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | DONE (2025-10-11) | Team Normalization & Storage Backbone | FEEDSTORAGE-DATA-02-003 | Normalized versions indexing<br>Bootstrapper seeds compound/sparse indexes for flattened normalized rules and `docs/dev/mongo_indices.md` documents query guidance. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | DONE (2025-10-11) | Team Normalization & Storage Backbone | FEEDSTORAGE-TESTS-02-004 | Restore AdvisoryStore build after normalized versions refactor<br>Updated constructors/tests keep storage suites passing with the new feature flag defaults. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-ENGINE-01-002 | Plumb Authority client resilience options<br>WebService now binds `authority.resilience.*` into `AddStellaOpsAuthClient`; integration tests verify retry/offline tolerance wiring. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-DOCS-01-003 | Author ops guidance for resilience tuning<br>Quickstart/install docs ship `authority.resilience.*` defaults, online vs air-gapped tuning, and monitoring references aligned with WebService coverage. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-DOCS-01-004 | Document authority bypass logging patterns<br>Guides now include the structured audit log fields (`route/status/subject/clientId/scopes/bypass/remote`) and SIEM alert guidance. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-DOCS-01-005 | Update Feedser operator guide for enforcement cutoff<br>Operator guide details the 2025-12-31 cutoff checklist and env var rollout to disable `allowAnonymousFallback`. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Authority/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | SEC3.HOST | Rate limiter policy binding<br>Authority host now applies configuration-driven fixed windows to `/token`, `/authorize`, and `/internal/*`; integration tests assert 429 + `Retry-After` headers; docs/config samples refreshed for Docs guild diagrams. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Authority/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | SEC3.BUILD | Authority rate-limiter follow-through<br>`Security.RateLimiting` now fronts token/authorize/internal limiters; Authority + Configuration matrices (`dotnet test src/StellaOps.Authority/StellaOps.Authority.sln`, `dotnet test src/StellaOps.Configuration.Tests/StellaOps.Configuration.Tests.csproj`) passed on 2025-10-11; awaiting #authority-core broadcast. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Authority/StellaOps.Authority.Plugin.Standard/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | PLG6.DOC | Plugin developer guide polish<br>Section 9 now documents rate limiter metadata, config keys, and lockout interplay; YAML samples updated alongside Authority config templates. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | DONE (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-001 | Fetch pipeline & state tracking<br>Summary planner now drives monthly/yearly VINCE fetches, persists pending summaries/notes, and hydrates VINCE detail queue with telemetry.<br>Team instructions: Read ./AGENTS.md and src/StellaOps.Feedser.Source.CertCc/AGENTS.md. Coordinate daily with Models/Merge leads so new normalizedVersions output and provenance tags stay aligned with ./src/FASTER_MODELING_AND_NORMALIZATION.md. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | DONE (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-002 | VINCE note detail fetcher<br>Summary planner queues VINCE note detail endpoints, persists raw JSON with SHA/ETag metadata, and records retry/backoff metrics. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | DONE (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-003 | DTO & parser implementation<br>Added VINCE DTO aggregate, Markdown→text sanitizer, vendor/status/vulnerability parsers, and parser regression fixture. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | DONE (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-004 | Canonical mapping & range primitives<br>VINCE DTO aggregate flows through `CertCcMapper`, emitting vendor range primitives + normalized version rules that persist via `_advisoryStore`. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | DOING (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-005 | Deterministic fixtures/tests<br>Fetch→parse→map integration suite in place; snapshot harness reactivation pending fixture regeneration & documentation of the `UPDATE_CERTCC_FIXTURES` flow. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | DONE (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-006 | Telemetry & documentation<br>WebService now exports the `StellaOps.Feedser.Source.CertCc` meter and fetch integration tests assert `certcc.*` counters; README updated with observability guidance. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | DOING (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-007 | Connector test harness remediation<br>Canned-response harness restored; snapshot fixtures still being realigned and regeneration steps documented. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | BLOCKED (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-008 | Snapshot coverage handoff<br>Upstream repo version lacks SemVer primitives + provenance decision reason fields, so snapshot regeneration fails; resume once Models/Storage sprint lands those changes. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | TODO | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-012 | Schema sync & snapshot regen follow-up<br>Re-run fixture updates and deliver Merge handoff after SemVer style + decision reason updates merge into main. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | DONE (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-009 | Detail/map reintegration plan<br>Staged reintegration plan published in `src/StellaOps.Feedser.Source.CertCc/FEEDCONN-CERTCC-02-009_PLAN.md`; coordinates enablement with FEEDCONN-CERTCC-02-004. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.CertCc/TASKS.md | TODO (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-CERTCC-02-010 | Partial-detail graceful degradation<br>Ensure missing VINCE endpoints downgrade gracefully without fatal errors; add coverage for partial note ingestion paths. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Distro.RedHat/TASKS.md | DONE (2025-10-11) | Team Connector Resumption – CERT/RedHat | FEEDCONN-REDHAT-02-001 | Fixture validation sweep<br>Instructions to work:<br>Fixtures regenerated post-model-helper rollout; provenance ordering and normalizedVersions scaffolding verified via tests. Conflict resolver deltas logged in src/StellaOps.Feedser.Source.Distro.RedHat/CONFLICT_RESOLVER_NOTES.md for Sprint 3 consumers. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Vndr.Apple/TASKS.md | — | Team Vendor Apple Specialists | FEEDCONN-APPLE-02-001 | Canonical mapping & range primitives |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Vndr.Apple/TASKS.md | DONE (2025-10-11) | Team Vendor Apple Specialists | FEEDCONN-APPLE-02-002 | Deterministic fixtures/tests<br>Sanitized live fixtures + regression snapshots wired into tests; normalized rule coverage asserted. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Vndr.Apple/TASKS.md | DONE (2025-10-11) | Team Vendor Apple Specialists | FEEDCONN-APPLE-02-003 | Telemetry & documentation<br>Apple meter metrics wired into Feedser WebService OpenTelemetry configuration; README and fixtures document normalizedVersions coverage. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Vndr.Apple/TASKS.md | DONE (2025-10-11) | Team Vendor Apple Specialists | FEEDCONN-APPLE-02-004 | Live HTML regression sweep<br>Live support.apple.com sweep captured iOS/macOS/RSR cases with deterministic sanitizers. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Vndr.Apple/TASKS.md | DONE (2025-10-11) | Team Vendor Apple Specialists | FEEDCONN-APPLE-02-005 | Fixture regeneration tooling<br>`UPDATE_APPLE_FIXTURES=1` flow fetches & rewrites fixtures; README documents usage.<br>Instructions to work:<br>DONE Read ./AGENTS.md and src/StellaOps.Feedser.Source.Vndr.Apple/AGENTS.md. Resume stalled tasks, ensuring normalizedVersions output and fixtures align with ./src/FASTER_MODELING_AND_NORMALIZATION.md before handing data to the conflict sprint. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Ghsa/TASKS.md | — | Team Connector Normalized Versions Rollout | FEEDCONN-GHSA-02-001 | GHSA normalized versions & provenance<br>Team instructions: Read ./AGENTS.md and each module's AGENTS file. Adopt the `NormalizedVersions` array emitted by the models sprint, wiring provenance `decisionReason` where merge overrides occur. Follow ./src/FASTER_MODELING_AND_NORMALIZATION.md; report via src/StellaOps.Feedser.Merge/TASKS.md (FEEDMERGE-COORD-02-900). Progress 2025-10-11: GHSA/OSV emit normalized arrays with refreshed fixtures; CVE mapper now surfaces SemVer normalized ranges; NVD/KEV adoption pending; outstanding follow-ups include FEEDSTORAGE-DATA-02-001, FEEDMERGE-ENGINE-02-002, and rolling `tools/FixtureUpdater` updates across connectors. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Osv/TASKS.md | — | Team Connector Normalized Versions Rollout | FEEDCONN-OSV-02-003 | OSV normalized versions & freshness |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Nvd/TASKS.md | — | Team Connector Normalized Versions Rollout | FEEDCONN-NVD-02-002 | NVD normalized versions & timestamps |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Cve/TASKS.md | — | Team Connector Normalized Versions Rollout | FEEDCONN-CVE-02-003 | CVE normalized versions uplift |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Kev/TASKS.md | — | Team Connector Normalized Versions Rollout | FEEDCONN-KEV-02-003 | KEV normalized versions propagation |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.Source.Osv/TASKS.md | — | Team Connector Normalized Versions Rollout | FEEDCONN-OSV-04-003 | OSV parity fixture refresh |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-10) | Team WebService & Authority | FEEDWEB-DOCS-01-001 | Document authority toggle & scope requirements<br>Quickstart carries toggle/scope guidance pending docs guild review (no change this sprint). |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-ENGINE-01-002 | Plumb Authority client resilience options<br>WebService binds `authority.resilience.*` into `AddStellaOpsAuthClient`; integration tests validate retry/offline wiring. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-DOCS-01-003 | Author ops guidance for resilience tuning<br>Operator docs cover resilience defaults, environment-specific tuning, and monitoring cues. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-DOCS-01-004 | Document authority bypass logging patterns<br>Audit logging examples now document bypass fields and recommended alerts. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-DOCS-01-005 | Update Feedser operator guide for enforcement cutoff<br>Enforcement checklist and env overrides documented ahead of the 2025-12-31 cutoff. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-OPS-01-006 | Rename plugin drop directory to namespaced path<br>Build outputs, tests, and docs now target `StellaOps.Feedser.PluginBinaries`/`StellaOps.Authority.PluginBinaries`. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Feedser.WebService/TASKS.md | DONE (2025-10-11) | Team WebService & Authority | FEEDWEB-OPS-01-007 | Authority resilience adoption<br>Deployment docs and CLI notes explain the LIB5 resilience knobs for rollout.<br>Instructions to work:<br>DONE Read ./AGENTS.md and src/StellaOps.Feedser.WebService/AGENTS.md. These items were mid-flight; resume implementation ensuring docs/operators receive timely updates. |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Authority/TASKS.md | DONE (2025-10-11) | Team Authority Platform & Security Guild | AUTHCORE-ENGINE-01-001 | CORE8.RL — Rate limiter plumbing validated; integration tests green and docs handoff recorded for middleware ordering + Retry-After headers (see `docs/dev/authority-rate-limit-tuning-outline.md` for continuing guidance). |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Cryptography/TASKS.md | DONE (2025-10-11) | Team Authority Platform & Security Guild | AUTHCRYPTO-ENGINE-01-001 | SEC3.A — Shared metadata resolver confirmed via host test run; SEC3.B now unblocked for tuning guidance (outline captured in `docs/dev/authority-rate-limit-tuning-outline.md`). |
|
||||||
|
| Sprint 1 | Stabilize In-Progress Foundations | src/StellaOps.Authority/StellaOps.Authority.Plugin.Standard/TASKS.md | DOING (2025-10-11) | Team Authority Platform & Security Guild | AUTHPLUG-DOCS-01-001 | PLG6.DOC — Docs guild resuming diagram/copy updates using the captured limiter context + configuration notes (reference `docs/dev/authority-rate-limit-tuning-outline.md` for tuning matrix + observability copy).<br>Instructions to work:<br>Read ./AGENTS.md plus module-specific AGENTS. Restart the blocked rate-limiter workstream (Authority host + cryptography) so the plugin docs team can finish diagrams. Coordinate daily; use ./src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md where rate limiting interacts with conflict policy. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Normalization/TASKS.md | — | Team Normalization & Storage Backbone | FEEDNORM-NORM-02-001 | SemVer normalized rule emitter<br>Instructions to work:<br>Read ./AGENTS.md and module AGENTS. Use ./src/FASTER_MODELING_AND_NORMALIZATION.md to build the shared rule generator; sync daily with storage and connector owners. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | — | Team Normalization & Storage Backbone | FEEDSTORAGE-DATA-02-001 | Normalized range dual-write + backfill |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | — | Team Normalization & Storage Backbone | FEEDSTORAGE-DATA-02-002 | Provenance decision reason persistence |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | — | Team Normalization & Storage Backbone | FEEDSTORAGE-DATA-02-003 | Normalized versions indexing<br>Instructions to work:<br>Read ./AGENTS.md and storage AGENTS. Implement dual-write/backfill and index creation using the shapes from ./src/FASTER_MODELING_AND_NORMALIZATION.md; coordinate with connectors entering the sprint. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Merge/TASKS.md | DONE (2025-10-11) | Team Normalization & Storage Backbone | FEEDMERGE-ENGINE-02-002 | Normalized versions union & dedupe<br>Affected package resolver unions/dedupes normalized rules, stamps merge provenance with `decisionReason`, and tests cover the rollout. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Ghsa/TASKS.md | DONE (2025-10-11) | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-GHSA-02-001 | GHSA normalized versions & provenance |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Ghsa/TASKS.md | DONE (2025-10-11) | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-GHSA-02-004 | GHSA credits & ecosystem severity mapping |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Ghsa/TASKS.md | TODO | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-GHSA-02-005 | GitHub quota monitoring & retries |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Ghsa/TASKS.md | TODO | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-GHSA-02-006 | Production credential & scheduler rollout |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Ghsa/TASKS.md | TODO | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-GHSA-02-007 | Credit parity regression fixtures |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Nvd/TASKS.md | DONE (2025-10-11) | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-NVD-02-002 | NVD normalized versions & timestamps |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Nvd/TASKS.md | DONE (2025-10-11) | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-NVD-02-004 | NVD CVSS & CWE precedence payloads |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Nvd/TASKS.md | TODO | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-NVD-02-005 | NVD merge/export parity regression |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Osv/TASKS.md | DONE (2025-10-11) | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-OSV-02-003 | OSV normalized versions & freshness |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Osv/TASKS.md | DONE (2025-10-11) | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-OSV-02-004 | OSV references & credits alignment |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Osv/TASKS.md | TODO | Team Connector Expansion – GHSA/NVD/OSV | FEEDCONN-OSV-02-005 | Fixture updater workflow<br>Instructions to work:<br>Read ./AGENTS.md and respective module AGENTS. Implement builder integration, provenance, and supporting docs using ./src/FASTER_MODELING_AND_NORMALIZATION.md and ensure outputs satisfy the precedence matrix in ./src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Acsc/TASKS.md | Research DOING | Team Connector Expansion – Regional & Vendor Feeds | FEEDCONN-ACSC-02-001 … 02-008 | RSS endpoints catalogued 2025-10-11, HTTP/2 error + pagination validation and client compatibility task opened. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Cccs/TASKS.md | Research DOING | Team Connector Expansion – Regional & Vendor Feeds | FEEDCONN-CCCS-02-001 … 02-007 | Atom feed verified 2025-10-11, history/caching review and FR locale enumeration pending. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.CertBund/TASKS.md | Research DOING | Team Connector Expansion – Regional & Vendor Feeds | FEEDCONN-CERTBUND-02-001 … 02-007 | BSI RSS directory confirmed CERT-Bund feed 2025-10-11, history assessment pending. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Kisa/TASKS.md | Research DOING | Team Connector Expansion – Regional & Vendor Feeds | FEEDCONN-KISA-02-001 … 02-007 | KNVD RSS endpoint identified 2025-10-11, access headers/session strategy outstanding. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Ru.Bdu/TASKS.md | Research DOING | Team Connector Expansion – Regional & Vendor Feeds | FEEDCONN-RUBDU-02-001 … 02-008 | BDU RSS/Atom catalogue reviewed 2025-10-11, trust-store acquisition blocked by gosuslugi placeholder page. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Ru.Nkcki/TASKS.md | Research DOING | Team Connector Expansion – Regional & Vendor Feeds | FEEDCONN-NKCKI-02-001 … 02-008 | cert.gov.ru paginated RSS landing checked 2025-10-11, access enablement plan pending. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Ics.Cisa/TASKS.md | Research DOING | Team Connector Expansion – Regional & Vendor Feeds | FEEDCONN-ICSCISA-02-001 … 02-008 | New ICS RSS endpoint logged 2025-10-11, but Akamai blocks direct pulls; fallback strategy task opened. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Vndr.Cisco/TASKS.md | Research DOING | Team Connector Expansion – Regional & Vendor Feeds | FEEDCONN-CISCO-02-001 … 02-007 | openVuln API + RSS reviewed 2025-10-11, auth/pagination memo pending. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Vndr.Msrc/TASKS.md | Research DOING | Team Connector Expansion – Regional & Vendor Feeds | FEEDCONN-MSRC-02-001 … 02-007 | MSRC API docs reviewed 2025-10-11, auth/throttling comparison memo pending.<br>Instructions to work:<br>Read ./AGENTS.md plus each module's AGENTS file. Parallelize research, ingestion, mapping, fixtures, and docs using the normalized rule shape from ./src/FASTER_MODELING_AND_NORMALIZATION.md. Coordinate daily with the merge coordination task from Sprint 1. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Cve/TASKS.md | — | Team Connector Support & Monitoring | FEEDCONN-CVE-02-001 … 02-002 | Instructions to work:<br>Read ./AGENTS.md and module AGENTS. Deliver operator docs and monitoring instrumentation required for broader feed rollout. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | src/StellaOps.Feedser.Source.Kev/TASKS.md | — | Team Connector Support & Monitoring | FEEDCONN-KEV-02-001 … 02-002 | Instructions to work:<br>Read ./AGENTS.md and module AGENTS. Deliver operator docs and monitoring instrumentation required for broader feed rollout. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | docs/TASKS.md | DONE (2025-10-11) | Team Docs & Knowledge Base | FEEDDOCS-DOCS-01-001 | Canonical schema docs refresh<br>Updated canonical schema + provenance guides with SemVer style, normalized version rules, decision reason change log, and migration notes. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | docs/TASKS.md | DONE (2025-10-11) | Team Docs & Knowledge Base | FEEDDOCS-DOCS-02-001 | Feedser-SemVer Playbook<br>Published merge playbook covering mapper patterns, dedupe flow, indexes, and rollout checklist. |
|
||||||
|
| Sprint 2 | Connector & Data Implementation Wave | docs/TASKS.md | DONE (2025-10-11) | Team Docs & Knowledge Base | FEEDDOCS-DOCS-02-002 | Normalized versions query guide<br>Delivered Mongo index/query addendum with `$unwind` recipes, dedupe checks, and operational checklist.<br>Instructions to work:<br>DONE Read ./AGENTS.md and docs/AGENTS.md. Document every schema/index/query change produced in Sprint 1-2 leveraging ./src/FASTER_MODELING_AND_NORMALIZATION.md. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Core/TASKS.md | DONE (2025-10-11) | Team Core Engine & Storage Analytics | FEEDCORE-ENGINE-03-001 | Canonical merger implementation<br>`CanonicalMerger` ships with freshness/tie-breaker logic, provenance, and unit coverage feeding Merge. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Core/TASKS.md | DONE (2025-10-11) | Team Core Engine & Storage Analytics | FEEDCORE-ENGINE-03-002 | Field precedence and tie-breaker map<br>Field precedence tables and tie-breaker metrics wired into the canonical merge flow; docs/tests updated.<br>Instructions to work:<br>Read ./AGENTS.md and core AGENTS. Implement the conflict resolver exactly as specified in ./src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md, coordinating with Merge and Storage teammates. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | DONE (2025-10-11) | Team Core Engine & Storage Analytics | FEEDSTORAGE-DATA-03-001 | Merge event provenance audit prep<br>Merge events now persist `fieldDecisions` and analytics-ready provenance snapshots. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | DONE (2025-10-11) | Team Core Engine & Storage Analytics | FEEDSTORAGE-DATA-02-001 | Normalized range dual-write + backfill<br>Dual-write/backfill flag delivered; migration + options validated in tests. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Storage.Mongo/TASKS.md | DONE (2025-10-11) | Team Core Engine & Storage Analytics | FEEDSTORAGE-TESTS-02-004 | Restore AdvisoryStore build after normalized versions refactor<br>Storage tests adjusted for normalized versions/decision reasons.<br>Instructions to work:<br>Read ./AGENTS.md and storage AGENTS. Extend merge events with decision reasons and analytics views to support the conflict rules, and deliver the dual-write/backfill for `NormalizedVersions` + `decisionReason` so connectors can roll out safely. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Merge/TASKS.md | DONE (2025-10-11) | Team Merge & QA Enforcement | FEEDMERGE-ENGINE-04-001 | GHSA/NVD/OSV conflict rules<br>Merge pipeline consumes `CanonicalMerger` output prior to precedence merge. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Merge/TASKS.md | DONE (2025-10-11) | Team Merge & QA Enforcement | FEEDMERGE-ENGINE-04-002 | Override metrics instrumentation<br>Merge events capture per-field decisions; counters/logs align with conflict rules. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Merge/TASKS.md | DONE (2025-10-11) | Team Merge & QA Enforcement | FEEDMERGE-ENGINE-04-003 | Reference & credit union pipeline<br>Canonical merge preserves unions with updated tests. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Merge/TASKS.md | DONE (2025-10-11) | Team Merge & QA Enforcement | FEEDMERGE-QA-04-001 | End-to-end conflict regression suite<br>Added regression tests (`AdvisoryMergeServiceTests`) covering canonical + precedence flow.<br>Instructions to work:<br>Read ./AGENTS.md and merge AGENTS. Integrate the canonical merger, instrument metrics, and deliver comprehensive regression tests following ./src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Source.Ghsa/TASKS.md | — | Team Connector Regression Fixtures | FEEDCONN-GHSA-04-002 | GHSA conflict regression fixtures |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Source.Nvd/TASKS.md | — | Team Connector Regression Fixtures | FEEDCONN-NVD-04-002 | NVD conflict regression fixtures |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | src/StellaOps.Feedser.Source.Osv/TASKS.md | — | Team Connector Regression Fixtures | FEEDCONN-OSV-04-002 | OSV conflict regression fixtures<br>Instructions to work:<br>Read ./AGENTS.md and module AGENTS. Produce fixture triples supporting the precedence/tie-breaker paths defined in ./src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md and hand them to Merge QA. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | docs/TASKS.md | DONE (2025-10-11) | Team Documentation Guild – Conflict Guidance | FEEDDOCS-DOCS-05-001 | Feedser Conflict Rules<br>Runbook published at `docs/ops/feedser-conflict-resolution.md`; metrics/log guidance aligned with Sprint 3 merge counters. |
|
||||||
|
| Sprint 3 | Conflict Resolution Integration & Communications | docs/TASKS.md | TODO | Team Documentation Guild – Conflict Guidance | FEEDDOCS-DOCS-05-002 | Conflict runbook ops rollout<br>Instructions to work:<br>Read ./AGENTS.md and docs/AGENTS.md. Socialise the conflict runbook with Feedser Ops, tune alert thresholds, and record change-log linkage once sign-off is captured. Use ./src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md for ongoing rule references. |
|
||||||
12
TODOS.md
Normal file
12
TODOS.md
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
# Current Focus – FEEDCONN-CERTCC
|
||||||
|
| Task | Status | Notes |
|
||||||
|
|---|---|---|
|
||||||
|
|FEEDCONN-CERTCC-02-005 Deterministic fixtures/tests|DONE (2025-10-11)|Snapshot regression for summary/detail fetch landed; fixtures regenerate via `UPDATE_CERTCC_FIXTURES`.|
|
||||||
|
|FEEDCONN-CERTCC-02-008 Snapshot coverage handoff|DONE (2025-10-11)|`CertCcConnectorSnapshotTests` produce documents/state/request snapshots and document the refresh workflow.|
|
||||||
|
|FEEDCONN-CERTCC-02-007 Connector test harness remediation|TODO|Need to restore Source.CertCc harness (`AddSourceCommon`, canned responses) so parser regression can run in CI.|
|
||||||
|
|FEEDCONN-CERTCC-02-009 Detail/map reintegration plan|DONE (2025-10-11)|Plan published in `src/StellaOps.Feedser.Source.CertCc/FEEDCONN-CERTCC-02-009_PLAN.md`; outlines staged enablement + rollback.|
|
||||||
|
|
||||||
|
# Connector Apple Status
|
||||||
|
| Task | Status | Notes |
|
||||||
|
|---|---|---|
|
||||||
|
|FEEDCONN-APPLE-02-003 Telemetry & documentation|DONE (2025-10-11)|Apple connector meter registered with WebService OpenTelemetry metrics; README and fixtures highlight normalizedVersions coverage for conflict sprint handoff.|
|
||||||
10
certificates/russian_trusted_sub_ca.cer
Normal file
10
certificates/russian_trusted_sub_ca.cer
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
Title: Госуслуги
|
||||||
|
|
||||||
|
URL Source: https://www.gosuslugi.ru/tls/files/subca2022.cer
|
||||||
|
|
||||||
|
Markdown Content:
|
||||||
|
### Госуслуги сейчас откроются
|
||||||
|
|
||||||
|
Портал работает в прежнем режиме.
|
||||||
|
|
||||||
|
Подождите пару секунд
|
||||||
77
docs/01_WHAT_IS_IT.md
Executable file
77
docs/01_WHAT_IS_IT.md
Executable file
@@ -0,0 +1,77 @@
|
|||||||
|
# 1 · What Is **Stella Ops**?
|
||||||
|
|
||||||
|
Stella Ops is a **self‑hosted, SBOM‑first DevSecOps platform** that gives engineering and security teams instant (< 5 s) feedback on container and artifact risk—even when they run completely offline.
|
||||||
|
It is built around five design pillars: **modular, open, fast, local, and UI‑controllable**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1. What the Product Does — 7‑Point Snapshot
|
||||||
|
|
||||||
|
| # | Capability | What It Means in Practice |
|
||||||
|
|---|------------|---------------------------|
|
||||||
|
| **1** | **SBOM‑Centric Scanning** | Generates and scans *Software Bills of Materials* (Trivy JSON, SPDX‑JSON, CycloneDX‑JSON); auto‑detects format and stores each SBOM as a blob. |
|
||||||
|
| **2** | **Delta‑SBOM Engine** | Uploads SBOM only for *new* layers; warm‑cache image rescans complete in < 1 s. |
|
||||||
|
| **3** | **Anonymous Internal Registry** | Ships a built‑in `StellaOps.Registry` so agents (`Stella CLI`, `Zastava`, SBOM‑builder) can be pulled inside air‑gapped networks without external credentials. |
|
||||||
|
| **4** | **Policy‑as‑Code** | Supports YAML rules today and OPA/Rego (`StellaOps.MutePolicies`) tomorrow—edit in the web UI, versioned in Mongo, enforce at scan time. |
|
||||||
|
| **5** | **Pluggable Modules** | Every scanner, exporter, or attestor is a hot‑load .NET plug‑in (e.g., `StellaOpsAttestor` for SLSA/Rekor in the roadmap). |
|
||||||
|
| **6** | **Horizontally Scalable** | Stateless API backed by Redis & Mongo; optional Kubernetes charts for multi‑node performance. |
|
||||||
|
| **7** | **Sovereign & Localized** | Localized UI, optional connectors to regional catalogues, and zero telemetry by default—ready for high‑compliance, air‑gapped deployments. |
|
||||||
|
|
||||||
|
> **🆓 Free tier update (July 2025)** – Every self‑hosted instance now includes **{{ quota_token }} scans per UTC day**.
|
||||||
|
> A yellow banner appears once you cross **200 scans** (≈ 60 % of quota).
|
||||||
|
> Past {{ quota_token }}, `/scan` responds with soft 5 s waits (graceful back‑off), and may return **429 + Retry‑After (to UTC midnight)** after repeated hits.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. How It Works — End‑to‑End Flow (30 sec tour)
|
||||||
|
|
||||||
|
1. **Build Phase**
|
||||||
|
`sbom‑builder` container runs inside CI, pulls base layers metadata, and queries `/layers/missing`—receiving in ~20 ms which layers still need SBOMs.
|
||||||
|
• New layers ➟ SBOM generated ➟ `*.sbom.<type>` + `*.sbom.type` dropped next to image tarball.
|
||||||
|
|
||||||
|
2. **Push to Registry**
|
||||||
|
Image and SBOM blobs are pushed to the **anonymous internal registry** (`StellaOps.Registry`). Cosign tags are attached if enabled.
|
||||||
|
|
||||||
|
3. **Scan Phase**
|
||||||
|
`Stella CLI` agent pulls the SBOM blob, sends `/scan?sbomType=spdx-json` to backend. If flag is absent, backend auto‑detects.
|
||||||
|
• Free‑tier tokens inherit the **{{ quota_token }}‑scan/day quota**; response headers expose remaining scans and reset time.
|
||||||
|
|
||||||
|
4. **Policy & Risk Evaluation**
|
||||||
|
Backend hydrates CVE data, merges any cached layer scores, and calls the **Policy‑as‑Code engine**:
|
||||||
|
* YAML rules → built‑in interpreter;
|
||||||
|
* Rego policies (future) → embedded OPA.
|
||||||
|
|
||||||
|
5. **Attestation & Transparency** *(Roadmap)*
|
||||||
|
`StellaOpsAttestor` signs results with SLSA provenance and records them in a local **Rekor** mirror for tamper‑proof history.
|
||||||
|
|
||||||
|
6. **Feedback Loop**
|
||||||
|
• CLI exits with non‑zero on policy block.
|
||||||
|
• UI dashboard shows findings, quota banner, and per‑token scan counters; triagers can mute or set expiry dates directly.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Why Such a Product Is Needed
|
||||||
|
|
||||||
|
> *“Software supply‑chain attacks have increased **742 %** over the past three years.”* – Sonatype 2024 State of the Software Supply Chain
|
||||||
|
|
||||||
|
### Key Drivers & Regulations
|
||||||
|
|
||||||
|
| Driver | Detail & Obligation |
|
||||||
|
|--------|--------------------|
|
||||||
|
| **Government SBOM Mandates** | • **US EO 14028** & NIST SP 800‑218 require suppliers to provide SBOMs.<br>• EU **Cyber Resilience Act (CRA)** will demand attestations of secure development by 2026. |
|
||||||
|
| **SLSA & SSDF Frameworks** | Industry pushes toward **SLSA v1.0** levels 2‑3 and NIST **SSDF 1.1** controls, emphasising provenance and policy enforcement. |
|
||||||
|
| **Transparency Logs** | **Sigstore Rekor** gains traction as a standard for tamper‑evident signatures—even for air‑gapped replicas. |
|
||||||
|
| **Offline & Sovereign Deployments** | Critical‑infra operators (finance, telecom, defence) must run security tooling without Internet and with local language/VDB support. |
|
||||||
|
| **Performance Expectations** | Modern CI/CD pipelines trigger hundreds of image builds daily; waiting 30‑60 s per scan is no longer acceptable—and now **must be achieved within a {{ quota_token }}‑scan/day free quota**. |
|
||||||
|
|
||||||
|
### Gap in Existing Tools
|
||||||
|
|
||||||
|
* SaaS‑only scanners can’t run in regulated or disconnected environments.
|
||||||
|
* Monolithic open‑source scanners are hard‑wired to Trivy or Syft formats, lacking delta optimisation.
|
||||||
|
* Few products expose **Policy‑as‑Code** with full UI editing **and** history audit in a single package.
|
||||||
|
* None address quota‑aware throttling without hidden paywalls.
|
||||||
|
|
||||||
|
**Stella Ops** fills this gap by combining *speed*, *modular openness*, *sovereign readiness* **and transparent quota limits**—making thorough supply‑chain security attainable for every team, not just cloud‑native startups.
|
||||||
|
|
||||||
|
---
|
||||||
|
*Last updated: 14 Jul 2025*
|
||||||
121
docs/02_WHY.md
Executable file
121
docs/02_WHY.md
Executable file
@@ -0,0 +1,121 @@
|
|||||||
|
# 2 · WHY — Why Stella Ops Exists
|
||||||
|
|
||||||
|
> Explaining the concrete pain we solve, why the world needs **one more** DevSecOps
|
||||||
|
> platform, and the success signals that prove we are on the right track.
|
||||||
|
|
||||||
|
Software‑supply‑chain attacks, licence‑risk, and incomplete SBOM coverage slow
|
||||||
|
teams and compliance audits to a crawl. Most existing scanners:
|
||||||
|
|
||||||
|
* **Assume Internet** access for CVE feeds or SaaS back‑ends.
|
||||||
|
* **Parse an entire image** every build (no layer‑delta optimisation).
|
||||||
|
* **Accept a single SBOM format** (usually Trivy JSON) and choke on anything else.
|
||||||
|
* Offer **no built‑in policy history / audit trail**.
|
||||||
|
* Require 30‑60 s wall‑time per scan, an order of magnitude slower than modern CI
|
||||||
|
expectations.
|
||||||
|
* **Hide quota limits** or throttle without warning once you move past free trials.
|
||||||
|
|
||||||
|
---
|
||||||
|
## 1 · Free‑Tier Quota — Why **{{ quota_token }}**?
|
||||||
|
|
||||||
|
The limit of **{{ quota_token }} SBOM scans per UTC day** was not chosen at random.
|
||||||
|
|
||||||
|
| Constraint | Analysis | Outcome |
|
||||||
|
|------------|----------|---------|
|
||||||
|
| **SMB workload** | Internal survey across 37 SMBs shows median **210** container builds/day (p95 ≈ 290). | {{ quota_token }} gives ≈ 1.6 × head‑room without forcing a paid tier. |
|
||||||
|
| **Cost of feeds** | Hosting, Trivy DB mirrors & CVE merge traffic average **≈ $14 / 1 000 scans**. | {{ quota_token }}/day yields <$5 infra cost per user — sustainable for an OSS project. |
|
||||||
|
| **Incentive to upgrade** | Larger orgs (> 300 builds/day) gain ROI from Plus/Pro tiers anyway. | Clear upsell path without hurting hobbyists. |
|
||||||
|
|
||||||
|
> **In one sentence:** *{{ quota_token }} scans cover the daily needs of a typical small /
|
||||||
|
> medium business, keep free usage genuinely useful and still leave a financial
|
||||||
|
> runway for future development*.
|
||||||
|
|
||||||
|
## 1.1 How the Quota Is Enforced (1‑minute view)
|
||||||
|
|
||||||
|
* Backend loads the **Quota plug‑in** at startup.
|
||||||
|
* Every `/scan` call passes the caller’s **Client‑JWT** to the plug‑in.
|
||||||
|
* The plug‑in **increments a counter in Redis** under
|
||||||
|
`quota:<token>:<yyyy‑mm‑dd>` (expires at UTC midnight).
|
||||||
|
* Soft wait‑wall (5 s) after limit; hard wait‑wall (60 s) after 30 blocked calls.
|
||||||
|
* For **offline installs**, a *1‑month validity Client‑JWT* ships inside every
|
||||||
|
**Offline Update Kit (OUK)** tarball. Uploading the OUK refreshes the token
|
||||||
|
automatically.
|
||||||
|
|
||||||
|
The detailed sequence lives in **30_QUOTA_ENFORCEMENT_FLOW.md**.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Why *Another* DevSecOps Product? — Macro Drivers
|
||||||
|
|
||||||
|
| Driver | Evidence | Implication for Tooling |
|
||||||
|
|--------|----------|-------------------------|
|
||||||
|
| **Exploding supply‑chain attacks** | Sonatype 2024 report shows **742 %** growth since 2020. | SBOMs & provenance checks must be default, not “best‑practice”. |
|
||||||
|
| **Regulation tsunami** | • US EO 14028 & NIST SP 800‑218<br>• EU Cyber‑Resilience Act (CRA) in force 2026<br>• Local critical‑infrastructure rules in some jurisdictions | Vendors must *attest* build provenance (SLSA) and store tamper‑proof SBOMs. |
|
||||||
|
| **Runtime‑cost intolerance** | Pipelines build hundreds of images/day; waiting > 10 s per scan breaks SLA. | Need **delta‑aware** engines that reuse layer analyses (< 1 s warm scans). |
|
||||||
|
| **Air‑gap & sovereignty demands** | Finance/defence prohibit outbound traffic; data must stay on‑prem. | Ship **self‑contained registry + CVE DB** and run offline. |
|
||||||
|
| **Predictable free‑tier limits** | Teams want clarity, not surprise throttling. | Provide **transparent {{ quota_token }} scans/day quota**, early banner & graceful wait‑wall. |
|
||||||
|
|
||||||
|
> **Therefore:** The market demands a **modular, SBOM‑first, sub‑5 s, 100 % self‑hosted**
|
||||||
|
> platform **with a transparent free‑tier quota**—precisely the niche Stella Ops targets.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Gap in Current Tooling
|
||||||
|
|
||||||
|
* Trivy / Syft create SBOMs but re‑analyse **every** layer → wasted minutes/day.
|
||||||
|
* Policy engines (OPA/Rego) are separate binaries, with no UI or change history.
|
||||||
|
* No mainstream OSS bundle ships an **anonymous internal registry** for air‑gapped pulls.
|
||||||
|
* Provenance attestation (SLSA) and Rekor transparency logs remain “bring‑your‑own”.
|
||||||
|
* Free tiers either stop at 100 scans **or** silently throttle; none announce a **clear {{ quota_token }}/day allowance**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Why Stella Ops Can Win
|
||||||
|
|
||||||
|
1. **Speed First** — Delta‑SBOM flow uses cached layers to hit `< 1 s` warm scans.
|
||||||
|
2. **Multi‑Format Ready** — Auto‑detects Trivy‑JSON, SPDX‑JSON, CycloneDX‑JSON; UI
|
||||||
|
lets teams choose per‑project defaults.
|
||||||
|
3. **Offline by Default** — Ships an **anonymous internal Docker registry**
|
||||||
|
(`StellaOps.Registry`) plus Redis, Mongo, CVE DB, and UI in a single compose up.
|
||||||
|
4. **Open & Modular** — .NET hot‑load plug‑ins (`StellaOpsAttestor`, future scanners)
|
||||||
|
under AGPL; anyone can extend.
|
||||||
|
5. **Policy as Code** — YAML rules today, upgrade path to OPA/Rego with history stored
|
||||||
|
in Mongo via `StellaOps.MutePolicies`.
|
||||||
|
6. **Sovereign‑Ready** — Russian‑language UI, local vulnerability mirrors, zero
|
||||||
|
telemetry by default.
|
||||||
|
7. **Honest Free‑tier Boundaries** — Clear **{{ quota_token }} scans/day** limit, early banner at 200 and predictable wait‑wall—no hidden throttling.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Success Criteria — Signals We Solve the Problem
|
||||||
|
|
||||||
|
* **Performance:** P95 scan < 5 s on first pass; `< 1 s` for warm delta scans.
|
||||||
|
* **Compatibility:** SBOMs in at least three formats consumed by ≥ 3 downstream tools.
|
||||||
|
* **Adoption:** ≥ 1 000 reported installs & ≥ 2 000 binary downloads by Q2‑2026.
|
||||||
|
* **Compliance:** Positive audits referencing CRA / NIST / SLSA readiness.
|
||||||
|
* **Community:** ≥ 15 first‑time contributors merged per quarter by 2026.
|
||||||
|
* **Transparency:** 0 support tickets complaining about “mystery throttling”.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Non‑Goals (2025‑2027)
|
||||||
|
|
||||||
|
* Multi‑tenant SaaS offering.
|
||||||
|
* Automatic “fix‑PR” generation (left to ecosystem).
|
||||||
|
* Windows container **scanning** (Windows *agents* are on the 12‑month roadmap).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Stakeholder Pain‑Point Recap
|
||||||
|
|
||||||
|
| Persona | Pain Today | Stella Ops Solution |
|
||||||
|
|---------|------------|---------------------|
|
||||||
|
| **Dev** | “My CI fails for 45 s on every push.” | < 5 s initial, < 1 s warm scans. |
|
||||||
|
| **Sec‑Ops** | Separate tools for SBOM, policy, and audit. | Unified UI + YAML / Rego policies with history. |
|
||||||
|
| **Infra** | Internet‑blocked site; no public pulls allowed. | Offline compose bundle + internal registry. |
|
||||||
|
| **Compliance** | Need CRA‑ready provenance by 2026. | Future `StellaOpsAttestor` SLSA + Rekor integration. |
|
||||||
|
| **Budget owner** | Fears hidden overage charges in “free” tiers. | Transparent {{ quota_token }} scans/day limit, visible in UI/API. |
|
||||||
|
|
||||||
|
---
|
||||||
|
*Last updated: 14 Jul 2025 (sync with free‑tier quota rev 2.0).*
|
||||||
156
docs/03_QUICKSTART.md
Executable file
156
docs/03_QUICKSTART.md
Executable file
@@ -0,0 +1,156 @@
|
|||||||
|
# Five‑Minute Quick‑Start ⚡
|
||||||
|
Run your first container scan locally
|
||||||
|
|
||||||
|
> **Heads‑up** – the public α `v0.1.0` image drops **late 2025**.
|
||||||
|
> Once it is published as
|
||||||
|
> `registry.stella-ops.org/stella-ops/stella-ops:0.1.0-alpha`
|
||||||
|
> every command on this page works without changes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 · What you need 🔧
|
||||||
|
|
||||||
|
| Requirement | Minimum | Notes |
|
||||||
|
|-------------|---------|-------|
|
||||||
|
| OS | Ubuntu 22.04 • Alma 9 | x86‑64 or arm64 |
|
||||||
|
| Docker | Engine 25 • Compose v2 | `docker -v` |
|
||||||
|
| CPU / RAM | 2 vCPU / 2 GiB | Dev‑laptop baseline |
|
||||||
|
| Disk | 10 GiB SSD | SBOM cache |
|
||||||
|
|
||||||
|
> **Tip –** If you already have Redis & MongoDB, skip the infra
|
||||||
|
> compose file and point Stella Ops at those hosts via `.env`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Fetch the signed Compose bundles 📦
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Infrastructure (Redis + MongoDB)
|
||||||
|
curl -LO https://get.stella-ops.org/docker-compose.infrastructure.yml
|
||||||
|
curl -LO https://get.stella-ops.org/docker-compose.infrastructure.yml.sig
|
||||||
|
|
||||||
|
# Core scanner stack
|
||||||
|
curl -LO https://get.stella-ops.org/docker-compose.stella-ops.yml
|
||||||
|
curl -LO https://get.stella-ops.org/docker-compose.stella-ops.yml.sig
|
||||||
|
|
||||||
|
# Verify signatures (supply‑chain 101)
|
||||||
|
cosign verify-blob --key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
--signature docker-compose.infrastructure.yml.sig docker-compose.infrastructure.yml
|
||||||
|
cosign verify-blob --key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
--signature docker-compose.stella-ops.yml.sig docker-compose.stella-ops.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Create `.env` 🗝️
|
||||||
|
|
||||||
|
```bash
|
||||||
|
|
||||||
|
# ─── Identity (shows in reports) ───────────────────────────
|
||||||
|
STELLA_OPS_COMPANY_NAME="Acme Corp"
|
||||||
|
STELLA_OPS_ISSUER_EMAIL="ops@acme.example"
|
||||||
|
STELLA_OPS_DEFAULT_ADMIN_USERNAME="admin"
|
||||||
|
STELLA_OPS_DEFAULT_ADMIN_PASSWORD="changeme!"
|
||||||
|
STELLA_OPS_DEFAULT_JWT="" # or load it later with
|
||||||
|
# docker --env-file .env compose -f docker-compose.stella-ops.yml exec stella set-jwt <JWT_FROM_EMAIL>
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Database secrets ──────────────────────────────────────
|
||||||
|
MONGO_INITDB_ROOT_USERNAME=stella_admin
|
||||||
|
MONGO_INITDB_ROOT_PASSWORD=$(openssl rand -base64 18)
|
||||||
|
MONGO_URL=mongodb
|
||||||
|
|
||||||
|
REDIS_PASSWORD=$(openssl rand -base64 18)
|
||||||
|
REDIS_URL=redis
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Start the supporting services 🗄️
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose --env-file .env -f docker-compose.infrastructure.yml pull
|
||||||
|
docker compose --env-file .env -f docker-compose.infrastructure.yml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Launch Stella Ops 🚀
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose --env-file .env -f docker-compose.stella-ops.yml pull
|
||||||
|
docker compose --env-file .env -f docker-compose.stella-ops.yml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
*Point your browser at* **`https://<host>:8443`** – the certificate is
|
||||||
|
self‑signed in the alpha.
|
||||||
|
Default credentials: **`admin / changeme!`** (rotate immediately!).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Run a scan 🔍
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose --env-file .env -f docker-compose.stella-ops.yml \
|
||||||
|
exec stella-ops stella scan alpine:3.20
|
||||||
|
```
|
||||||
|
|
||||||
|
* First scan downloads CVE feeds (\~ 50 MB).
|
||||||
|
* Warm scans finish in **≈ 5 s** on a 4‑vCPU host thanks to the Δ‑SBOM engine.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Reload or add a token later 🔄
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# After adding STELLA_OPS_DEFAULT_JWT to .env …
|
||||||
|
docker compose --env-file .env -f docker-compose.stella-ops.yml \
|
||||||
|
exec stella-ops stella jwt <JWT_FROM_EMAIL>
|
||||||
|
```
|
||||||
|
|
||||||
|
*Anonymous mode* → **{{ quota_anon }} scans/day**
|
||||||
|
*Token mode* → **{{ quota_token }} scans/day**
|
||||||
|
At **10 % of the daily max** a polite reminder appears; after {{ quota_token }} the server applies a **soft 5 s back‑off** and may return **429 + Retry‑After** until the daily reset.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Typical next steps ➡️
|
||||||
|
|
||||||
|
| Task | Where to look |
|
||||||
|
| ---------------------------------------- | ------------------------------------------------------------------- |
|
||||||
|
| CI pipelines (GitHub / GitLab / Jenkins) | [`docs/ci/`](ci/) |
|
||||||
|
| Air‑gapped install | [Offline Update Kit](10_OFFLINE_KIT.md) |
|
||||||
|
| Feature overview | [20\_FEATURES.md](20_FEATURES.md) |
|
||||||
|
| Governance & licence | [`LICENSE.md`](LICENSE.md) • [`11_GOVERNANCE.md`](11_GOVERNANCE.md) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 · Uninstall / cleanup 🧹
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose --env-file .env -f docker-compose.stella-ops.yml down -v
|
||||||
|
docker compose --env-file .env -f docker-compose.infrastructure.yml down -v
|
||||||
|
rm docker-compose.*.yml docker-compose.*.yml.sig .env
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Licence & provenance 📜
|
||||||
|
|
||||||
|
Stella Ops is **AGPL‑3.0‑or‑later**. Every release ships:
|
||||||
|
|
||||||
|
* **Cosign‑signed** container images
|
||||||
|
* A full **SPDX 2.3** SBOM
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cosign verify \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
registry.stella-ops.org/stella-ops/stella-ops:<VERSION>
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
© 2025‑2026 Stella Ops – free / libre / open‑source.
|
||||||
99
docs/03_VISION.md
Executable file
99
docs/03_VISION.md
Executable file
@@ -0,0 +1,99 @@
|
|||||||
|
# 3 · Product Vision — **Stella Ops**
|
||||||
|
*(v1.4 — 14 Jul 2025 · supersedes v1.3; expanded with ecosystem integration, refined metrics, and alignment to emerging trends)*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Preamble
|
||||||
|
|
||||||
|
This Vision builds on the purpose and gap analysis defined in **02 WHY**.
|
||||||
|
It paints a three‑year “north‑star” picture of success for the open‑source project and sets the measurable guard‑rails that every roadmap item must serve, while fostering ecosystem growth and adaptability to trends like SBOM mandates, AI‑assisted security **and transparent usage quotas**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 North‑Star Vision Statement (2027)
|
||||||
|
|
||||||
|
> *By mid‑2027, Stella Ops is the fastest, most‑trusted self‑hosted SBOM scanner. Developers expect vulnerability feedback in **five seconds or less**—even while the free tier enforces a transparent **{{ quota_token }} scans/day** limit with graceful waiting. The project thrives on a vibrant plug‑in marketplace, weekly community releases, transparent governance, and seamless integrations with major CI/CD ecosystems—while never breaking the five‑second promise.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 Outcomes & Success Metrics
|
||||||
|
|
||||||
|
| KPI (community‑centric) | Baseline Jul 2025 | Target Q2‑2026 | North‑Star 2027 |
|
||||||
|
| -------------------------------- | ----------------- | -------------- | --------------- |
|
||||||
|
| ⭐ Gitea / GitHub stars | 0 | 4 000 | 10 000 |
|
||||||
|
| Weekly active Docker pulls | 0 | 1 500 | 4 000 |
|
||||||
|
| P95 SBOM scan time (alpine) | 5 s | **≤ 5 s** | **≤ 4 s** |
|
||||||
|
| Free‑tier scan satisfaction* | n/a | ≥ 90 % | ≥ 95 % |
|
||||||
|
| First‑time‑contributor PRs / qtr | 0 | 15 | 30 |
|
||||||
|
|
||||||
|
\*Measured via anonymous telemetry *opt‑in only*: ratio of successful scans to `429 QuotaExceeded` errors.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 Strategic Pillars
|
||||||
|
|
||||||
|
1. **Speed First** – preserve the sub‑5 s P95 wall‑time; any feature that hurts it must ship behind a toggle or plug‑in. **Quota throttling must apply a soft 5 s delay first, so “speed first” remains true even at the limit.**
|
||||||
|
2. **Offline‑by‑Design** – every byte required to scan ships in public images; Internet access is optional.
|
||||||
|
3. **Modular Forever** – capabilities land as hot‑load plug‑ins; the monolith can split without rewrites.
|
||||||
|
4. **Community Ownership** – ADRs and governance decisions live in public; new maintainers elected by meritocracy.
|
||||||
|
5. **Zero‑Surprise Upgrades & Limits** – SemVer discipline; `main` is always installable; minor upgrades never break CI YAML **and free‑tier limits are clearly documented, with early UI warnings.**
|
||||||
|
6. **Ecosystem Harmony** – Prioritise integrations with popular OSS tools (e.g., Trivy extensions, BuildKit hooks) to lower adoption barriers.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 Road‑map Themes (18‑24 months)
|
||||||
|
|
||||||
|
| Horizon | Theme | Example EPIC |
|
||||||
|
| ------------------ | ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| **Q3‑2025** (3 mo) | **Core Stability & UX** | One‑command installer; dark‑mode UI; baseline SBOM scanning; **Free‑tier Quota Service ({{ quota_token }} scans/day, early banner, wait‑wall).** |
|
||||||
|
| 6–12 mo | *Extensibility* | Scan‑service micro‑split PoC; community plugin marketplace beta. |
|
||||||
|
| 12–18 mo | *Ecosystem* | Community plug‑in marketplace launch; integrations with Syft and Harbor. |
|
||||||
|
| 18–24 mo | *Resilience & Scale* | Redis Cluster auto‑sharding; AI‑assisted triage plugin framework. |
|
||||||
|
|
||||||
|
*(Granular decomposition lives in 25_LEDGER.md.)*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 Stakeholder Personas & Benefits
|
||||||
|
|
||||||
|
| Persona | Core Benefit |
|
||||||
|
| --------------------- | ---------------------------------------------------------------- |
|
||||||
|
| Solo OSS maintainer | Laptop scans in **≤ 5 s**; zero cloud reliance. |
|
||||||
|
| CI Platform Engineer | Single‑binary backend + Redis; stable YAML integrations. |
|
||||||
|
| Security Auditor | AGPL code, traceable CVE sources, reproducible benchmarks. |
|
||||||
|
| Community Contributor | Plugin hooks and good‑first issues; merit‑based maintainer path. |
|
||||||
|
| Budget‑conscious Lead | Clear **{{ quota_token }} scans/day** allowance before upgrades are required. |
|
||||||
|
|
||||||
|
(See **02 WHY §3** for detailed pain‑points & evidence.)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 Non‑Goals (2025‑2027)
|
||||||
|
|
||||||
|
* Multi‑tenant SaaS offering.
|
||||||
|
* Automated “fix PR” generation.
|
||||||
|
* Proprietary compliance certifications (left to downstream distros).
|
||||||
|
* Windows **container** scanning (agents only).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 Review & Change Process
|
||||||
|
|
||||||
|
* **Cadence:** product owner leads a public Vision review every **2 sprints (≈ 1 quarter)**.
|
||||||
|
* **Amendments:** material changes require PR labelled `type:vision` + two maintainer approvals.
|
||||||
|
* **Versioning:** bump patch for typo, minor for KPI tweak, major if North‑Star statement shifts.
|
||||||
|
* **Community Feedback:** Open GitHub Discussions for input; incorporate top‑voted suggestions quarterly.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 · Change Log
|
||||||
|
|
||||||
|
| Version | Date | Note (high‑level) |
|
||||||
|
| ------- | ----------- | ----------------------------------------------------------------------------------------------------- |
|
||||||
|
| v1.4 | 14‑Jul‑2025 | First public revision reflecting quarterly roadmap & KPI baseline. |
|
||||||
|
| v1.3 | 12‑Jul‑2025 | Expanded ecosystem pillar, added metrics/integrations, refined non-goals, community persona/feedback. |
|
||||||
|
| v1.2 | 11‑Jul‑2025 | Restructured to link with WHY; merged principles into Strategic Pillars; added review §7 |
|
||||||
|
| v1.1 | 11‑Jul‑2025 | Original OSS‑only vision |
|
||||||
|
| v1.0 | 09‑Jul‑2025 | First public draft |
|
||||||
|
|
||||||
|
*(End of Product Vision v1.4)*
|
||||||
34
docs/04_FEATURE_MATRIX.md
Executable file
34
docs/04_FEATURE_MATRIX.md
Executable file
@@ -0,0 +1,34 @@
|
|||||||
|
# 4 · Feature Matrix — **Stella Ops**
|
||||||
|
*(rev 2.0 · 14 Jul 2025)*
|
||||||
|
|
||||||
|
| Category | Capability | Free Tier (≤ {{ quota_token }} scans / day) | Community Plug‑in | Commercial Add‑On | Notes / ETA |
|
||||||
|
| ---------------------- | ------------------------------------- | ----------------------------- | ----------------- | ------------------- | ------------------------------------------ |
|
||||||
|
| **SBOM Ingestion** | Trivy‑JSON, SPDX‑JSON, CycloneDX‑JSON | ✅ | — | — | Auto‑detect on upload |
|
||||||
|
| | **Delta‑SBOM Cache** | ✅ | — | — | Warm scans < 1 s |
|
||||||
|
| **Scanning** | CVE lookup via local DB | ✅ | — | — | Update job ships weekly feeds |
|
||||||
|
| | Licence‑risk detection | ⏳ (roadmap Q4‑2025) | — | — | SPDX licence list |
|
||||||
|
| **Policy Engine** | YAML rules | ✅ | — | — | In‑UI editor |
|
||||||
|
| | OPA / Rego | ⏳ (β Q1‑2026) | ✅ plug‑in | — | Plug‑in enables Rego |
|
||||||
|
| **Registry** | Anonymous internal registry | ✅ | — | — | `StellaOps.Registry` image |
|
||||||
|
| **Attestation** | Cosign signing | ⏳ (Q1‑2026) | — | — | Requires `StellaOpsAttestor` |
|
||||||
|
| | SLSA provenance v1.0 | — | — | ⏳ (commercial 2026) | Enterprise need |
|
||||||
|
| | Rekor transparency log | — | ✅ plug‑in | — | Air‑gap replica support |
|
||||||
|
| **Quota & Throttling** | {{ quota_token }} scans/day soft limit | ✅ | — | — | Yellow banner at 200, wait‑wall post‑limit |
|
||||||
|
| | Usage API (`/quota`) | ✅ | — | — | CI can poll remaining scans |
|
||||||
|
| **User Interface** | Dark / light mode | ✅ | — | — | Auto‑detect OS theme |
|
||||||
|
| | Additional locale (Cyrillic) | ✅ | — | — | Default if `Accept‑Language: bg` or any other |
|
||||||
|
| | Audit trail | ✅ | — | — | Mongo history |
|
||||||
|
| **Deployment** | Docker Compose bundle | ✅ | — | — | Single‑node |
|
||||||
|
| | Helm chart (K8s) | ✅ | — | — | Horizontal scaling |
|
||||||
|
| | High‑availability split services | — | — | ✅ (Add‑On) | HA Redis & Mongo |
|
||||||
|
| **Extensibility** | .NET hot‑load plug‑ins | ✅ | N/A | — | AGPL reference SDK |
|
||||||
|
| | Community plug‑in marketplace | — | ⏳ (β Q2‑2026) | — | Moderated listings |
|
||||||
|
| **Telemetry** | Opt‑in anonymous metrics | ✅ | — | — | Required for quota satisfaction KPI |
|
||||||
|
| **Quota & Tokens** | **Client‑JWT issuance** | ✅ (online 12 h token) | — | — | `/connect/token` |
|
||||||
|
| | **Offline Client‑JWT (30 d)** | ✅ via OUK | — | — | Refreshed monthly in OUK |
|
||||||
|
|
||||||
|
> **Legend:** ✅ = Included ⏳ = Planned — = Not applicable
|
||||||
|
> Rows marked “Commercial Add‑On” are optional paid components shipping outside the AGPL‑core; everything else is FOSS.
|
||||||
|
|
||||||
|
---
|
||||||
|
*Last updated: 14 Jul 2025 (quota rev 2.0).*
|
||||||
6
docs/05_ROADMAP.md
Executable file
6
docs/05_ROADMAP.md
Executable file
@@ -0,0 +1,6 @@
|
|||||||
|
# Road‑map
|
||||||
|
|
||||||
|
Milestones are maintained on the project website.
|
||||||
|
👉 <https://stella-ops.org/roadmap/>
|
||||||
|
|
||||||
|
_This stub exists to satisfy historic links._
|
||||||
204
docs/05_SYSTEM_REQUIREMENTS_SPEC.md
Executable file
204
docs/05_SYSTEM_REQUIREMENTS_SPEC.md
Executable file
@@ -0,0 +1,204 @@
|
|||||||
|
# SYSTEM REQUIREMENTS SPECIFICATION
|
||||||
|
Stella Ops · self‑hosted supply‑chain‑security platform
|
||||||
|
|
||||||
|
> **Audience** – core maintainers and external contributors who need an
|
||||||
|
> authoritative checklist of *what* the software must do (functional
|
||||||
|
> requirements) and *how well* it must do it (non‑functional
|
||||||
|
> requirements). Implementation details belong in Module Specs
|
||||||
|
> or ADRs—**not here**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Purpose & Scope
|
||||||
|
|
||||||
|
This SRS defines everything the **v0.8‑beta** release of _Stella Ops_ must do, **including the Free‑tier daily quota of {{ quota_token }} SBOM scans per token**.
|
||||||
|
Scope includes core platform, CLI, UI, quota layer, and plug‑in host; commercial or closed‑source extensions are explicitly out‑of‑scope.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · References
|
||||||
|
|
||||||
|
* [02_WHY.md](02_WHY.md) – market gap & problem statement
|
||||||
|
* [03_VISION.md](03_VISION.md) – north‑star, KPIs, quarterly themes
|
||||||
|
* [07_HIGH_LEVEL_ARCHITECTURE.md](07_HIGH_LEVEL_ARCHITECTURE.md) – context & data flow diagrams
|
||||||
|
* [08_MODULE_SPECIFICATIONS.md](08_MODULE_SPECIFICATIONS.md) – component APIs & plug‑in contracts
|
||||||
|
* [09_API_CLI_REFERENCE.md](09_API_CLI_REFERENCE.md) – REST & CLI surface
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Definitions & Acronyms
|
||||||
|
|
||||||
|
| Term | Meaning |
|
||||||
|
|------|---------|
|
||||||
|
| **SBOM** | Software Bill of Materials |
|
||||||
|
| **Delta SBOM** | Partial SBOM covering only image layers not previously analysed |
|
||||||
|
| **Registry** | Anonymous, read‑only Docker Registry v2 hosted internally |
|
||||||
|
| **OPA** | Open Policy Agent (Rego policy engine) |
|
||||||
|
| **Muting Policy** | Rule that downgrades or ignores specific findings |
|
||||||
|
| **SLSA** | Supply‑chain Levels for Software Artifacts (provenance framework) |
|
||||||
|
| **Rekor** | Sigstore transparency log for signatures |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Overall System Description
|
||||||
|
|
||||||
|
The platform consists of:
|
||||||
|
|
||||||
|
* **Stella Ops Backend** – REST API, queue, policy engine, DB.
|
||||||
|
* **StellaOps.Registry** – internal container registry for agents.
|
||||||
|
* **Stella CLI** – extracts SBOMs; supports multi‑format & delta.
|
||||||
|
* **Zastava Agent** – enforcement hook for admission‑control scenarios.
|
||||||
|
* **Web UI** – React/Next.js SPA consuming backend APIs.
|
||||||
|
* **Plug‑ins** – hot‑load binaries extending scanners, attestations, etc.
|
||||||
|
|
||||||
|
All services run in Docker Compose or Kubernetes with optional Internet
|
||||||
|
access.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Functional Requirements (FR)
|
||||||
|
|
||||||
|
### 5.1 Core Scanning
|
||||||
|
|
||||||
|
| ID | Requirement | Priority | Verification |
|
||||||
|
|----|-------------|----------|--------------|
|
||||||
|
| F‑1 | System SHALL ingest **Trivy‑JSON, SPDX‑JSON, CycloneDX‑JSON** files. | MUST | UT‑SBOM‑001 |
|
||||||
|
| F‑2 | System SHALL **auto‑detect** SBOM type when `sbomType` param omitted. | MUST | UT‑SBOM‑002 |
|
||||||
|
| F‑3 | System SHALL **cache analysed layers** and reuse them in subsequent scans. | MUST | IT‑CACHE‑001 |
|
||||||
|
| F‑4 | System SHALL **enforce a soft limit of {{ quota_token }} scans per token per UTC day**. | MUST | IT‑QUOTA‑001 |
|
||||||
|
| F‑4a | Remaining quota SHALL be **persisted in Redis** under key `quota:<token>:<yyyy‑mm‑dd>`. | MUST | UT‑QUOTA‑REDIS |
|
||||||
|
| F‑4b | Exhausted quota SHALL trigger **HTTP 429** with `Retry-After` header (UTC midnight). | MUST | IT‑QUOTA‑002 |
|
||||||
|
| F‑4c | When quota is ≤ 40 % remaining, **UI banner** MUST turn yellow and show count‑down. | SHOULD | UI‑E2E‑005 |
|
||||||
|
| F‑4d | `/quota` endpoint SHALL return JSON `{"limit":{{ quota_token }} ,"remaining":N,"resetsAt":"<ISO‑8601>"}`. | SHOULD | API‑DOC‑003 |
|
||||||
|
| F‑5 | Policy engine SHALL evaluate **YAML rules** against scan results. | MUST | UT‑POL‑001 |
|
||||||
|
| F‑6 | Hot‑pluggable .NET plug‑ins SHALL be loadable **without service restart**. | MUST | IT‑PLUGIN‑001 |
|
||||||
|
| F‑7 | CLI (`stella scan`) SHOULD exit **non‑zero** when CVSS≥7 vulnerabilities found. | SHOULD | CL‑INT‑003 |
|
||||||
|
| *(… all previously documented F‑8 – F‑12 rows retained unchanged …)* |
|
||||||
|
|
||||||
|
|
||||||
|
### 5.2 Internal Docker Repository
|
||||||
|
|
||||||
|
| Ref | Requirement |
|
||||||
|
|-----|-------------|
|
||||||
|
| **FR‑REPO‑1** | Platform SHALL include **StellaOps.Registry** exposing Docker Registry v2 API (ports 5000/443). |
|
||||||
|
| **FR‑REPO‑2** | Registry SHALL allow anonymous, *read‑only* pulls for at least three images:<br>• `stella/sbom-builder`<br>• `stella/cli`<br>• `stella/zastava`. |
|
||||||
|
| **FR‑REPO‑3** | Registry MAY enable optional basic‑auth without code changes. |
|
||||||
|
|
||||||
|
### 5.3 SBOM Generation & Handling
|
||||||
|
|
||||||
|
| Ref | Requirement |
|
||||||
|
|-----|-------------|
|
||||||
|
| **FR‑SBOM‑1** | SBOM builder SHALL produce Trivy‑JSON **and** at least one additional format: SPDX‑JSON and CycloneDX‑JSON. |
|
||||||
|
| **FR‑SBOM‑2** | For every generated SBOM, builder SHALL create a side‑car file `<image>.sbom.type` containing the format identifier. |
|
||||||
|
| **FR‑SBOM‑3** | Stella CLI SHALL read the `.sbom.type` file and include `sbomType` parameter when uploading. |
|
||||||
|
| **FR‑SBOM‑4** | Backend SHALL auto‑detect SBOM type when parameter is missing. |
|
||||||
|
| **FR‑SBOM‑5** | UI Settings SHALL expose a dropdown to select default SBOM format (system‑wide fallback). |
|
||||||
|
|
||||||
|
#### 5.3.1 Delta SBOM (layer reuse)
|
||||||
|
|
||||||
|
| Ref | Requirement |
|
||||||
|
|-----|-------------|
|
||||||
|
| **FR‑DELTA‑1** | Builder SHALL compute SHA256 digests of each image layer and POST array to `/layers/missing`; response time ≤ 20 ms (P95). |
|
||||||
|
| **FR‑DELTA‑2** | Builder SHALL generate SBOM **only** for layers returned as “missing”. |
|
||||||
|
| **FR‑DELTA‑3** | End‑to‑end warm scan time (image differing by ≤ 2 layers) SHALL be ≤ 1 s (P95). |
|
||||||
|
|
||||||
|
### 5.4 Policy as Code (Muting & Expiration)
|
||||||
|
|
||||||
|
| Ref | Requirement |
|
||||||
|
|-----|-------------|
|
||||||
|
| **FR‑POLICY‑1** | Backend SHALL store policies as YAML by default, convertible to Rego for advanced use‑cases. |
|
||||||
|
| **FR‑POLICY‑2** | Each policy change SHALL create an immutable history record (timestamp, actor, diff). |
|
||||||
|
| **FR‑POLICY‑3** | REST endpoints `/policy/import`, `/policy/export`, `/policy/validate` SHALL accept YAML or Rego payloads. |
|
||||||
|
| **FR‑POLICY‑4** | Web UI Policies tab SHALL provide Monaco editor with linting for YAML and Rego. |
|
||||||
|
| **FR‑POLICY‑5** | **StellaOps.MutePolicies** module SHALL expose CLI `stella policies apply --file scan‑policy.yaml`. |
|
||||||
|
|
||||||
|
### 5.5 SLSA Attestations & Rekor (TODO > 6 mo)
|
||||||
|
|
||||||
|
| Ref | Requirement |
|
||||||
|
|-----|-------------|
|
||||||
|
| **FR‑SLSA‑1** | **TODO** – Generate provenance in SLSA‑Provenance v0.2 for each SBOM. |
|
||||||
|
| **FR‑REKOR‑1** | **TODO** – Sign SBOM hashes and upload to local Rekor mirror; verify during scan. |
|
||||||
|
|
||||||
|
### 5.6 CLI & API Interface
|
||||||
|
|
||||||
|
| Ref | Requirement |
|
||||||
|
|-----|-------------|
|
||||||
|
| **FR‑CLI‑1** | CLI `stella scan` SHALL accept `--sbom-type {trivy,spdx,cyclonedx,auto}`. |
|
||||||
|
| **FR‑API‑1** | API `/scan` SHALL accept `sbomType` query/body field (optional). |
|
||||||
|
| **FR‑API‑2** | API `/layers/missing` SHALL accept JSON array of digests and return JSON array of missing digests. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Non‑Functional Requirements (NFR)
|
||||||
|
|
||||||
|
| Ref | Category | Requirement |
|
||||||
|
|-----|----------|-------------|
|
||||||
|
| **NFR‑PERF‑1** | Performance | P95 cold scan ≤ 5 s; warm ≤ 1 s (see **FR‑DELTA‑3**). |
|
||||||
|
| **NFR‑PERF‑2** | Throughput | System shall sustain 60 concurrent scans on 8‑core node without queue depth >10. |
|
||||||
|
| **NFR‑AVAIL‑1** | Availability | All services shall start offline; any Internet call must be optional. |
|
||||||
|
| **NFR‑SCAL‑1** | Scalability | Horizontal scaling via Kubernetes replicas for backend, Redis Sentinel, Mongo replica set. |
|
||||||
|
| **NFR‑SEC‑1** | Security | All inter‑service traffic shall use TLS or localhost sockets. |
|
||||||
|
| **NFR‑COMP‑1** | Compatibility | Platform shall run on x86‑64 Linux kernel ≥ 5.10; Windows agents (TODO > 6 mo) must support Server 2019+. |
|
||||||
|
| **NFR‑I18N‑1** | Internationalisation | UI must support EN and at least one additional locale (Cyrillic). |
|
||||||
|
| **NFR‑OBS‑1** | Observability | Export Prometheus metrics for scan duration, queue length, policy eval duration. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Acceptance Criteria <a id="7-acceptance-criteria"></a>
|
||||||
|
|
||||||
|
1. Issue {{ quota_token }} `/scan` calls; the next call returns **HTTP 429** with a `Retry-After` header (see F‑4b).
|
||||||
|
2. Redis failure during test → API returns **0 remaining** & warns in logs.
|
||||||
|
3. UI banner activates at 133 remaining; clears next UTC midnight.
|
||||||
|
|
||||||
|
---
|
||||||
|
## 8 · System Interfaces
|
||||||
|
|
||||||
|
### 8.1 External APIs
|
||||||
|
|
||||||
|
*(This is the complete original table, plus new `/quota` row.)*
|
||||||
|
|
||||||
|
| Path | Method | Auth | Quota | Description |
|
||||||
|
|------|--------|------|-------|-------------|
|
||||||
|
| `/scan` | POST | Bearer | ✅ | Submit SBOM or `imageRef` for scanning. |
|
||||||
|
| `/quota` | GET | Bearer | ❌ | Return remaining quota for current token. |
|
||||||
|
| `/policy/rules` | GET/PUT | Bearer+RBAC | ❌ | CRUD YAML or Rego policies. |
|
||||||
|
| `/plugins` | POST/GET | Bearer+Admin | ❌ | Upload or list plug‑ins. |
|
||||||
|
|
||||||
|
```bash
|
||||||
|
GET /quota
|
||||||
|
Authorization: Bearer <token>
|
||||||
|
|
||||||
|
200 OK
|
||||||
|
{
|
||||||
|
"limit": {{ quota_token }},
|
||||||
|
"remaining": 121,
|
||||||
|
"resetsAt": "2025-07-14T23:59:59Z"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 9 · Assumptions & Constraints
|
||||||
|
|
||||||
|
* Hardware reference: 8 vCPU, 8 GB RAM, NVMe SSD.
|
||||||
|
* Mongo DB and Redis run co‑located unless horizontal scaling enabled.
|
||||||
|
* All docker images tagged `latest` are immutable (CI process locks digests).
|
||||||
|
* Rego evaluation runs in embedded OPA Go‑library (no external binary).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10 · Future Work (Beyond 12 Months)
|
||||||
|
|
||||||
|
* Rekor transparency log cross‑cluster replication.
|
||||||
|
* AI‑assisted false‑positive triage plug‑in.
|
||||||
|
* Cluster‑wide injection for live runtime scanning.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 11 · Revision History
|
||||||
|
|
||||||
|
| Version | Date | Notes |
|
||||||
|
|---------|------|-------|
|
||||||
|
| **v1.2** | 11‑Jul‑2025 | Commercial references removed; plug‑in contract (§ 3.3) and new NFR categories added; added User Classes & Traceability. |
|
||||||
|
| v1.1 | 11‑Jul‑2025 | Split out RU‑specific items; OSS scope |
|
||||||
|
| v1.0 | 09‑Jul‑2025 | Original unified SRS |
|
||||||
|
|
||||||
|
*(End of System Requirements Specification v1.2‑core)*
|
||||||
388
docs/07_HIGH_LEVEL_ARCHITECTURE.md
Executable file
388
docs/07_HIGH_LEVEL_ARCHITECTURE.md
Executable file
@@ -0,0 +1,388 @@
|
|||||||
|
# 7 · High‑Level Architecture — **Stella Ops**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Purpose & Scope
|
||||||
|
|
||||||
|
Give contributors, DevOps engineers and auditors a **complete yet readable map** of the Core:
|
||||||
|
|
||||||
|
* Major runtime components and message paths.
|
||||||
|
* Where plug‑ins, CLI helpers and runtime agents attach.
|
||||||
|
* Technology choices that enable the sub‑5 second SBOM goal.
|
||||||
|
* Typical operational scenarios (pipeline scan, mute, nightly re‑scan, etc.).
|
||||||
|
|
||||||
|
Anything enterprise‑only (signed PDF, custom/regulated TLS, LDAP, enforcement) **must arrive as a plug‑in**; the Core never hard‑codes those concerns.
|
||||||
|
---
|
||||||
|
## 1 Component Overview
|
||||||
|
|
||||||
|
| # | Component | Responsibility |
|
||||||
|
|---|-----------|---------------|
|
||||||
|
| 1 | **API Gateway** | REST endpoints (`/scan`, `/quota`, **`/token/offline`**); token auth; quota enforcement |
|
||||||
|
| 2 | **Scan Service** | SBOM parsing, Delta‑SBOM cache, vulnerability lookup |
|
||||||
|
| 3 | **Policy Engine** | YAML / (optional) Rego rule evaluation; verdict assembly |
|
||||||
|
| 4 | **Quota Service** | Per‑token counters; **{{ quota_token }} scans/day**; waits & HTTP 429 |
|
||||||
|
| 5 | **Client‑JWT Issuer** | Issues 30‑day offline tokens; bundles them into OUK |
|
||||||
|
| 6 | **Registry** | Anonymous internal Docker registry for agents, SBOM uploads |
|
||||||
|
| 7 | **Web UI** | React/Blazor SPA; dashboards, policy editor, quota banner |
|
||||||
|
| 8 | **Data Stores** | **Redis** (cache, quota) & **MongoDB** (SBOMs, findings, audit) |
|
||||||
|
| 9 | **Plugin Host** | Hot‑load .NET DLLs; isolates community plug‑ins |
|
||||||
|
| 10 | **Agents** | `sbom-builder`, `Stella CLI` scanner, future `StellaOpsAttestor` |
|
||||||
|
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
subgraph "External Actors"
|
||||||
|
DEV["Developer / DevSecOps / Manager"]
|
||||||
|
CI["CI/CD Pipeline (e.g., Stella CLI)"]
|
||||||
|
K8S["Kubernetes Cluster (e.g., Zastava Agent)"]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "Stella Ops Runtime"
|
||||||
|
subgraph "Core Services"
|
||||||
|
CORE["Stella Core<br>(REST + gRPC APIs, Orchestration)"]
|
||||||
|
REDIS[("Redis<br>(Cache, Queues, Trivy DB Mirror)")]
|
||||||
|
MONGO[("MongoDB<br>(Optional: Long-term Storage)")]
|
||||||
|
POL["Mute Policies<br>(OPA & YAML Evaluator)"]
|
||||||
|
REG["StellaOps Registry<br>(Docker Registry v2)"]
|
||||||
|
ATT["StellaOps Attestor<br>(SLSA + Rekor)"]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "Agents & Builders"
|
||||||
|
SB["SBOM Builder<br>(Go Binary: Extracts Layers, Generates SBOMs)"]
|
||||||
|
SA["Stella CLI<br>(Pipeline Helper: Invokes Builder, Triggers Scans)"]
|
||||||
|
ZA["Zastava Agent<br>(K8s Webhook: Enforces Policies, Inventories Containers)"]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "Scanners & UI"
|
||||||
|
TRIVY["Trivy Scanner<br>(Plugin Container: Vulnerability Scanning)"]
|
||||||
|
UI["Web UI<br>(Angular + Tailwind: Dashboards, Policy Editor)"]
|
||||||
|
CLI["Stella CLI<br>(CLI Helper: Triggers Scans, Mutes)"]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
DEV -->|Browses Findings, Mutes CVEs| UI
|
||||||
|
DEV -->|Triggers Scans| CLI
|
||||||
|
CI -->|Generates SBOM, Calls /scan| SA
|
||||||
|
K8S -->|Inventories Containers, Enforces Gates| ZA
|
||||||
|
|
||||||
|
UI -- "REST" --> CORE
|
||||||
|
CLI -- "REST/gRPC" --> CORE
|
||||||
|
SA -->|Scan Requests| CORE
|
||||||
|
SB -->|Uploads SBOMs| CORE
|
||||||
|
ZA -->|Policy Gates| CORE
|
||||||
|
|
||||||
|
CORE -- "Queues, Caches" --> REDIS
|
||||||
|
CORE -- "Persists Data" --> MONGO
|
||||||
|
CORE -->|Evaluates Policies| POL
|
||||||
|
CORE -->|Attests Provenance| ATT
|
||||||
|
CORE -->|Scans Vulnerabilities| TRIVY
|
||||||
|
|
||||||
|
SB -- "Pulls Images" --> REG
|
||||||
|
SA -- "Pulls Images" --> REG
|
||||||
|
ZA -- "Pulls Images" --> REG
|
||||||
|
|
||||||
|
style DEV fill:#f9f,stroke:#333
|
||||||
|
style CI fill:#f9f,stroke:#333
|
||||||
|
style K8S fill:#f9f,stroke:#333
|
||||||
|
style CORE fill:#ddf,stroke:#333
|
||||||
|
style REDIS fill:#fdd,stroke:#333
|
||||||
|
style MONGO fill:#fdd,stroke:#333
|
||||||
|
style POL fill:#dfd,stroke:#333
|
||||||
|
style REG fill:#dfd,stroke:#333
|
||||||
|
style ATT fill:#dfd,stroke:#333
|
||||||
|
style SB fill:#fdf,stroke:#333
|
||||||
|
style SA fill:#fdf,stroke:#333
|
||||||
|
style ZA fill:#fdf,stroke:#333
|
||||||
|
style TRIVY fill:#ffd,stroke:#333
|
||||||
|
style UI fill:#ffd,stroke:#333
|
||||||
|
style CLI fill:#ffd,stroke:#333
|
||||||
|
```
|
||||||
|
|
||||||
|
* **Developer / DevSecOps / Manager** – browses findings, mutes CVEs, triggers scans.
|
||||||
|
* **Stella CLI** – generates SBOMs and calls `/scan` during CI.
|
||||||
|
* **Zastava Agent** – inventories live containers; Core ships it in *passive* mode only (no kill).
|
||||||
|
|
||||||
|
### 1.1 Client‑JWT Lifecycle (offline aware)
|
||||||
|
|
||||||
|
1. **Online instance** – user signs in → `/connect/token` issues JWT valid 12 h.
|
||||||
|
2. **Offline instance** – JWT with `exp ≈ 30 days` ships in OUK; backend
|
||||||
|
**re‑signs** and stores it during import.
|
||||||
|
3. Tokens embed a `tier` claim (“Free”) and `maxScansPerDay: {{ quota_token }}`.
|
||||||
|
4. On expiry the UI surfaces a red toast **7 days** in advance.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Component Responsibilities (runtime view)
|
||||||
|
|
||||||
|
| Component | Core Responsibility | Implementation Highlights |
|
||||||
|
| -------------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------------------------------------------- |
|
||||||
|
| **Stella Core** | Orchestrates scans, persists SBOM blobs, serves REST/gRPC APIs, fans out jobs to scanners & policy engine. | .NET {{ dotnet }}, CQRS, Redis Streams; pluggable runner interfaces. |
|
||||||
|
| **SBOM Builder** | Extracts image layers, queries Core for *missing* layers, generates SBOMs (multi‑format), uploads blobs. | Go binary; wraps Trivy & Syft libs. |
|
||||||
|
| **Stella CLI** | Pipeline‑side helper; invokes Builder, triggers scan, streams progress back to CI/CD. | Static musl build. |
|
||||||
|
| **Zastava Agent** | K8s admission webhook enforcing policy verdicts before Pod creation. | Rust for sub‑10 ms latencies. |
|
||||||
|
| **UI** | Angular 17 SPA for dashboards, settings, policy editor. | Tailwind CSS; Webpack module federation (future). |
|
||||||
|
| **Redis** | Cache, queue, Trivy‑DB mirror, layer diffing. | Single instance or Sentinel. |
|
||||||
|
| **MongoDB** (opt.) | Long‑term SBOM & policy audit storage (> 180 days). | Optional; enabled via flag. |
|
||||||
|
| **StellaOps.Registry** | Anonymous read‑only Docker v2 registry with optional Cosign verification. | `registry :2` behind nginx reverse proxy. |
|
||||||
|
| **StellaOps.MutePolicies** | YAML/Rego evaluator, policy version store, `/policy/*` API. | Embeds OPA‑WASM; falls back to `opa exec`. |
|
||||||
|
| **StellaOpsAttestor** | Generate SLSA provenance & Rekor signatures; verify on demand. | Side‑car container; DSSE + Rekor CLI. |
|
||||||
|
|
||||||
|
All cross‑component calls use dependency‑injected interfaces—no
|
||||||
|
intra‑component reach‑ins.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Principal Backend Modules & Plug‑in Hooks
|
||||||
|
|
||||||
|
| Namespace | Responsibility | Built‑in Tech / Default | Plug‑in Contract |
|
||||||
|
| --------------- | -------------------------------------------------- | ----------------------- | ------------------------------------------------- |
|
||||||
|
| `configuration` | Parse env/JSON, health‑check endpoint | .NET {{ dotnet }} Options | `IConfigValidator` |
|
||||||
|
| `identity` | Embedded OAuth2/OIDC (OpenIddict 6) | MIT OpenIddict | `IIdentityProvider` for LDAP/SAML/JWT gateway |
|
||||||
|
| `pluginloader` | Discover DLLs, SemVer gate, optional Cosign verify | Reflection + Cosign | `IPluginLifecycleHook` for telemetry |
|
||||||
|
| `scanning` | SBOM‑ & image‑flow orchestration; runner pool | Trivy CLI (default) | `IScannerRunner` – e.g., Grype, Copacetic, Clair |
|
||||||
|
| `feedser` (vulnerability ingest/merge/export service) | Nightly NVD merge & feed enrichment | Hangfire job | drop-in `*.Schedule.dll` for OSV, GHSA, NVD 2.0, CNNVD, CNVD, ENISA, JVN and BDU feeds |
|
||||||
|
| `tls` | TLS provider abstraction | OpenSSL | `ITlsProvider` for custom suites (incl. **SM2**, where law or security requires it) |
|
||||||
|
| `reporting` | Render HTML/PDF reports | RazorLight | `IReportRenderer` |
|
||||||
|
| `ui` | Angular SPA & i18n | Angular {{ angular }} | new locales via `/locales/{lang}.json` |
|
||||||
|
| `scheduling` | Cron + retries | Hangfire | any recurrent job via `*.Schedule.dll` |
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
classDiagram
|
||||||
|
class configuration
|
||||||
|
class identity
|
||||||
|
class pluginloader
|
||||||
|
class scanning
|
||||||
|
class feedser
|
||||||
|
class tls
|
||||||
|
class reporting
|
||||||
|
class ui
|
||||||
|
class scheduling
|
||||||
|
|
||||||
|
class AllModules
|
||||||
|
|
||||||
|
configuration ..> identity : Uses
|
||||||
|
identity ..> pluginloader : Authenticates Plugins
|
||||||
|
pluginloader ..> scanning : Loads Scanner Runners
|
||||||
|
scanning ..> feedser : Triggers Feed Merges
|
||||||
|
tls ..> AllModules : Provides TLS Abstraction
|
||||||
|
reporting ..> ui : Renders Reports for UI
|
||||||
|
scheduling ..> feedser : Schedules Nightly Jobs
|
||||||
|
|
||||||
|
    note for scanning "Pluggable: IScannerRunner<br>e.g., Trivy, Grype"
|
||||||
|
note for feedser "Pluggable: *.Schedule.dll<br>e.g., OSV, GHSA Feeds"
|
||||||
|
note for identity "Pluggable: IIdentityProvider<br>e.g., LDAP, SAML"
|
||||||
|
note for reporting "Pluggable: IReportRenderer<br>e.g., Custom PDF"
|
||||||
|
```
|
||||||
|
|
||||||
|
**When remaining = 0:**
|
||||||
|
API returns `429 Too Many Requests`, `Retry-After: <UTC midnight>` (sequence omitted for brevity).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Data Flows
|
||||||
|
|
||||||
|
### 4.1 SBOM‑First (≤ 5 s P95)
|
||||||
|
|
||||||
|
Builder produces SBOM locally, so Core never touches the Docker
|
||||||
|
socket.
|
||||||
|
Trivy path hits ≤ 5 s on alpine:3.19 with warmed DB.
|
||||||
|
Image‑unpack fallback stays ≤ 10 s for 200 MB images.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant CI as CI/CD Pipeline (Stella CLI)
|
||||||
|
participant SB as SBOM Builder
|
||||||
|
participant CORE as Stella Core
|
||||||
|
participant REDIS as Redis Queue
|
||||||
|
participant RUN as Scanner Runner (e.g., Trivy)
|
||||||
|
participant POL as Policy Evaluator
|
||||||
|
|
||||||
|
CI->>SB: Invoke SBOM Generation
|
||||||
|
SB->>CORE: Check Missing Layers (/layers/missing)
|
||||||
|
CORE->>REDIS: Query Layer Diff (SDIFF)
|
||||||
|
REDIS-->>CORE: Missing Layers List
|
||||||
|
CORE-->>SB: Return Missing Layers
|
||||||
|
SB->>SB: Generate Delta SBOM
|
||||||
|
SB->>CORE: Upload SBOM Blob (POST /scan(sbom))
|
||||||
|
CORE->>REDIS: Enqueue Scan Job
|
||||||
|
REDIS->>RUN: Fan Out to Runner
|
||||||
|
RUN->>RUN: Perform Vulnerability Scan
|
||||||
|
RUN-->>CORE: Return Scan Results
|
||||||
|
CORE->>POL: Evaluate Mute Policies
|
||||||
|
POL-->>CORE: Policy Verdict
|
||||||
|
CORE-->>CI: JSON Verdict & Progress Stream
|
||||||
|
Note over CORE,CI: Achieves ≤5s P95 with Warmed DB
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4.2 Delta SBOM
|
||||||
|
|
||||||
|
Builder collects layer digests.
|
||||||
|
`POST /layers/missing` → Redis SDIFF → missing layer list (< 20 ms).
|
||||||
|
SBOM generated only for those layers and uploaded.
|
||||||
|
|
||||||
|
### 4.3 Feedser Harvest & Export
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant SCHED as Feedser Scheduler
|
||||||
|
participant CONN as Source Connector Plug-in
|
||||||
|
participant FEEDSER as Feedser Core
|
||||||
|
participant MONGO as MongoDB (Canonical Advisories)
|
||||||
|
participant EXPORT as Exporter (JSON / Trivy DB)
|
||||||
|
participant ART as Artifact Store / Offline Kit
|
||||||
|
|
||||||
|
SCHED->>CONN: Trigger window (init/resume)
|
||||||
|
CONN->>CONN: Fetch source documents + metadata
|
||||||
|
CONN->>FEEDSER: Submit raw document for parsing
|
||||||
|
FEEDSER->>FEEDSER: Parse & normalize to DTO
|
||||||
|
FEEDSER->>FEEDSER: Merge & deduplicate canonical advisory
|
||||||
|
FEEDSER->>MONGO: Write advisory, provenance, merge_event
|
||||||
|
FEEDSER->>EXPORT: Queue export delta request
|
||||||
|
EXPORT->>MONGO: Read canonical snapshot/deltas
|
||||||
|
EXPORT->>EXPORT: Build deterministic JSON & Trivy DB artifacts
|
||||||
|
EXPORT->>ART: Publish artifacts / Offline Kit bundle
|
||||||
|
ART-->>FEEDSER: Record export state + digests
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4.4 Identity & Auth Flow
|
||||||
|
|
||||||
|
OpenIddict issues JWTs via client‑credentials or password grant.
|
||||||
|
An IIdentityProvider plug‑in can delegate to LDAP, SAML or external OIDC
|
||||||
|
without Core changes.
|
||||||
|
---
|
||||||
|
## 5 · Runtime Helpers
|
||||||
|
|
||||||
|
| Helper | Form | Purpose | Extensible Bits |
|
||||||
|
|-----------|---------------------------------------|--------------------------------------------------------------------|-------------------------------------------|
|
||||||
|
| **Stella CLI** | Distroless CLI | Generates SBOM, calls `/scan`, honours threshold flag | `--engine`, `--pdf-out` piped to plug‑ins |
|
||||||
|
| **Zastava** | Static Go binary / DaemonSet | Watches Docker/CRI‑O events; uploads SBOMs; can enforce gate | Policy plug‑in could alter thresholds |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Persistence & Cache Strategy
|
||||||
|
|
||||||
|
| Store | Primary Use | Why chosen |
|
||||||
|
|----------------|-----------------------------------------------|--------------------------------|
|
||||||
|
| **MongoDB** | Feedser canonical advisories, merge events, export state | Deterministic canonical store with flexible schema |
|
||||||
|
| **Redis 7** | CLI quotas, short-lived job scheduling, layer diff cache | Sub-1 ms P99 latency for hot-path coordination |
|
||||||
|
| **Local tmpfs**| Trivy layer cache (`/var/cache/trivy`) | Keeps disk I/O off hot path |
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
subgraph "Persistence Layers"
|
||||||
|
REDIS[(Redis: Quotas & Short-lived Queues<br>Sub-1ms P99)]
|
||||||
|
MONGO[(MongoDB: Canonical Advisories<br>Merge Events & Export State)]
|
||||||
|
TMPFS[(Local tmpfs: Trivy Layer Cache<br>Low I/O Overhead)]
|
||||||
|
end
|
||||||
|
|
||||||
|
CORE["Stella Core"] -- Queues & SBOM Cache --> REDIS
|
||||||
|
CORE -- Long-term Storage --> MONGO
|
||||||
|
TRIVY["Trivy Scanner"] -- Layer Unpack Cache --> TMPFS
|
||||||
|
|
||||||
|
style REDIS fill:#fdd,stroke:#333
|
||||||
|
style MONGO fill:#dfd,stroke:#333
|
||||||
|
style TMPFS fill:#ffd,stroke:#333
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Typical Scenarios
|
||||||
|
|
||||||
|
| # | Flow | Steps |
|
||||||
|
|---------|----------------------------|-------------------------------------------------------------------------------------------------|
|
||||||
|
| **S‑1** | Pipeline Scan & Alert | Stella CLI → SBOM → `/scan` → policy verdict → CI exit code & link to *Scan Detail* |
|
||||||
|
| **S‑2** | Mute Noisy CVE | Dev toggles **Mute** in UI → rule stored in Redis → next build passes |
|
||||||
|
| **S‑3** | Nightly Re‑scan | `SbomNightly.Schedule` re‑queues SBOMs (mask‑filter) → dashboard highlights new Criticals |
|
||||||
|
| **S‑4** | Feed Update Cycle | `Feedser (vulnerability ingest/merge/export service)` refreshes feeds → UI *Feed Age* tile turns green |
|
||||||
|
| **S‑5** | Custom Report Generation | Plug‑in registers `IReportRenderer` → `/report/custom/{digest}` → CI downloads artifact |
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant DEV as Developer
|
||||||
|
participant UI as Web UI
|
||||||
|
participant CORE as Stella Core
|
||||||
|
participant REDIS as Redis
|
||||||
|
participant RUN as Scanner Runner
|
||||||
|
|
||||||
|
DEV->>UI: Toggle Mute for CVE
|
||||||
|
UI->>CORE: Update Mute Rule (POST /policy/mute)
|
||||||
|
CORE->>REDIS: Store Mute Policy
|
||||||
|
Note over CORE,REDIS: YAML/Rego Evaluator Updates
|
||||||
|
|
||||||
|
alt Next Pipeline Build
|
||||||
|
CI->>CORE: Trigger Scan (POST /scan)
|
||||||
|
CORE->>RUN: Enqueue & Scan
|
||||||
|
RUN-->>CORE: Raw Findings
|
||||||
|
CORE->>REDIS: Apply Mute Policies
|
||||||
|
REDIS-->>CORE: Filtered Verdict (Passes)
|
||||||
|
CORE-->>CI: Success Exit Code
|
||||||
|
end
|
||||||
|
```
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant CRON as SbomNightly.Schedule
|
||||||
|
participant CORE as Stella Core
|
||||||
|
participant REDIS as Redis Queue
|
||||||
|
participant RUN as Scanner Runner
|
||||||
|
participant UI as Dashboard
|
||||||
|
|
||||||
|
CRON->>CORE: Re-queue SBOMs (Mask-Filter)
|
||||||
|
CORE->>REDIS: Enqueue Filtered Jobs
|
||||||
|
REDIS->>RUN: Fan Out to Runners
|
||||||
|
RUN-->>CORE: New Scan Results
|
||||||
|
CORE->>UI: Highlight New Criticals
|
||||||
|
Note over CORE,UI: Focus on Changes Since Last Scan
|
||||||
|
```
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 · UI Fast Facts
|
||||||
|
|
||||||
|
* **Stack** – Angular 17 + Vite dev server; Tailwind CSS.
|
||||||
|
* **State** – Signals + RxJS for live scan progress.
|
||||||
|
* **i18n / l10n** – JSON bundles served from `/locales/{lang}.json`.
|
||||||
|
* **Module Structure** – Lazy‑loaded feature modules (`dashboard`, `scans`, `settings`); runtime route injection by UI plug‑ins (road‑map Q2‑2026).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9 · Cross‑Cutting Concerns
|
||||||
|
|
||||||
|
* **Security** – containers run non‑root, `CAP_DROP:ALL`, read‑only FS, hardened seccomp profiles.
|
||||||
|
* **Observability** – Serilog JSON, OpenTelemetry OTLP exporter, Prometheus `/metrics`.
|
||||||
|
* **Upgrade Policy** – `/api/v1` endpoints & CLI flags stable across a minor; breaking changes bump major.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10 · Performance & Scalability
|
||||||
|
|
||||||
|
| Scenario | P95 target | Bottleneck | Mitigation |
|
||||||
|
|-----------------|-----------:|-----------------|-------------------------------------------------|
|
||||||
|
| SBOM‑first | ≤ 5 s | Redis queue | More CPU, increase `ScannerPool.Workers` |
|
||||||
|
| Image‑unpack | ≤ 10 s | Layer unpack | Prefer SBOM path, warm Docker cache |
|
||||||
|
| High concurrency| 40 rps | Runner CPU | Scale Core replicas + side‑car scanner services |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 11 · Future Architectural Anchors
|
||||||
|
|
||||||
|
* **ScanService micro‑split (gRPC)** – isolate heavy runners for large clusters.
|
||||||
|
* **UI route plug‑ins** – dynamic Angular module loader (road‑map Q2‑2026).
|
||||||
|
* **Redis Cluster** – transparently sharded cache once sustained > 100 rps.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 12 · Assumptions & Trade‑offs
|
||||||
|
|
||||||
|
Requires Docker/CRI‑O runtime; .NET 9 available on hosts; Windows containers are out‑of‑scope this cycle.
|
||||||
|
Embedded auth simplifies deployment but may need plug‑ins for enterprise IdPs.
|
||||||
|
Speed is prioritised over exhaustive feature parity with heavyweight commercial scanners.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 13 · References & Further Reading
|
||||||
|
|
||||||
|
* **C4 Model** – <https://c4model.com>
|
||||||
|
* **.NET Architecture Guides** – <https://learn.microsoft.com/dotnet/architecture>
|
||||||
|
* **OSS Examples** – Kubernetes Architecture docs, Prometheus design papers, Backstage.
|
||||||
|
|
||||||
|
*(End of High‑Level Architecture v2.2)*
|
||||||
208
docs/08_MODULE_SPECIFICATIONS.md
Executable file
208
docs/08_MODULE_SPECIFICATIONS.md
Executable file
@@ -0,0 +1,208 @@
|
|||||||
|
# 8 · Detailed Module Specifications — **Stella Ops Feedser**
|
||||||
|
_This document describes the Feedser service, its supporting libraries, connectors, exporters, and test assets that live in the OSS repository._
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Scope
|
||||||
|
|
||||||
|
Feedser is the vulnerability ingest/merge/export subsystem of Stella Ops. It
|
||||||
|
fetches primary advisories, normalizes and deduplicates them into MongoDB, and
|
||||||
|
produces deterministic JSON and Trivy DB exports. This document lists the
|
||||||
|
projects that make up that workflow, the extension points they expose, and the
|
||||||
|
artefacts they ship.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 Repository layout (current)
|
||||||
|
|
||||||
|
```text
|
||||||
|
src/
|
||||||
|
├─ Directory.Build.props / Directory.Build.targets
|
||||||
|
├─ StellaOps.Plugin/
|
||||||
|
├─ StellaOps.Feedser.Core/
|
||||||
|
├─ StellaOps.Feedser.Core.Tests/
|
||||||
|
├─ StellaOps.Feedser.Models/ (+ .Tests/)
|
||||||
|
├─ StellaOps.Feedser.Normalization/ (+ .Tests/)
|
||||||
|
├─ StellaOps.Feedser.Merge/ (+ .Tests/)
|
||||||
|
├─ StellaOps.Feedser.Storage.Mongo/ (+ .Tests/)
|
||||||
|
├─ StellaOps.Feedser.Exporter.Json/ (+ .Tests/)
|
||||||
|
├─ StellaOps.Feedser.Exporter.TrivyDb/ (+ .Tests/)
|
||||||
|
├─ StellaOps.Feedser.Source.* / StellaOps.Feedser.Source.*.Tests/
|
||||||
|
├─ StellaOps.Feedser.Testing/
|
||||||
|
├─ StellaOps.Feedser.Tests.Shared/
|
||||||
|
├─ StellaOps.Feedser.WebService/ (+ .Tests/)
|
||||||
|
├─ PluginBinaries/
|
||||||
|
└─ StellaOps.Feedser.sln
|
||||||
|
```
|
||||||
|
|
||||||
|
Each folder is a .NET project (or set of projects) referenced by
|
||||||
|
`StellaOps.Feedser.sln`. Build assets are shared through the root
|
||||||
|
`Directory.Build.props/targets` so conventions stay consistent.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 Shared libraries
|
||||||
|
|
||||||
|
| Project | Purpose | Key extension points |
|
||||||
|
|---------|---------|----------------------|
|
||||||
|
| `StellaOps.Plugin` | Base contracts for connectors, exporters, and DI routines plus Cosign validation helpers. | `IFeedConnector`, `IExporterPlugin`, `IDependencyInjectionRoutine` |
|
||||||
|
| `StellaOps.DependencyInjection` | Composable service registrations for Feedser and plug-ins. | `IDependencyInjectionRoutine` discovery |
|
||||||
|
| `StellaOps.Feedser.Testing` | Common fixtures, builders, and harnesses for integration/unit tests. | `FeedserMongoFixture`, test builders |
|
||||||
|
| `StellaOps.Feedser.Tests.Shared` | Shared assembly metadata and fixtures wired in via `Directory.Build.props`. | Test assembly references |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 Core projects
|
||||||
|
|
||||||
|
| Project | Responsibility | Extensibility |
|
||||||
|
|---------|----------------|---------------|
|
||||||
|
| `StellaOps.Feedser.WebService` | ASP.NET Core minimal API hosting Feedser jobs, status endpoints, and scheduler. | DI-based plug-in discovery; configuration binding |
|
||||||
|
| `StellaOps.Feedser.Core` | Job orchestration, connector pipelines, merge workflows, export coordination. | `IFeedConnector`, `IExportJob`, deterministic merge policies |
|
||||||
|
| `StellaOps.Feedser.Models` | Canonical advisory DTOs and enums persisted in MongoDB and exported artefacts. | Partial classes for source-specific metadata |
|
||||||
|
| `StellaOps.Feedser.Normalization` | Version comparison, CVSS normalization, text utilities for canonicalization. | Helpers consumed by connectors/merge |
|
||||||
|
| `StellaOps.Feedser.Merge` | Precedence evaluation, alias graph maintenance, merge-event hashing. | Policy extensions via DI |
|
||||||
|
| `StellaOps.Feedser.Storage.Mongo` | Repository layer for documents, DTOs, advisories, merge events, export state. | Connection string/config via options |
|
||||||
|
| `StellaOps.Feedser.Exporter.Json` | Deterministic vuln-list JSON export pipeline. | Dependency injection for storage + plugin to host |
|
||||||
|
| `StellaOps.Feedser.Exporter.TrivyDb` | Builds Trivy DB artefacts from canonical advisories. | Optional ORAS push routines |
|
||||||
|
|
||||||
|
### 3.1 StellaOps.Feedser.WebService
|
||||||
|
|
||||||
|
* Hosts minimal API endpoints (`/health`, `/status`, `/jobs`).
|
||||||
|
* Runs the scheduler that triggers connectors and exporters according to
|
||||||
|
configured windows.
|
||||||
|
* Applies dependency-injection routines from `PluginBinaries/` at startup only
|
||||||
|
(restart-time plug-ins).
|
||||||
|
|
||||||
|
### 3.2 StellaOps.Feedser.Core
|
||||||
|
|
||||||
|
* Defines job primitives (fetch, parse, map, merge, export) used by connectors.
|
||||||
|
* Coordinates deterministic merge flows and writes `merge_event` documents.
|
||||||
|
* Provides telemetry/log scopes consumed by WebService and exporters.
|
||||||
|
|
||||||
|
### 3.3 StellaOps.Feedser.Storage.Mongo
|
||||||
|
|
||||||
|
* Persists raw documents, DTO records, canonical advisories, aliases, affected
|
||||||
|
packages, references, merge events, export state, and job leases.
|
||||||
|
* Exposes repository helpers for exporters to stream full/delta snapshots.
|
||||||
|
|
||||||
|
### 3.4 StellaOps.Feedser.Exporter.*
|
||||||
|
|
||||||
|
* `Exporter.Json` mirrors the Aqua vuln-list tree with canonical ordering.
|
||||||
|
* `Exporter.TrivyDb` builds Trivy DB Bolt archives and optional OCI bundles.
|
||||||
|
* Both exporters honour deterministic hashing and respect export cursors.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 Source connectors
|
||||||
|
|
||||||
|
Connectors live under `StellaOps.Feedser.Source.*` and conform to the interfaces
|
||||||
|
in `StellaOps.Plugin`.
|
||||||
|
|
||||||
|
| Family | Project(s) | Notes |
|
||||||
|
|--------|------------|-------|
|
||||||
|
| Distro PSIRTs | `StellaOps.Feedser.Source.Distro.*` | Debian, Red Hat, SUSE, Ubuntu connectors with NEVRA/EVR helpers. |
|
||||||
|
| Vendor PSIRTs | `StellaOps.Feedser.Source.Vndr.*` | Adobe, Apple, Cisco, Chromium, Microsoft, Oracle, VMware. |
|
||||||
|
| Regional CERTs | `StellaOps.Feedser.Source.Cert*`, `Source.Ru.*`, `Source.Ics.*`, `Source.Kisa` | Provide enrichment metadata while preserving vendor precedence. |
|
||||||
|
| OSS ecosystems | `StellaOps.Feedser.Source.Ghsa`, `Source.Osv`, `Source.Cve`, `Source.Kev`, `Source.Acsc`, `Source.Cccs`, `Source.Jvn` | Emit SemVer/alias-rich advisories. |
|
||||||
|
|
||||||
|
Each connector ships fixtures/tests under the matching `*.Tests` project.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Module Details
|
||||||
|
|
||||||
|
> _Focus on the Feedser-specific services that replace the legacy FeedMerge cron._
|
||||||
|
|
||||||
|
### 5.1 Feedser.Core
|
||||||
|
|
||||||
|
* Owns the fetch → parse → merge → export job pipeline and enforces deterministic
|
||||||
|
merge hashes (`merge_event`).
|
||||||
|
* Provides `JobSchedulerBuilder`, job coordinator, and telemetry scopes consumed
|
||||||
|
by the WebService and exporters.
|
||||||
|
|
||||||
|
### 5.2 Feedser.Storage.Mongo
|
||||||
|
|
||||||
|
* Bootstrapper creates collections/indexes (documents, dto, advisory, alias,
|
||||||
|
affected, merge_event, export_state, jobs, locks).
|
||||||
|
* Repository APIs surface full/delta advisory reads for exporters, plus
|
||||||
|
SourceState and job lease persistence.
|
||||||
|
|
||||||
|
### 5.3 Feedser.Exporter.Json / Feedser.Exporter.TrivyDb
|
||||||
|
|
||||||
|
* JSON exporter mirrors vuln-list layout with per-file digests and manifest.
|
||||||
|
* Trivy DB exporter shells or native-builds Bolt archives, optionally pushes OCI
|
||||||
|
layers, and records export cursors. Delta runs reuse unchanged blobs from the
|
||||||
|
previous full baseline, annotating `metadata.json` with `mode`, `baseExportId`,
|
||||||
|
`baseManifestDigest`, `resetBaseline`, and `delta.changedFiles[]`/`delta.removedPaths[]`.
|
||||||
|
ORAS pushes honour `publishFull` / `publishDelta`, and offline bundles respect
|
||||||
|
`includeFull` / `includeDelta` for air-gapped syncs.
|
||||||
|
|
||||||
|
### 5.4 Feedser.WebService
|
||||||
|
|
||||||
|
* Minimal API host exposing `/health`, `/ready`, `/jobs` and wiring telemetry.
|
||||||
|
* Loads restart-time plug-ins from `PluginBinaries/`, executes Mongo bootstrap,
|
||||||
|
and registers built-in connectors/exporters with the scheduler.
|
||||||
|
|
||||||
|
### 5.5 Plugin host & DI bridge
|
||||||
|
|
||||||
|
* `StellaOps.Plugin` + `StellaOps.DependencyInjection` provide the contracts and
|
||||||
|
helper routines for connectors/exporters to integrate with the WebService.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Plug-ins & Agents
|
||||||
|
|
||||||
|
* **Plug-in discovery** – restart-only; the WebService enumerates
|
||||||
|
`PluginBinaries/` (or configured directories) and executes the contained
|
||||||
|
`IDependencyInjectionRoutine` implementations.
|
||||||
|
* **Connector/exporter packages** – each source/exporter can ship as a plug-in
|
||||||
|
assembly with its own options and HttpClient configuration, keeping the core
|
||||||
|
image minimal.
|
||||||
|
* **StellaOps CLI (agent)** – new `StellaOps.Cli` module that exposes
|
||||||
|
`scanner`, `scan`, `db`, and `auth` verbs (via System.CommandLine 2.0) to download
|
||||||
|
scanner container bundles, install them locally, execute scans against target
|
||||||
|
directories, automatically upload results, and trigger Feedser jobs (`db
|
||||||
|
fetch/merge/export`) aligned with the SBOM-first workflow described in
|
||||||
|
`AGENTS.md`.
|
||||||
|
* **Offline Kit** – bundles Feedser plug-ins, JSON tree, Trivy DB, and export
|
||||||
|
manifests so air-gapped sites can load the latest vulnerability data without
|
||||||
|
outbound connectivity.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Docker & Distribution Artefacts
|
||||||
|
|
||||||
|
| Artefact | Path / Identifier | Notes |
|
||||||
|
|----------|-------------------|-------|
|
||||||
|
| Feedser WebService image | `containers/feedser/Dockerfile` (built via CI) | Self-contained ASP.NET runtime hosting scheduler/endpoints. |
|
||||||
|
| Plugin bundle | `PluginBinaries/` | Mounted or baked-in assemblies for connectors/exporters. |
|
||||||
|
| Offline Kit tarball | Produced by CI release pipeline | Contains JSON tree, Trivy DB OCI layout, export manifest, and plug-ins. |
|
||||||
|
| Local dev compose | `scripts/` + future compose overlays | Developers can run MongoDB, Redis (optional), and WebService locally. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 · Performance Budget
|
||||||
|
|
||||||
|
| Scenario | Budget | Source |
|
||||||
|
|----------|--------|--------|
|
||||||
|
| Advisory upsert (large advisory) | ≤ 500 ms/advisory | `AdvisoryStorePerformanceTests` (Mongo) |
|
||||||
|
| Advisory fetch (`GetRecent`) | ≤ 200 ms/advisory | Same performance test harness |
|
||||||
|
| Advisory point lookup (`Find`) | ≤ 200 ms/advisory | Same performance test harness |
|
||||||
|
| Bulk upsert/fetch cycle | ≤ 28 s total for 30 large advisories | Same performance test harness |
|
||||||
|
| Feedser job scheduling | Deterministic cron execution via `JobSchedulerHostedService` | `StellaOps.Feedser.Core` tests |
|
||||||
|
| Trivy DB export | Deterministic digests across runs (ongoing TODO for end-to-end test) | `Exporter.TrivyDb` backlog |
|
||||||
|
|
||||||
|
Budgets are enforced in automated tests where available; outstanding TODO/DOING
|
||||||
|
items (see task boards) continue tracking gaps such as exporter determinism.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9 Testing
|
||||||
|
|
||||||
|
* Unit and integration tests live alongside each component (`*.Tests`).
|
||||||
|
* Shared fixtures come from `StellaOps.Feedser.Testing` and
|
||||||
|
`StellaOps.Feedser.Tests.Shared` (linked via `Directory.Build.props`).
|
||||||
|
* Integration suites use ephemeral MongoDB and Redis via Testcontainers to
|
||||||
|
validate end-to-end flow without external dependencies.
|
||||||
|
|
||||||
|
---
|
||||||
461
docs/09_API_CLI_REFERENCE.md
Executable file
461
docs/09_API_CLI_REFERENCE.md
Executable file
@@ -0,0 +1,461 @@
|
|||||||
|
# API & CLI Reference
|
||||||
|
|
||||||
|
*Purpose* – give operators and integrators a single, authoritative spec for REST/GRPC calls **and** first‑party CLI tools (`stellaops-cli`, `stella`, `stella‑zastava`, `stellopsctl`).
|
||||||
|
Everything here is *source‑of‑truth* for generated Swagger/OpenAPI and the `--help` screens in the CLIs.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Quick Glance
|
||||||
|
|
||||||
|
| Area | Call / Flag | Notes |
|
||||||
|
| ------------------ | ------------------------------------------- | ------------------------------------------------------------------------------ |
|
||||||
|
| Scan entry | `POST /scan` | Accepts SBOM or image; sub‑5 s target |
|
||||||
|
| Delta check | `POST /layers/missing` | <20 ms reply; powers *delta SBOM* feature |
|
||||||
|
| Rate‑limit / quota | — | Headers **`X‑Stella‑Quota‑Remaining`**, **`X‑Stella‑Reset`** on every response |
|
||||||
|
| Policy I/O | `GET /policy/export`, `POST /policy/import` | YAML now; Rego coming |
|
||||||
|
| Policy lint | `POST /policy/validate` | Returns 200 OK if ruleset passes |
|
||||||
|
| Auth | `POST /connect/token` (OpenIddict) | Client‑credentials preferred |
|
||||||
|
| Health | `GET /healthz` | Simple liveness probe |
|
||||||
|
| Attestation * | `POST /attest` (TODO Q1‑2026) | SLSA provenance + Rekor log |
|
||||||
|
| CLI flags | `--sbom-type` `--delta` `--policy-file` | Added to `stella` |
|
||||||
|
|
||||||
|
\* Marked **TODO** → delivered after sixth month (kept on Feature Matrix “To Do” list).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 Authentication
|
||||||
|
|
||||||
|
Stella Ops uses **OAuth 2.0 / OIDC** (token endpoint mounted via OpenIddict).
|
||||||
|
|
||||||
|
```
|
||||||
|
POST /connect/token
|
||||||
|
Content‑Type: application/x-www-form-urlencoded
|
||||||
|
|
||||||
|
grant_type=client_credentials&
|
||||||
|
client_id=ci‑bot&
|
||||||
|
client_secret=REDACTED&
|
||||||
|
scope=stella.api
|
||||||
|
```
|
||||||
|
|
||||||
|
Successful response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"access_token": "eyJraWQi...",
|
||||||
|
"token_type": "Bearer",
|
||||||
|
"expires_in": 3600
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Tip** – pass the token via `Authorization: Bearer <token>` on every call.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 REST API
|
||||||
|
|
||||||
|
### 2.0 Obtain / Refresh Offline‑Token
|
||||||
|
|
||||||
|
```text
|
||||||
|
POST /token/offline
|
||||||
|
Authorization: Bearer <admin‑token>
|
||||||
|
```
|
||||||
|
|
||||||
|
| Body field | Required | Example | Notes |
|
||||||
|
|------------|----------|---------|-------|
|
||||||
|
| `expiresDays` | no | `30` | Max 90 days |
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jwt": "eyJhbGciOiJSUzI1NiIsInR5cCI6...",
|
||||||
|
"expires": "2025-08-17T00:00:00Z"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Token is signed with the backend’s private key and already contains
|
||||||
|
`"maxScansPerDay": {{ quota_token }}`.
|
||||||
|
|
||||||
|
|
||||||
|
### 2.1 Scan – Upload SBOM **or** Image
|
||||||
|
|
||||||
|
```
|
||||||
|
POST /scan
|
||||||
|
```
|
||||||
|
|
||||||
|
| Param / Header | In | Required | Description |
|
||||||
|
| -------------------- | ------ | -------- | --------------------------------------------------------------------- |
|
||||||
|
| `X‑Stella‑Sbom‑Type` | header | no | `trivy-json-v2`, `spdx-json`, `cyclonedx-json`; omitted ➞ auto‑detect |
|
||||||
|
| `?threshold` | query | no | `low`, `medium`, `high`, `critical`; default **critical** |
|
||||||
|
| body | body | yes | *Either* SBOM JSON *or* Docker image tarball/upload URL |
|
||||||
|
|
||||||
|
Every successful `/scan` response now includes:
|
||||||
|
|
||||||
|
| Header | Example |
|
||||||
|
|--------|---------|
|
||||||
|
| `X‑Stella‑Quota‑Remaining` | `129` |
|
||||||
|
| `X‑Stella‑Reset` | `2025‑07‑18T23:59:59Z` |
|
||||||
|
| `X‑Stella‑Token‑Expires` | `2025‑08‑17T00:00:00Z` |
|
||||||
|
|
||||||
|
**Response 200** (scan completed):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"digest": "sha256:…",
|
||||||
|
"summary": {
|
||||||
|
"Critical": 0,
|
||||||
|
"High": 3,
|
||||||
|
"Medium": 12,
|
||||||
|
"Low": 41
|
||||||
|
},
|
||||||
|
"policyStatus": "pass",
|
||||||
|
"quota": {
|
||||||
|
"remaining": 131,
|
||||||
|
"reset": "2025-07-18T00:00:00Z"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response 202** – queued; polling URL in `Location` header.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2.2 Delta SBOM – Layer Cache Check
|
||||||
|
|
||||||
|
```
|
||||||
|
POST /layers/missing
|
||||||
|
Content‑Type: application/json
|
||||||
|
Authorization: Bearer <token>
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"layers": [
|
||||||
|
"sha256:d38b...",
|
||||||
|
"sha256:af45..."
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response 200** — <20 ms target:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"missing": [
|
||||||
|
"sha256:af45..."
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Client then generates SBOM **only** for the `missing` layers and re‑posts `/scan`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2.3 Policy Endpoints
|
||||||
|
|
||||||
|
| Method | Path | Purpose |
|
||||||
|
| ------ | ------------------ | ------------------------------------ |
|
||||||
|
| `GET` | `/policy/export` | Download live YAML ruleset |
|
||||||
|
| `POST` | `/policy/import` | Upload YAML or Rego; replaces active |
|
||||||
|
| `POST` | `/policy/validate` | Lint only; returns 400 on error |
|
||||||
|
| `GET` | `/policy/history` | Paginated change log (audit trail) |
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Example import payload (YAML)
|
||||||
|
version: "1.0"
|
||||||
|
rules:
|
||||||
|
- name: Ignore Low dev
|
||||||
|
severity: [Low, None]
|
||||||
|
environments: [dev, staging]
|
||||||
|
action: ignore
|
||||||
|
```
|
||||||
|
|
||||||
|
Validation errors come back as:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"errors": [
|
||||||
|
{
|
||||||
|
"path": "$.rules[0].severity",
|
||||||
|
"msg": "Invalid level 'None'"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2.4 Attestation (Planned – Q1‑2026)
|
||||||
|
|
||||||
|
```
|
||||||
|
POST /attest
|
||||||
|
```
|
||||||
|
|
||||||
|
| Param | Purpose |
|
||||||
|
| ----------- | ------------------------------------- |
|
||||||
|
| body (JSON) | SLSA v1.0 provenance doc |
|
||||||
|
| | Signed + stored in local Rekor mirror |
|
||||||
|
|
||||||
|
Returns `202 Accepted` and `Location: /attest/{id}` for async verify.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 StellaOps CLI (`stellaops-cli`)
|
||||||
|
|
||||||
|
The new CLI is built on **System.CommandLine 2.0.0‑beta5** and mirrors the Feedser backend REST API.
|
||||||
|
Configuration follows the same precedence chain everywhere:
|
||||||
|
|
||||||
|
1. Environment variables (e.g. `API_KEY`, `STELLAOPS_BACKEND_URL`, `StellaOps:ApiKey`)
|
||||||
|
2. `appsettings.json` → `appsettings.local.json`
|
||||||
|
3. `appsettings.yaml` → `appsettings.local.yaml`
|
||||||
|
4. Defaults (`ApiKey = ""`, `BackendUrl = ""`, cache folders under the current working directory)
|
||||||
|
|
||||||
|
**Authority auth client resilience settings**
|
||||||
|
|
||||||
|
| Setting | Environment variable | Default | Purpose |
|
||||||
|
|---------|----------------------|---------|---------|
|
||||||
|
| `StellaOps:Authority:Resilience:EnableRetries` | `STELLAOPS_AUTHORITY_ENABLE_RETRIES` | `true` | Toggle Polly wait-and-retry handlers for discovery/token calls |
|
||||||
|
| `StellaOps:Authority:Resilience:RetryDelays` | `STELLAOPS_AUTHORITY_RETRY_DELAYS` | `1s,2s,5s` | Comma/space-separated backoff sequence (HH:MM:SS) |
|
||||||
|
| `StellaOps:Authority:Resilience:AllowOfflineCacheFallback` | `STELLAOPS_AUTHORITY_ALLOW_OFFLINE_CACHE_FALLBACK` | `true` | Reuse cached discovery/JWKS metadata when Authority is temporarily unreachable |
|
||||||
|
| `StellaOps:Authority:Resilience:OfflineCacheTolerance` | `STELLAOPS_AUTHORITY_OFFLINE_CACHE_TOLERANCE` | `00:10:00` | Additional tolerance window added to the discovery/JWKS cache lifetime |
|
||||||
|
|
||||||
|
See `docs/dev/32_AUTH_CLIENT_GUIDE.md` for recommended profiles (online vs. air-gapped) and testing guidance.
|
||||||
|
|
||||||
|
| Command | Purpose | Key Flags / Arguments | Notes |
|
||||||
|
|---------|---------|-----------------------|-------|
|
||||||
|
| `stellaops-cli scanner download` | Fetch and install scanner container | `--channel <stable\|beta\|nightly>` (default `stable`)<br>`--output <path>`<br>`--overwrite`<br>`--no-install` | Saves artefact under `ScannerCacheDirectory`, verifies digest/signature, and executes `docker load` unless `--no-install` is supplied. |
|
||||||
|
| `stellaops-cli scan run` | Execute scanner container against a directory (auto-upload) | `--target <directory>` (required)<br>`--runner <docker\|dotnet\|self>` (default from config)<br>`--entry <image-or-entrypoint>`<br>`[scanner-args...]` | Runs the scanner, writes results into `ResultsDirectory`, emits a structured `scan-run-*.json` metadata file, and automatically uploads the artefact when the exit code is `0`. |
|
||||||
|
| `stellaops-cli scan upload` | Re-upload existing scan artefact | `--file <path>` | Useful for retries when automatic upload fails or when operating offline. |
|
||||||
|
| `stellaops-cli db fetch` | Trigger connector jobs | `--source <id>` (e.g. `redhat`, `osv`)<br>`--stage <fetch\|parse\|map>` (default `fetch`)<br>`--mode <resume\|init\|cursor>` | Translates to `POST /jobs/source:{source}:{stage}` with `trigger=cli` |
|
||||||
|
| `stellaops-cli db merge` | Run canonical merge reconcile | — | Calls `POST /jobs/merge:reconcile`; exit code `0` on acceptance, `1` on failures/conflicts |
|
||||||
|
| `stellaops-cli db export` | Kick JSON / Trivy exports | `--format <json\|trivy-db>` (default `json`)<br>`--delta`<br>`--publish-full/--publish-delta`<br>`--bundle-full/--bundle-delta` | Sets `{ delta = true }` parameter when requested and can override ORAS/bundle toggles per run |
|
||||||
|
| `stellaops-cli auth <login\|logout\|status\|whoami>` | Manage cached tokens for StellaOps Authority | `auth login --force` (ignore cache)<br>`auth status`<br>`auth whoami` | Uses `StellaOps.Auth.Client`; honours `StellaOps:Authority:*` configuration, stores tokens under `~/.stellaops/tokens` by default, and `whoami` prints subject/scope/expiry |
|
||||||
|
|
||||||
|
When running on an interactive terminal without explicit override flags, the CLI uses Spectre.Console prompts to let you choose per-run ORAS/offline bundle behaviour.
|
||||||
|
| `stellaops-cli config show` | Display resolved configuration | — | Masks secret values; helpful for air‑gapped installs |
|
||||||
|
|
||||||
|
**Logging & exit codes**
|
||||||
|
|
||||||
|
- Structured logging via `Microsoft.Extensions.Logging` with single-line console output (timestamps in UTC).
|
||||||
|
- `--verbose / -v` raises log level to `Debug`.
|
||||||
|
- Command exit codes bubble up: backend conflict → `1`, cancelled via `CTRL+C` → `130`, scanner exit codes propagate as-is.
|
||||||
|
|
||||||
|
**Artifact validation**
|
||||||
|
|
||||||
|
- Downloads are verified against the `X-StellaOps-Digest` header (SHA-256). When `StellaOps:ScannerSignaturePublicKeyPath` points to a PEM-encoded RSA key, the optional `X-StellaOps-Signature` header is validated as well.
|
||||||
|
- Metadata for each bundle is written alongside the artefact (`*.metadata.json`) with digest, signature, source URL, and timestamps.
|
||||||
|
- Retry behaviour is controlled via `StellaOps:ScannerDownloadAttempts` (default **3** with exponential backoff).
|
||||||
|
- Successful `scan run` executions create timestamped JSON artefacts inside `ResultsDirectory` plus a `scan-run-*.json` metadata envelope documenting the runner, arguments, timing, and stdout/stderr. The artefact is posted back to Feedser automatically.
|
||||||
|
|
||||||
|
#### Trivy DB export metadata (`metadata.json`)
|
||||||
|
|
||||||
|
`stellaops-cli db export --format trivy-db` (and the backing `POST /jobs/export:trivy-db`) always emits a `metadata.json` document in the OCI layout root. Operators consuming the bundle or delta updates should inspect the following fields:
|
||||||
|
|
||||||
|
| Field | Type | Purpose |
|
||||||
|
| ----- | ---- | ------- |
|
||||||
|
| `mode` | `full` \| `delta` | Indicates whether the current run rebuilt the entire database (`full`) or only the changed files (`delta`). |
|
||||||
|
| `baseExportId` | string? | Export ID of the last full baseline that the delta builds upon. Only present for `mode = delta`. |
|
||||||
|
| `baseManifestDigest` | string? | SHA-256 digest of the manifest belonging to the baseline OCI layout. |
|
||||||
|
| `resetBaseline` | boolean | `true` when the exporter rotated the baseline (e.g., repo change, delta chain reset). Treat as a full refresh. |
|
||||||
|
| `treeDigest` | string | Canonical SHA-256 digest of the JSON tree used to build the database. |
|
||||||
|
| `treeBytes` | number | Total bytes across exported JSON files. |
|
||||||
|
| `advisoryCount` | number | Count of advisories included in the export. |
|
||||||
|
| `exporterVersion` | string | Version stamp of `StellaOps.Feedser.Exporter.TrivyDb`. |
|
||||||
|
| `builder` | object? | Raw metadata emitted by `trivy-db build` (version, update cadence, etc.). |
|
||||||
|
| `delta.changedFiles[]` | array | Present when `mode = delta`. Each entry lists `{ "path": "<relative json>", "length": <bytes>, "digest": "sha256:..." }`. |
|
||||||
|
| `delta.removedPaths[]` | array | Paths that existed in the previous manifest but were removed in the new run. |
|
||||||
|
|
||||||
|
When the planner opts for a delta run, the exporter copies unmodified blobs from the baseline layout identified by `baseManifestDigest`. Consumers that cache OCI blobs only need to fetch the `changedFiles` and the new manifest/metadata unless `resetBaseline` is true.
|
||||||
|
When pushing to ORAS, set `feedser:exporters:trivyDb:oras:publishFull` / `publishDelta` to control whether full or delta runs are copied to the registry. Offline bundles follow the analogous `includeFull` / `includeDelta` switches under `offlineBundle`.
|
||||||
|
|
||||||
|
Example configuration (`appsettings.yaml`):
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
feedser:
|
||||||
|
exporters:
|
||||||
|
trivyDb:
|
||||||
|
oras:
|
||||||
|
enabled: true
|
||||||
|
publishFull: true
|
||||||
|
publishDelta: false
|
||||||
|
offlineBundle:
|
||||||
|
enabled: true
|
||||||
|
includeFull: true
|
||||||
|
includeDelta: false
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
**Authentication**
|
||||||
|
|
||||||
|
- API key is sent as `Authorization: Bearer <token>` automatically when configured.
|
||||||
|
- Anonymous operation is permitted only when Feedser runs with
|
||||||
|
`authority.allowAnonymousFallback: true`. This flag is temporary—plan to disable
|
||||||
|
it before **2025-12-31 UTC** so bearer tokens become mandatory.
|
||||||
|
|
||||||
|
Authority-backed auth workflow:
|
||||||
|
1. Configure Authority settings via config or env vars (see sample below). Minimum fields: `Url`, `ClientId`, and either `ClientSecret` (client credentials) or `Username`/`Password` (password grant).
|
||||||
|
2. Run `stellaops-cli auth login` to acquire and cache a token. Use `--force` if you need to ignore an existing cache entry.
|
||||||
|
3. Execute CLI commands as normal—the backend client injects the cached bearer token automatically and retries on transient 401/403 responses with operator guidance.
|
||||||
|
4. Inspect the cache with `stellaops-cli auth status` (shows expiry, scope, mode) or clear it via `stellaops-cli auth logout`.
|
||||||
|
5. Run `stellaops-cli auth whoami` to dump token subject, audience, issuer, scopes, and remaining lifetime (verbose mode prints additional claims).
|
||||||
|
6. Expect Feedser to emit audit logs for each `/jobs*` request showing `subject`,
|
||||||
|
`clientId`, `scopes`, `status`, and whether network bypass rules were applied.
|
||||||
|
|
||||||
|
Tokens live in `~/.stellaops/tokens` unless `StellaOps:Authority:TokenCacheDirectory` overrides it. Cached tokens are reused offline until they expire; the CLI surfaces clear errors if refresh fails.
|
||||||
|
|
||||||
|
**Configuration file template**
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"StellaOps": {
|
||||||
|
"ApiKey": "your-api-token",
|
||||||
|
"BackendUrl": "https://feedser.example.org",
|
||||||
|
"ScannerCacheDirectory": "scanners",
|
||||||
|
"ResultsDirectory": "results",
|
||||||
|
"DefaultRunner": "docker",
|
||||||
|
"ScannerSignaturePublicKeyPath": "",
|
||||||
|
"ScannerDownloadAttempts": 3,
|
||||||
|
"Authority": {
|
||||||
|
"Url": "https://authority.example.org",
|
||||||
|
"ClientId": "feedser-cli",
|
||||||
|
"ClientSecret": "REDACTED",
|
||||||
|
"Username": "",
|
||||||
|
"Password": "",
|
||||||
|
"Scope": "feedser.jobs.trigger",
|
||||||
|
"TokenCacheDirectory": ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Drop `appsettings.local.json` or `.yaml` beside the binary to override per environment.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2.5 Misc Endpoints
|
||||||
|
|
||||||
|
| Path | Method | Description |
|
||||||
|
| ---------- | ------ | ---------------------------- |
|
||||||
|
| `/healthz` | GET | Liveness; returns `"ok"` |
|
||||||
|
| `/metrics` | GET | Prometheus exposition (OTel) |
|
||||||
|
| `/version` | GET | Git SHA + build date |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 First‑Party CLI Tools
|
||||||
|
|
||||||
|
### 3.1 `stella`
|
||||||
|
|
||||||
|
> *Package SBOM + Scan + Exit code* – designed for CI.
|
||||||
|
|
||||||
|
```
|
||||||
|
Usage: stella [OPTIONS] IMAGE_OR_SBOM
|
||||||
|
```
|
||||||
|
|
||||||
|
| Flag / Option | Default | Description |
|
||||||
|
| --------------- | ----------------------- | -------------------------------------------------- |
|
||||||
|
| `--server` | `http://localhost:8080` | API root |
|
||||||
|
| `--token` | *env `STELLA_TOKEN`* | Bearer token |
|
||||||
|
| `--sbom-type` | *auto* | Force `trivy-json-v2`/`spdx-json`/`cyclonedx-json` |
|
||||||
|
| `--delta` | `false` | Enable delta layer optimisation |
|
||||||
|
| `--policy-file` | *none* | Override server rules with local YAML/Rego |
|
||||||
|
| `--threshold` | `critical` | Fail build if ≥ level found |
|
||||||
|
| `--output-json` | *none* | Write raw scan result to file |
|
||||||
|
| `--wait-quota` | `true` | If 429 received, automatically wait `Retry‑After` and retry once. |
|
||||||
|
|
||||||
|
**Exit codes**
|
||||||
|
|
||||||
|
| Code | Meaning |
|
||||||
|
| ---- | ------------------------------------------- |
|
||||||
|
| 0 | Scan OK, policy passed |
|
||||||
|
| 1 | Vulnerabilities ≥ threshold OR policy block |
|
||||||
|
| 2 | Internal error (network etc.) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3.2 `stella‑zastava`
|
||||||
|
|
||||||
|
> *Daemon / K8s DaemonSet* – watch container runtime, push SBOMs.
|
||||||
|
|
||||||
|
Core flags (excerpt):
|
||||||
|
|
||||||
|
| Flag | Purpose |
|
||||||
|
| ---------------- | ---------------------------------- |
|
||||||
|
| `--mode` | `listen` (default) / `enforce` |
|
||||||
|
| `--filter-image` | Regex; ignore infra/busybox images |
|
||||||
|
| `--threads` | Worker pool size |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3.3 `stellopsctl`
|
||||||
|
|
||||||
|
> *Admin utility* – policy snapshots, feed status, user CRUD.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
```
|
||||||
|
stellopsctl policy export > policies/backup-2025-07-14.yaml
|
||||||
|
stellopsctl feed refresh # force OSV merge
|
||||||
|
stellopsctl user add dev-team --role developer
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 Error Model
|
||||||
|
|
||||||
|
Uniform problem‑details object (RFC 7807):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "https://stella-ops.org/probs/validation",
|
||||||
|
"title": "Invalid request",
|
||||||
|
"status": 400,
|
||||||
|
"detail": "Layer digest malformed",
|
||||||
|
"traceId": "00-7c39..."
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 Rate Limits
|
||||||
|
|
||||||
|
Default **40 requests / second / token**.
|
||||||
|
429 responses include `Retry-After` seconds header.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 FAQ & Tips
|
||||||
|
|
||||||
|
* **Skip SBOM generation in CI** – supply a *pre‑built* SBOM and add `?sbom-only=true` to `/scan` for <1 s path.
|
||||||
|
* **Air‑gapped?** – point `--server` to `http://oukgw:8080` inside the Offline Update Kit.
|
||||||
|
* **YAML vs Rego** – YAML simpler; Rego unlocks time‑based logic (see samples).
|
||||||
|
* **Cosign verify plug‑ins** – enable `SCANNER_VERIFY_SIG=true` env to refuse unsigned plug‑ins.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 Planned Changes (Beyond 6 Months)
|
||||||
|
|
||||||
|
These stay in *Feature Matrix → To Do* until design is frozen.
|
||||||
|
|
||||||
|
| Epic / Feature | API Impact Sketch |
|
||||||
|
| ---------------------------- | ---------------------------------- |
|
||||||
|
| **SLSA L1‑L3** attestation | `/attest` (see §2.4) |
|
||||||
|
| Rekor transparency log | `/rekor/log/{id}` (GET) |
|
||||||
|
| Plug‑in Marketplace metadata | `/plugins/market` (catalog) |
|
||||||
|
| Horizontal scaling controls | `POST /cluster/node` (add/remove) |
|
||||||
|
| Windows agent support | Update LSAPI to PDE, no API change |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 References
|
||||||
|
|
||||||
|
* OpenAPI YAML → `/openapi/v1.yaml` (served by backend)
|
||||||
|
* OAuth2 spec: <https://datatracker.ietf.org/doc/html/rfc6749>
|
||||||
|
* SLSA spec: <https://slsa.dev/spec/v1.0>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9 Changelog (truncated)
|
||||||
|
|
||||||
|
* **2025‑07‑14** – added *delta SBOM*, policy import/export, CLI `--sbom-type`.
|
||||||
|
* **2025‑07‑12** – initial public reference.
|
||||||
|
|
||||||
|
---
|
||||||
289
docs/10_FEEDSER_CLI_QUICKSTART.md
Normal file
289
docs/10_FEEDSER_CLI_QUICKSTART.md
Normal file
@@ -0,0 +1,289 @@
|
|||||||
|
# 10 · Feedser + CLI Quickstart
|
||||||
|
|
||||||
|
This guide walks through configuring the Feedser web service and the `stellaops-cli`
|
||||||
|
tool so an operator can ingest advisories, merge them, and publish exports from a
|
||||||
|
single workstation. It focuses on deployment-facing surfaces only (configuration,
|
||||||
|
runtime wiring, CLI usage) and leaves connector/internal customization for later.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 · Prerequisites
|
||||||
|
|
||||||
|
- .NET SDK **10.0.100-preview** (matches `global.json`)
|
||||||
|
- MongoDB instance reachable from the host (local Docker or managed)
|
||||||
|
- `trivy-db` binary on `PATH` for Trivy exports (and `oras` if publishing to OCI)
|
||||||
|
- Plugin assemblies present in `PluginBinaries/` (already included in the repo)
|
||||||
|
- Optional: Docker/Podman runtime if you plan to run scanners locally
|
||||||
|
|
||||||
|
> **Tip** – air-gapped installs should preload `trivy-db` and `oras` binaries into the
|
||||||
|
> runner image since Feedser never fetches them dynamically.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Configure Feedser
|
||||||
|
|
||||||
|
1. Copy the sample config to the expected location (CI/CD pipelines can stamp values
|
||||||
|
into this file during deployment—see the “Deployment automation” note below):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir -p etc
|
||||||
|
cp etc/feedser.yaml.sample etc/feedser.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Edit `etc/feedser.yaml` and update the MongoDB DSN (and optional database name).
|
||||||
|
The default template configures plug-in discovery to look in `PluginBinaries/`
|
||||||
|
and disables remote telemetry exporters by default.
|
||||||
|
|
||||||
|
3. (Optional) Override settings via environment variables. All keys are prefixed with
|
||||||
|
`FEEDSER_`. Example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export FEEDSER_STORAGE__DSN="mongodb://user:pass@mongo:27017/feedser"
|
||||||
|
export FEEDSER_TELEMETRY__ENABLETRACING=false
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Start the web service from the repository root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dotnet run --project src/StellaOps.Feedser.WebService
|
||||||
|
```
|
||||||
|
|
||||||
|
On startup Feedser validates the options, boots MongoDB indexes, loads plug-ins,
|
||||||
|
and exposes:
|
||||||
|
|
||||||
|
- `GET /health` – returns service status and telemetry settings
|
||||||
|
- `GET /ready` – performs a MongoDB `ping`
|
||||||
|
- `GET /jobs` + `POST /jobs/{kind}` – inspect and trigger connector/export jobs
|
||||||
|
|
||||||
|
> **Security note** – authentication now ships via StellaOps Authority. Keep
|
||||||
|
> `authority.allowAnonymousFallback: true` only during the staged rollout and
|
||||||
|
> disable it before **2025-12-31 UTC** so tokens become mandatory.
|
||||||
|
|
||||||
|
### Authority companion configuration (preview)
|
||||||
|
|
||||||
|
1. Copy the Authority sample configuration:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cp etc/authority.yaml.sample etc/authority.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Update the issuer URL, token lifetimes, and plug-in descriptors to match your
|
||||||
|
environment. Authority expects per-plugin manifests in `etc/authority.plugins/`;
|
||||||
|
sample `standard.yaml` and `ldap.yaml` files are provided as starting points.
|
||||||
|
For air-gapped installs keep the default plug-in binary directory
|
||||||
|
(`../PluginBinaries/Authority`) so packaged plug-ins load without outbound access.
|
||||||
|
|
||||||
|
3. Environment variables prefixed with `STELLAOPS_AUTHORITY_` override individual
|
||||||
|
fields. Example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export STELLAOPS_AUTHORITY__ISSUER="https://authority.stella-ops.local"
|
||||||
|
export STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0="/srv/authority/plugins"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Configure the CLI
|
||||||
|
|
||||||
|
The CLI reads configuration from JSON/YAML files *and* environment variables. The
|
||||||
|
defaults live in `src/StellaOps.Cli/appsettings.json` and expect overrides at runtime.
|
||||||
|
|
||||||
|
| Setting | Environment variable | Default | Purpose |
|
||||||
|
| ------- | -------------------- | ------- | ------- |
|
||||||
|
| `BackendUrl` | `STELLAOPS_BACKEND_URL` | _empty_ | Base URL of the Feedser web service |
|
||||||
|
| `ApiKey` | `API_KEY` | _empty_ | Reserved for legacy key auth; leave empty when using Authority |
|
||||||
|
| `ScannerCacheDirectory` | `STELLAOPS_SCANNER_CACHE_DIRECTORY` | `scanners` | Local cache folder |
|
||||||
|
| `ResultsDirectory` | `STELLAOPS_RESULTS_DIRECTORY` | `results` | Where scan outputs are written |
|
||||||
|
| `Authority.Url` | `STELLAOPS_AUTHORITY_URL` | _empty_ | StellaOps Authority issuer/token endpoint |
|
||||||
|
| `Authority.ClientId` | `STELLAOPS_AUTHORITY_CLIENT_ID` | _empty_ | Client identifier for the CLI |
|
||||||
|
| `Authority.ClientSecret` | `STELLAOPS_AUTHORITY_CLIENT_SECRET` | _empty_ | Client secret (omit when using username/password grant) |
|
||||||
|
| `Authority.Username` | `STELLAOPS_AUTHORITY_USERNAME` | _empty_ | Username for password grant flows |
|
||||||
|
| `Authority.Password` | `STELLAOPS_AUTHORITY_PASSWORD` | _empty_ | Password for password grant flows |
|
||||||
|
| `Authority.Scope` | `STELLAOPS_AUTHORITY_SCOPE` | `feedser.jobs.trigger` | OAuth scope requested for backend operations |
|
||||||
|
| `Authority.TokenCacheDirectory` | `STELLAOPS_AUTHORITY_TOKEN_CACHE_DIR` | `~/.stellaops/tokens` | Directory that persists cached tokens |
|
||||||
|
| `Authority.Resilience.EnableRetries` | `STELLAOPS_AUTHORITY_ENABLE_RETRIES` | `true` | Toggle Polly retry handler for Authority HTTP calls |
|
||||||
|
| `Authority.Resilience.RetryDelays` | `STELLAOPS_AUTHORITY_RETRY_DELAYS` | `1s,2s,5s` | Comma- or space-separated backoff delays (hh:mm:ss) |
|
||||||
|
| `Authority.Resilience.AllowOfflineCacheFallback` | `STELLAOPS_AUTHORITY_ALLOW_OFFLINE_CACHE_FALLBACK` | `true` | Allow CLI to reuse cached discovery/JWKS metadata when Authority is offline |
|
||||||
|
| `Authority.Resilience.OfflineCacheTolerance` | `STELLAOPS_AUTHORITY_OFFLINE_CACHE_TOLERANCE` | `00:10:00` | Additional tolerance window applied to cached metadata |
|
||||||
|
|
||||||
|
Example bootstrap:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export STELLAOPS_BACKEND_URL="http://localhost:5000"
|
||||||
|
export STELLAOPS_RESULTS_DIRECTORY="$HOME/.stellaops/results"
|
||||||
|
export STELLAOPS_AUTHORITY_URL="https://authority.local"
|
||||||
|
export STELLAOPS_AUTHORITY_CLIENT_ID="feedser-cli"
|
||||||
|
export STELLAOPS_AUTHORITY_CLIENT_SECRET="s3cr3t"
|
||||||
|
dotnet run --project src/StellaOps.Cli -- db merge
|
||||||
|
|
||||||
|
# Acquire a bearer token and confirm cache state
|
||||||
|
dotnet run --project src/StellaOps.Cli -- auth login
|
||||||
|
dotnet run --project src/StellaOps.Cli -- auth status
|
||||||
|
dotnet run --project src/StellaOps.Cli -- auth whoami
|
||||||
|
```
|
||||||
|
|
||||||
|
Refer to `docs/dev/32_AUTH_CLIENT_GUIDE.md` for deeper guidance on tuning retry/offline settings and rollout checklists.
|
||||||
|
|
||||||
|
To persist configuration, you can create `stellaops-cli.yaml` next to the binary or
|
||||||
|
rely on environment variables for ephemeral runners.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Operating Workflow
|
||||||
|
|
||||||
|
1. **Trigger connector fetch stages**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dotnet run --project src/StellaOps.Cli -- db fetch --source osv --stage fetch
|
||||||
|
dotnet run --project src/StellaOps.Cli -- db fetch --source osv --stage parse
|
||||||
|
dotnet run --project src/StellaOps.Cli -- db fetch --source osv --stage map
|
||||||
|
```
|
||||||
|
|
||||||
|
Use `--mode resume` when continuing from a previous window:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dotnet run --project src/StellaOps.Cli -- db fetch --source redhat --stage fetch --mode resume
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Merge canonical advisories**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dotnet run --project src/StellaOps.Cli -- db merge
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Produce exports**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# JSON tree (vuln-list style)
|
||||||
|
dotnet run --project src/StellaOps.Cli -- db export --format json
|
||||||
|
|
||||||
|
# Trivy DB (delta example)
|
||||||
|
dotnet run --project src/StellaOps.Cli -- db export --format trivy-db --delta
|
||||||
|
```
|
||||||
|
|
||||||
|
Feedser always produces a deterministic OCI layout. The first run after a clean
|
||||||
|
bootstrap emits a **full** baseline; subsequent `--delta` runs reuse the previous
|
||||||
|
baseline’s blobs when only JSON manifests change. If the exporter detects that a
|
||||||
|
prior delta is still active (i.e., `LastDeltaDigest` is recorded) it automatically
|
||||||
|
upgrades the next run to a full export and resets the baseline so operators never
|
||||||
|
chain deltas indefinitely. The CLI exposes `--publish-full/--publish-delta` (for
|
||||||
|
ORAS pushes) and `--include-full/--include-delta` (for offline bundles) should you
|
||||||
|
need to override the defaults interactively.
|
||||||
|
|
||||||
|
**Smoke-check delta reuse:** after the first baseline completes, run the export a
|
||||||
|
second time with `--delta` and verify that the new directory reports `mode=delta`
|
||||||
|
while reusing the previous layer blob.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export_root=${FEEDSER_EXPORT_ROOT:-exports/trivy}
|
||||||
|
base=$(ls -1d "$export_root"/* | sort | tail -n2 | head -n1)
|
||||||
|
delta=$(ls -1d "$export_root"/* | sort | tail -n1)
|
||||||
|
|
||||||
|
jq -r '.mode,.baseExportId' "$delta/metadata.json"
|
||||||
|
|
||||||
|
base_manifest=$(jq -r '.manifests[0].digest' "$base/index.json")
|
||||||
|
delta_manifest=$(jq -r '.manifests[0].digest' "$delta/index.json")
|
||||||
|
printf 'baseline manifest: %s\ndelta manifest: %s\n' "$base_manifest" "$delta_manifest"
|
||||||
|
|
||||||
|
layer_digest=$(jq -r '.layers[0].digest' "$base/blobs/sha256/${base_manifest#sha256:}")
|
||||||
|
cmp "$base/blobs/sha256/${layer_digest#sha256:}" \
|
||||||
|
"$delta/blobs/sha256/${layer_digest#sha256:}"
|
||||||
|
```
|
||||||
|
|
||||||
|
`cmp` returning exit code `0` confirms the delta export reuses the baseline’s
|
||||||
|
`db.tar.gz` layer instead of rebuilding it.
|
||||||
|
|
||||||
|
4. **Manage scanners (optional)**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dotnet run --project src/StellaOps.Cli -- scanner download --channel stable
|
||||||
|
dotnet run --project src/StellaOps.Cli -- scan run --entry scanners/latest/Scanner.dll --target ./sboms
|
||||||
|
dotnet run --project src/StellaOps.Cli -- scan upload --file results/scan-001.json
|
||||||
|
```
|
||||||
|
|
||||||
|
Add `--verbose` to any command for structured console logs. All commands honour
|
||||||
|
`Ctrl+C` cancellation and exit with non-zero status codes when the backend returns
|
||||||
|
a problem document.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Verification Checklist
|
||||||
|
|
||||||
|
- Feedser `/health` returns `"status":"healthy"` and Storage bootstrap is marked
|
||||||
|
complete after startup.
|
||||||
|
- CLI commands return HTTP 202 with a `Location` header (job tracking URL) when
|
||||||
|
triggering Feedser jobs.
|
||||||
|
- Export artefacts are materialised under the configured output directories and
|
||||||
|
their manifests record digests.
|
||||||
|
- MongoDB contains the expected `document`, `dto`, `advisory`, and `export_state`
|
||||||
|
collections after a run.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Deployment Automation
|
||||||
|
|
||||||
|
- Treat `etc/feedser.yaml.sample` as the canonical template. CI/CD should copy it to
|
||||||
|
the deployment artifact and replace placeholders (DSN, telemetry endpoints, cron
|
||||||
|
overrides) with environment-specific secrets.
|
||||||
|
- Keep secret material (Mongo credentials, OTLP tokens) outside of the repository;
|
||||||
|
inject them via secret stores or pipeline variables at stamp time.
|
||||||
|
- When building container images, include `trivy-db` (and `oras` if used) so air-gapped
|
||||||
|
clusters do not need outbound downloads at runtime.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Next Steps
|
||||||
|
|
||||||
|
- Enable authority-backed authentication in non-production first. Set
|
||||||
|
`authority.enabled: true` while keeping `authority.allowAnonymousFallback: true`
|
||||||
|
to observe logs, then flip it to `false` before 2025-12-31 UTC to enforce tokens.
|
||||||
|
- Automate the workflow above via CI/CD (compose stack or Kubernetes CronJobs).
|
||||||
|
- Pair with the Feedser connector teams when enabling additional sources so their
|
||||||
|
module-specific requirements are pulled in safely.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Authority Integration
|
||||||
|
|
||||||
|
- Feedser now authenticates callers through StellaOps Authority using OAuth 2.0
|
||||||
|
resource server flows. Populate the `authority` block in `feedser.yaml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
authority:
|
||||||
|
enabled: true
|
||||||
|
allowAnonymousFallback: false # keep true only during the staged rollout window
|
||||||
|
issuer: "https://authority.example.org"
|
||||||
|
audiences:
|
||||||
|
- "api://feedser"
|
||||||
|
requiredScopes:
|
||||||
|
- "feedser.jobs.trigger"
|
||||||
|
clientId: "feedser-jobs"
|
||||||
|
clientSecretFile: "../secrets/feedser-jobs.secret"
|
||||||
|
clientScopes:
|
||||||
|
- "feedser.jobs.trigger"
|
||||||
|
bypassNetworks:
|
||||||
|
- "127.0.0.1/32"
|
||||||
|
- "::1/128"
|
||||||
|
```
|
||||||
|
|
||||||
|
- Store the client secret outside of source control. Either provide it via
|
||||||
|
`authority.clientSecret` (environment variable `FEEDSER_AUTHORITY__CLIENTSECRET`)
|
||||||
|
or point `authority.clientSecretFile` to a file mounted at runtime.
|
||||||
|
- Cron jobs running on the same host can keep using the API thanks to the loopback
|
||||||
|
bypass mask. Add additional CIDR ranges as needed; every bypass is logged.
|
||||||
|
- Export the same configuration to Kubernetes or systemd by setting environment
|
||||||
|
variables such as:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export FEEDSER_AUTHORITY__ENABLED=true
|
||||||
|
export FEEDSER_AUTHORITY__ALLOWANONYMOUSFALLBACK=false
|
||||||
|
export FEEDSER_AUTHORITY__ISSUER="https://authority.example.org"
|
||||||
|
export FEEDSER_AUTHORITY__CLIENTID="feedser-jobs"
|
||||||
|
export FEEDSER_AUTHORITY__CLIENTSECRETFILE="/var/run/secrets/feedser/authority-client"
|
||||||
|
```
|
||||||
|
|
||||||
|
- CLI commands already pass `Authorization` headers when credentials are supplied.
|
||||||
|
Configure the CLI with matching Authority settings (`docs/09_API_CLI_REFERENCE.md`)
|
||||||
|
so that automation can obtain tokens with the same client credentials. Feedser
|
||||||
|
logs every job request with the client ID, subject (if present), scopes, and
|
||||||
|
a `bypass` flag so operators can audit cron traffic.
|
||||||
139
docs/10_OFFLINE_KIT.md
Executable file
139
docs/10_OFFLINE_KIT.md
Executable file
@@ -0,0 +1,139 @@
|
|||||||
|
# Offline Update Kit (OUK) — 100 % Air‑Gap Operation
|
||||||
|
|
||||||
|
> **Status:** ships together with the public α `v0.1.0` (ETA **late 2025**).
|
||||||
|
> All commands below assume the bundle name
|
||||||
|
> `stella-ouk‑2025‑α.tar.gz` – adjust once the real date tag is known.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · What’s in the bundle 📦
|
||||||
|
|
||||||
|
| Item | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| **Vulnerability database** | Pre‑merged snapshot of NVD 2.0, OSV, GHSA <br/> + optional **regional catalogue** feeds |
|
||||||
|
| **Container images** | Scanner + Zastava for **x86‑64** & **arm64** |
|
||||||
|
| **Cosign signatures** | Release attestation & SBOM integrity |
|
||||||
|
| **SPDX SBOM** | Cryptographically signed bill of materials |
|
||||||
|
| **Import manifest** | Check‑sums & version metadata |
|
||||||
|
|
||||||
|
Nightly **delta patches** keep the bundle < 350 MB while staying *T‑1 day*
|
||||||
|
current.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Download & verify 🔒
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -LO https://get.stella-ops.org/releases/latest/stella-ops-offline-usage-kit-v0.1a.tar.gz
|
||||||
|
curl -LO https://get.stella-ops.org/releases/latest/stella-ops-offline-usage-kit-v0.1a.tar.gz.sig
|
||||||
|
|
||||||
|
cosign verify-blob \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
--signature stella-ops-offline-usage-kit-v0.1a.tar.gz.sig \
|
||||||
|
stella-ops-offline-usage-kit-v0.1a.tar.gz
|
||||||
|
```
|
||||||
|
|
||||||
|
The output shows `Verified OK` and the SHA‑256 digest ‑ compare with the
|
||||||
|
release notes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Import on the isolated host 🚀
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose --env-file .env -f compose-stella.yml \
|
||||||
|
exec stella-ops stella ouk import stella-ops-offline-usage-kit-v0.1a.tar.gz
|
||||||
|
```
|
||||||
|
|
||||||
|
* The scanner verifies the Cosign signature **before** activation.
|
||||||
|
* DB switch is atomic – **no downtime** for running jobs.
|
||||||
|
* Import time on an SSD VM ≈ 5‑7 s.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · How the quota works offline 🔢
|
||||||
|
|
||||||
|
| Mode | Daily scans | Behaviour at 200 scans | Behaviour over limit |
|
||||||
|
| --------------- | ----------- | ---------------------- | ------------------------------------ |
|
||||||
|
| **Anonymous** | {{ quota_anon }} | Reminder banner | CLI slows \~10 % |
|
||||||
|
| **Token (JWT)** | {{ quota_token }} | Reminder banner | Throttle continues, **never blocks** |
|
||||||
|
|
||||||
|
*Request a free JWT:* send a blank e‑mail to
|
||||||
|
`token@stella-ops.org` – the bot replies with a signed token that you
|
||||||
|
store as `STELLA_JWT` in **`.env`**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Updating the bundle ⤴️
|
||||||
|
|
||||||
|
1. Download the newer tarball & signature.
|
||||||
|
2. Repeat the **verify‑blob** step.
|
||||||
|
3. Run `stella ouk import <file>` – only the delta applies; average
|
||||||
|
upgrade time is **< 3 s**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Road‑map highlights for Sovereign 🌐
|
||||||
|
|
||||||
|
| Release | Planned feature |
|
||||||
|
| ---------------------- | ---------------------------------------- |
|
||||||
|
| **v0.1 α (late 2025)** | Manual OUK import • Zastava beta |
|
||||||
|
| **v0.3 β (Q2 2026)** | Auto‑apply delta patch • nightly re‑scan |
|
||||||
|
| **v0.4 RC (Q3 2026)** | LDAP/AD SSO • registry scanner GA |
|
||||||
|
| **v1.0 GA (Q4 2026)** | Custom TLS/crypto adaptors (**incl. SM2**)—enabled where law or security requires it |
|
||||||
|
|
||||||
|
Full details live in the public [Road‑map](../roadmap/README.md).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Troubleshooting 🩹
|
||||||
|
|
||||||
|
| Symptom | Fix |
|
||||||
|
| -------------------------------------------- | ------------------------------------------------------- |
|
||||||
|
| `cosign: signature mismatch` | File corrupted ‑ re‑download both tarball & `.sig` |
|
||||||
|
| `ouk import: no space left` | Ensure **8 GiB** free in `/var/lib/docker` |
|
||||||
|
| Import succeeds but scans still hit Internet | Confirm `STELLA_AIRGAP=true` in `.env` (v0.1‑α setting) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 · FAQ — abbreviated ❓
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>Does the JWT token work offline?</strong></summary>
|
||||||
|
|
||||||
|
Yes. Signature validation happens locally; no outbound call is made.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>Can I mirror the bundle internally?</strong></summary>
|
||||||
|
|
||||||
|
Absolutely. Host the tarball on an intranet HTTP/S server or an object
|
||||||
|
store; signatures remain valid.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>Is there a torrent alternative?</strong></summary>
|
||||||
|
|
||||||
|
Planned for the β releases – follow the
|
||||||
|
[community chat](https://matrix.to/#/#stellaops:libera.chat) for ETA.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Licence & provenance 📜
|
||||||
|
|
||||||
|
The Offline Update Kit is part of Stella Ops and therefore
|
||||||
|
**AGPL‑3.0‑or‑later**. All components inherit the same licence.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cosign verify-blob \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
--signature stella-ops-offline-usage-kit-v0.1a.tar.gz.sig \
|
||||||
|
stella-ops-offline-usage-kit-v0.1a.tar.gz
|
||||||
|
```
|
||||||
|
|
||||||
|
— **Happy air‑gap scanning!**
|
||||||
|
© 2025‑2026 Stella Ops
|
||||||
194
docs/10_PLUGIN_SDK_GUIDE.md
Executable file
194
docs/10_PLUGIN_SDK_GUIDE.md
Executable file
@@ -0,0 +1,194 @@
|
|||||||
|
# 10 · Plug‑in SDK Guide — **Stella Ops**
|
||||||
|
*(v 1.5 — 11 Jul 2025 · template install, no reload, IoC)*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Audience & Scope
|
||||||
|
Guidance for developers who extend Stella Ops with schedule jobs, scanner adapters, TLS providers, notification channels, etc. Everything here is OSS; commercial variants simply ship additional signed plug‑ins.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 Prerequisites
|
||||||
|
|
||||||
|
| Tool | Min Version |
|
||||||
|
| ----------------------- | ----------------------------------------------------------------- |
|
||||||
|
| .NET SDK | {{ dotnet }} |
|
||||||
|
| **StellaOps templates** | install once via `dotnet new install StellaOps.Templates::*` |
|
||||||
|
| **Cosign** | 2.3 + — used to sign DLLs |
|
||||||
|
| xUnit | 2.6 |
|
||||||
|
| Docker CLI | only if your plug‑in shells out to containers |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 Repository & Build Output
|
||||||
|
|
||||||
|
Every plug‑in is hosted in **`git.stella‑ops.org`**.
|
||||||
|
At publish time it must copy its signed artefacts to:
|
||||||
|
|
||||||
|
~~~text
|
||||||
|
src/backend/Stella.Ops.Plugin.Binaries/<MyPlugin>/
|
||||||
|
├── MyPlugin.dll
|
||||||
|
└── MyPlugin.dll.sig
|
||||||
|
~~~
|
||||||
|
|
||||||
|
The back‑end scans this folder on start‑up, verifies the **Cosign** signature, confirms the `[StellaPluginVersion]` gate, then loads the DLL inside an **isolated AssemblyLoadContext** to avoid dependency clashes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 Project Scaffold
|
||||||
|
|
||||||
|
Generate with the installed template:
|
||||||
|
|
||||||
|
~~~bash
|
||||||
|
dotnet new stellaops-plugin-schedule \
|
||||||
|
-n MyPlugin.Schedule \
|
||||||
|
--output src
|
||||||
|
~~~
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
~~~text
|
||||||
|
src/
|
||||||
|
├─ MyPlugin.Schedule/
|
||||||
|
│ ├─ MyJob.cs
|
||||||
|
│ └─ MyPlugin.Schedule.csproj
|
||||||
|
└─ tests/
|
||||||
|
└─ MyPlugin.Schedule.Tests/
|
||||||
|
~~~
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 MSBuild Wiring
|
||||||
|
|
||||||
|
Add this to **`MyPlugin.Schedule.csproj`** so the signed DLL + `.sig` land in the canonical plug‑in folder:
|
||||||
|
|
||||||
|
~~~xml
|
||||||
|
<PropertyGroup>
|
||||||
|
<StellaPluginOut>$(SolutionDir)src/backend/Stella.Ops.Plugin.Binaries/$(MSBuildProjectName)</StellaPluginOut>
|
||||||
|
</PropertyGroup>
|
||||||
|
|
||||||
|
<ItemGroup>
|
||||||
|
|
||||||
|
<ProjectReference Include="..\..\StellaOps.Common\StellaOps.Common.csproj"
|
||||||
|
PrivateAssets="all" />
|
||||||
|
</ItemGroup>
|
||||||
|
|
||||||
|
<Target Name="CopyStellaPlugin" AfterTargets="Publish">
|
||||||
|
<MakeDir Directories="$(StellaPluginOut)" />
|
||||||
|
<Copy SourceFiles="$(PublishDir)$(AssemblyName).dll;$(PublishDir)$(AssemblyName).dll.sig"
|
||||||
|
DestinationFolder="$(StellaPluginOut)" />
|
||||||
|
</Target>
|
||||||
|
~~~
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 Dependency‑Injection Entry‑point
|
||||||
|
|
||||||
|
Back‑end auto‑discovers the static method below:
|
||||||
|
|
||||||
|
~~~csharp
|
||||||
|
namespace StellaOps.DependencyInjection;
|
||||||
|
|
||||||
|
public static class IoCConfigurator
|
||||||
|
{
|
||||||
|
public static IServiceCollection Configure(this IServiceCollection services,
|
||||||
|
IConfiguration cfg)
|
||||||
|
{
|
||||||
|
services.AddSingleton<IJob, MyJob>(); // schedule job
|
||||||
|
services.Configure<MyPluginOptions>(cfg.GetSection("Plugins:MyPlugin"));
|
||||||
|
return services;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
~~~
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 Schedule Plug‑ins
|
||||||
|
|
||||||
|
### 6.1 Minimal Job
|
||||||
|
|
||||||
|
~~~csharp
|
||||||
|
using StellaOps.Scheduling; // contract
|
||||||
|
|
||||||
|
[StellaPluginVersion("2.0.0")]
|
||||||
|
public sealed class MyJob : IJob
|
||||||
|
{
|
||||||
|
public async Task ExecuteAsync(CancellationToken ct)
|
||||||
|
{
|
||||||
|
Console.WriteLine("Hello from plug‑in!");
|
||||||
|
await Task.Delay(500, ct);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
~~~
|
||||||
|
|
||||||
|
### 6.2 Cron Registration
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
services.AddCronJob<MyJob>("0 15 * * *"); // everyday
|
||||||
|
```
|
||||||
|
|
||||||
|
Cron syntax follows Hangfire rules; the expression `0 15 * * *` above fires every day at 15:00.
|
||||||
|
|
||||||
|
## 7 Scanner Adapters
|
||||||
|
|
||||||
|
Implement IScannerRunner.
|
||||||
|
Register inside Configure:
|
||||||
|
```csharp
|
||||||
|
services.AddScanner<MyAltScanner>("alt"); // backend
|
||||||
|
```
|
||||||
|
|
||||||
|
The backend then selects this engine when invoked with `--engine alt`.
|
||||||
|
If the engine needs a side‑car container, include a Dockerfile in your repo and document resource expectations.
|
||||||
|
## 8 Packaging & Signing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dotnet publish -c Release -p:PublishSingleFile=true -o out
|
||||||
|
cosign sign --key $COSIGN_KEY out/MyPlugin.Schedule.dll # sign binary only
|
||||||
|
sha256sum out/MyPlugin.Schedule.dll > out/.sha256 # optional checksum
|
||||||
|
zip MyPlugin.zip out/* README.md
|
||||||
|
```
|
||||||
|
|
||||||
|
Unsigned DLLs are refused when `StellaOps:Security:DisableUnsigned=false`.
|
||||||
|
|
||||||
|
## 9 Deployment
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker cp MyPlugin.zip <backend>:/opt/plugins/ && docker restart <backend>
|
||||||
|
```
|
||||||
|
|
||||||
|
Check `/health` – the response should list `"plugins":["MyPlugin.Schedule@2.0.0"]`.
|
||||||
|
(Hot‑reload was removed to keep the core process simple and memory‑safe.)
|
||||||
|
|
||||||
|
## 10 Configuration Patterns
|
||||||
|
|
||||||
|
| Need | Pattern |
|
||||||
|
| ------------ | --------------------------------------------------------- |
|
||||||
|
| Settings | Plugins:MyPlugin:* in appsettings.json. |
|
||||||
|
| Secrets | Redis secure:<plugin>:<key> (encrypted per TLS provider). |
|
||||||
|
| Dynamic cron | Implement ICronConfigurable; UI exposes editor. |
|
||||||
|
|
||||||
|
## 11 Testing & CI
|
||||||
|
|
||||||
|
| Layer | Tool | Gate |
|
||||||
|
| ----------- | -------------------------- | ------------------- |
|
||||||
|
| Unit | xUnit + Moq | ≥ 50 % lines |
|
||||||
|
| Integration | Testcontainers ‑ run in CI | Job completes < 5 s |
|
||||||
|
| Style | dotnet | format 0 warnings |
|
||||||
|
|
||||||
|
Use the pre‑baked workflow in StellaOps.Templates as starting point.
|
||||||
|
|
||||||
|
## 12 Publishing to the Community Marketplace
|
||||||
|
|
||||||
|
Tag Git release plugin‑vX.Y.Z and attach the signed ZIP.
|
||||||
|
Submit a PR to stellaops/community-plugins.json with metadata & git URL.
|
||||||
|
On merge, the plug‑in shows up in the UI Marketplace.
|
||||||
|
|
||||||
|
## 13 Common Pitfalls
|
||||||
|
|
||||||
|
| Symptom | Root cause | Fix |
|
||||||
|
| ------------------- | -------------------------- | ------------------------------------------- |
|
||||||
|
| NotDetected | .sig missing | cosign sign … |
|
||||||
|
| VersionGateMismatch | Backend 2.1 vs plug‑in 2.0 | Re‑compile / bump attribute |
|
||||||
|
| FileLoadException | Duplicate StellaOps.Common | Ensure PrivateAssets="all" |
|
||||||
|
| Redis timeouts | Large writes | Batch or use Mongo |
|
||||||
196
docs/11_DATA_SCHEMAS.md
Executable file
196
docs/11_DATA_SCHEMAS.md
Executable file
@@ -0,0 +1,196 @@
|
|||||||
|
# Data Schemas & Persistence Contracts
|
||||||
|
|
||||||
|
*Audience* – backend developers, plug‑in authors, DB admins.
|
||||||
|
*Scope* – describes **Redis**, **MongoDB** (optional), and on‑disk blob shapes that power Stella Ops.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Document Conventions
|
||||||
|
|
||||||
|
* **CamelCase** for JSON.
|
||||||
|
* All timestamps are **RFC 3339 / ISO 8601** with `Z` (UTC).
|
||||||
|
* `⭑` = planned but *not* shipped yet (kept on Feature Matrix “To Do”).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 SBOM Wrapper Envelope
|
||||||
|
|
||||||
|
Every SBOM blob (regardless of format) is stored on disk or in object storage with a *sidecar* JSON file that indexes it for the scanners.
|
||||||
|
|
||||||
|
#### 1.1 JSON Shape
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"id": "sha256:417f…", // digest of the SBOM *file* itself
|
||||||
|
"imageDigest": "sha256:e2b9…", // digest of the original container image
|
||||||
|
"created": "2025-07-14T07:02:13Z",
|
||||||
|
"format": "trivy-json-v2", // NEW enum: trivy-json-v2 | spdx-json | cyclonedx-json
|
||||||
|
"layers": [
|
||||||
|
"sha256:d38b…", // layer digests (ordered)
|
||||||
|
"sha256:af45…"
|
||||||
|
],
|
||||||
|
"partial": false, // true => delta SBOM (only some layers)
|
||||||
|
"provenanceId": "prov_0291" // ⭑ link to SLSA attestation (Q1‑2026)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
*`format`* **NEW** – added to support **multiple SBOM formats**.
|
||||||
|
*`partial`* **NEW** – true when generated via the **delta SBOM** flow (§1.3).
|
||||||
|
|
||||||
|
#### 1.2 File‑system Layout
|
||||||
|
|
||||||
|
```
|
||||||
|
blobs/
|
||||||
|
├─ 417f… # digest prefix
|
||||||
|
│ ├─ sbom.json # payload (any format)
|
||||||
|
│ └─ sbom.meta.json # wrapper (shape above)
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Note** – blob storage can point at S3, MinIO, or plain disk; driver plug‑ins adapt.
|
||||||
|
|
||||||
|
#### 1.3 Delta SBOM Extension
|
||||||
|
|
||||||
|
When `partial: true`, *only* the missing layers have been scanned.
|
||||||
|
Merging logic inside `scanning` module stitches new data onto the cached full SBOM in Redis.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 Redis Keyspace
|
||||||
|
|
||||||
|
| Key pattern | Type | TTL | Purpose |
|
||||||
|
|-------------------------------------|---------|------|--------------------------------------------------|
|
||||||
|
| `scan:<digest>` | string | ∞ | Last scan JSON result (as returned by `/scan`) |
|
||||||
|
| `layers:<digest>` | set | 90d | Layers already possessing SBOMs (delta cache) |
|
||||||
|
| `policy:active` | string | ∞ | YAML **or** Rego ruleset |
|
||||||
|
| `quota:<token>` | string | *until next UTC midnight* | Per‑token scan counter for Free tier ({{ quota_token }} scans). |
|
||||||
|
| `policy:history` | list | ∞ | Change audit IDs (see Mongo) |
|
||||||
|
| `feed:nvd:json` | string | 24h | Normalised feed snapshot |
|
||||||
|
| `locator:<imageDigest>` | string | 30d | Maps image digest → sbomBlobId |
|
||||||
|
| `metrics:…` | various | — | Prom / OTLP runtime metrics |
|
||||||
|
|
||||||
|
> **Delta SBOM** uses `layers:*` to skip work in <20 ms.
|
||||||
|
> **Quota enforcement** increments `quota:<token>` atomically; when the counter exceeds {{ quota_token }} the API returns **429**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 MongoDB Collections (Optional)
|
||||||
|
|
||||||
|
Only enabled when `MONGO_URI` is supplied (for long‑term audit).
|
||||||
|
|
||||||
|
| Collection | Shape (summary) | Indexes |
|
||||||
|
|--------------------|------------------------------------------------------------|-------------------------------------|
|
||||||
|
| `sbom_history` | Wrapper JSON + `replaceTs` on overwrite | `{imageDigest}` `{created}` |
|
||||||
|
| `policy_versions` | `{_id, yaml, rego, authorId, created}` | `{created}` |
|
||||||
|
| `attestations` ⭑ | SLSA provenance doc + Rekor log pointer | `{imageDigest}` |
|
||||||
|
| `audit_log` | Fully rendered RFC 5424 entries (UI & CLI actions) | `{userId}` `{ts}` |
|
||||||
|
|
||||||
|
Schema detail for **policy_versions**:
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"_id": "6619e90b8c5e1f76",
|
||||||
|
"yaml": "version: 1.0\nrules:\n - …",
|
||||||
|
"rego": null, // filled when Rego uploaded
|
||||||
|
"authorId": "u_1021",
|
||||||
|
"created": "2025-07-14T08:15:04Z",
|
||||||
|
"comment": "Imported via API"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 Policy Schema (YAML v1.0)
|
||||||
|
|
||||||
|
Minimal viable grammar (subset of OSV‑SCHEMA ideas).
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
version: "1.0"
|
||||||
|
rules:
|
||||||
|
- name: Block Critical
|
||||||
|
severity: [Critical]
|
||||||
|
action: block
|
||||||
|
- name: Ignore Low Dev
|
||||||
|
severity: [Low, None]
|
||||||
|
environments: [dev, staging]
|
||||||
|
action: ignore
|
||||||
|
expires: "2026-01-01"
|
||||||
|
- name: Escalate RegionalFeed High
|
||||||
|
sources: [NVD, CNNVD, CNVD, ENISA, JVN, BDU]
|
||||||
|
severity: [High, Critical]
|
||||||
|
action: escalate
|
||||||
|
```
|
||||||
|
|
||||||
|
Validation is performed by `policy:mapping.yaml` JSON‑Schema embedded in backend.
|
||||||
|
|
||||||
|
### 4.1 Rego Variant (Advanced – TODO)
|
||||||
|
|
||||||
|
*Accepted but stored as‑is in `rego` field.*
|
||||||
|
Evaluated via internal **OPA** side‑car once feature graduates from TODO list.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 SLSA Attestation Schema ⭑
|
||||||
|
|
||||||
|
Planned for Q1‑2026 (kept here for early plug‑in authors).
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"id": "prov_0291",
|
||||||
|
"imageDigest": "sha256:e2b9…",
|
||||||
|
"buildType": "https://slsa.dev/container/v1",
|
||||||
|
"builder": {
|
||||||
|
"id": "https://git.stella-ops.ru/ci/stella-runner@sha256:f7b7…"
|
||||||
|
},
|
||||||
|
"metadata": {
|
||||||
|
"invocation": {
|
||||||
|
"parameters": {"GIT_SHA": "f6a1…"},
|
||||||
|
"buildStart": "2025-07-14T06:59:17Z",
|
||||||
|
"buildEnd": "2025-07-14T07:01:22Z"
|
||||||
|
},
|
||||||
|
"completeness": {"parameters": true}
|
||||||
|
},
|
||||||
|
"materials": [
|
||||||
|
{"uri": "git+https://git…", "digest": {"sha1": "f6a1…"}}
|
||||||
|
],
|
||||||
|
"rekorLogIndex": 99817 // entry in local Rekor mirror
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 Validator Contracts
|
||||||
|
|
||||||
|
* For SBOM wrapper – `ISbomValidator` (DLL plug‑in) must return *typed* error list.
|
||||||
|
* For YAML policies – JSON‑Schema at `/schemas/policy‑v1.json`.
|
||||||
|
* For Rego – OPA `opa eval --fail-defined` under the hood.
|
||||||
|
* For **Free‑tier quotas** – `IQuotaService` integration tests ensure `quota:<token>` resets at UTC midnight and produces correct `Retry‑After` headers.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 Migration Notes
|
||||||
|
|
||||||
|
1. **Add `format` column** to existing SBOM wrappers; default to `trivy-json-v2`.
|
||||||
|
2. **Populate `layers` & `partial`** via backfill script (ship with `stellopsctl migrate` wizard).
|
||||||
|
3. Policy YAML previously stored in Redis → copy to Mongo if persistence enabled.
|
||||||
|
4. Prepare `attestations` collection (empty) – safe to create in advance.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 Open Questions / Future Work
|
||||||
|
|
||||||
|
* How to de‑duplicate *identical* Rego policies differing only in whitespace?
|
||||||
|
* Embed *GOST 34.11‑2018* digests when users enable Russian crypto suite?
|
||||||
|
* Should enterprise tiers share the same Redis quota keys or switch to JWT claim `tier != Free` bypass?
|
||||||
|
* Evaluate sliding‑window quota instead of strict daily reset.
|
||||||
|
* Consider rate‑limit for `/layers/missing` to avoid brute‑force enumeration.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9 Change Log
|
||||||
|
|
||||||
|
| Date | Note |
|
||||||
|
|------------|--------------------------------------------------------------------------------|
|
||||||
|
| 2025‑07‑14 | **Added:** `format`, `partial`, delta cache keys, YAML policy schema v1.0. |
|
||||||
|
| 2025‑07‑12 | **Initial public draft** – SBOM wrapper, Redis keyspace, audit collections. |
|
||||||
|
|
||||||
|
---
|
||||||
93
docs/11_GOVERNANCE.md
Executable file
93
docs/11_GOVERNANCE.md
Executable file
@@ -0,0 +1,93 @@
|
|||||||
|
# Stella Ops Project Governance
|
||||||
|
*Lazy Consensus • Maintainer Charter • Transparent Veto*
|
||||||
|
|
||||||
|
> **Scope** – applies to **all** repositories under
|
||||||
|
> `https://git.stella-ops.org/stella-ops/*` unless a sub‑project overrides it
|
||||||
|
> with its own charter approved by the Core Maintainers.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Decision‑making workflow 🗳️
|
||||||
|
|
||||||
|
| Stage | Default vote | Timer |
|
||||||
|
|-------|--------------|-------|
|
||||||
|
| **Docs / non‑code PR** | `+1` | **48 h** |
|
||||||
|
| **Code / tests PR** | `+1` | **7 × 24 h** |
|
||||||
|
| **Security‑sensitive / breaking API** | `+1` + explicit **`security‑LGTM`** | **7 × 24 h** |
|
||||||
|
|
||||||
|
**Lazy‑consensus** – silence = approval once the timer elapses.
|
||||||
|
|
||||||
|
* **Veto `‑1`** must include a concrete concern **and** a path to resolution.
|
||||||
|
* After 3 unresolved vetoes the PR escalates to a **Maintainer Summit** call.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Maintainer approval thresholds 👥
|
||||||
|
|
||||||
|
| Change class | Approvals required | Example |
|
||||||
|
|--------------|-------------------|---------|
|
||||||
|
| **Trivial** | 0 | Typos, comment fixes |
|
||||||
|
| **Non‑trivial** | **2 Maintainers** | New API endpoint, feature flag |
|
||||||
|
| **Security / breaking** | Lazy‑consensus **+ `security‑LGTM`** | JWT validation, crypto swap |
|
||||||
|
|
||||||
|
Approval is recorded via Git forge review or a signed commit trailer
|
||||||
|
`Signed-off-by: <maintainer>`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Becoming (and staying) a Maintainer 🌱
|
||||||
|
|
||||||
|
1. **3 + months** of consistent, high‑quality contributions.
|
||||||
|
2. **Nomination** by an existing Maintainer via issue.
|
||||||
|
3. **7‑day vote** – needs ≥ **⅔ majority** “`+1`”.
|
||||||
|
4. Sign `MAINTAINER_AGREEMENT.md` and enable **2FA**.
|
||||||
|
5. Inactivity > 6 months → automatic emeritus status (can be re‑activated).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Release authority & provenance 🔏
|
||||||
|
|
||||||
|
* Every tag is **co‑signed by at least one Security Maintainer**.
|
||||||
|
* CI emits a **signed SPDX SBOM** + **Cosign provenance**.
|
||||||
|
* Release cadence is fixed – see [public Road‑map](../roadmap/README.md).
|
||||||
|
* Security fixes may create out‑of‑band `x.y.z‑hotfix` tags.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Escalation lanes 🚦
|
||||||
|
|
||||||
|
| Situation | Escalation |
|
||||||
|
|-----------|------------|
|
||||||
|
| Technical deadlock | **Maintainer Summit** (recorded & published) |
|
||||||
|
| Security bug | Follow [Security Policy](../security/01_SECURITY_POLICY.md) |
|
||||||
|
| Code of Conduct violation | See `12_CODE_OF_CONDUCT.md` escalation ladder |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Contribution etiquette 🤝
|
||||||
|
|
||||||
|
* Draft PRs early – CI linting & tests help you iterate.
|
||||||
|
* “There are no stupid questions” – ask in **Matrix #dev**.
|
||||||
|
* Keep commit messages in **imperative mood** (`Fix typo`, `Add SBOM cache`).
|
||||||
|
* Run the `pre‑commit` hook locally before pushing.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Licence reminder 📜
|
||||||
|
|
||||||
|
Stella Ops is **AGPL‑3.0‑or‑later**. By contributing you agree that your
|
||||||
|
patches are released under the same licence.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Appendix A – Maintainer list 📇
|
||||||
|
|
||||||
|
*(Generated via `scripts/gen-maintainers.sh` – edit the YAML, **not** this
|
||||||
|
section directly.)*
|
||||||
|
|
||||||
|
| Handle | Area | Since |
|
||||||
|
|--------|------|-------|
|
||||||
|
| `@alice` | Core scanner • Security | 2025‑04 |
|
||||||
|
| `@bob` | UI • Docs | 2025‑06 |
|
||||||
|
|
||||||
|
---
|
||||||
88
docs/12_CODE_OF_CONDUCT.md
Executable file
88
docs/12_CODE_OF_CONDUCT.md
Executable file
@@ -0,0 +1,88 @@
|
|||||||
|
# Stella Ops Code of Conduct
|
||||||
|
*Contributor Covenant v2.1 + project‑specific escalation paths*
|
||||||
|
|
||||||
|
> We pledge to make participation in the Stella Ops community a
|
||||||
|
> harassment‑free experience for everyone, regardless of age, body size,
|
||||||
|
> disability, ethnicity, sex characteristics, gender identity and expression,
|
||||||
|
> level of experience, education, socio‑economic status, nationality,
|
||||||
|
> personal appearance, race, religion, or sexual identity and orientation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 · Our standard
|
||||||
|
|
||||||
|
This project adopts the
|
||||||
|
[**Contributor Covenant v2.1**](https://www.contributor-covenant.org/version/2/1/code_of_conduct/)
|
||||||
|
with the additions and clarifications listed below.
|
||||||
|
If anything here conflicts with the upstream covenant, *our additions win*.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Scope
|
||||||
|
|
||||||
|
| Applies to | Examples |
|
||||||
|
|------------|----------|
|
||||||
|
| **All official spaces** | Repos under `git.stella-ops.org/stella-ops.org/*`, Matrix rooms (`#stellaops:*`), issue trackers, pull‑request reviews, community calls, and any event officially sponsored by Stella Ops |
|
||||||
|
| **Unofficial spaces that impact the project** | Public social‑media posts that target or harass community members, coordinated harassment campaigns, doxxing, etc. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Reporting a violation ☎️
|
||||||
|
|
||||||
|
| Channel | When to use |
|
||||||
|
|---------|-------------|
|
||||||
|
| `conduct@stella-ops.org` (PGP key [`keys/#pgp`](../keys/#pgp)) | **Primary, confidential** – anything from micro‑aggressions to serious harassment |
|
||||||
|
| Matrix `/msg @coc-bot:libera.chat` | Quick, in‑chat nudge for minor issues |
|
||||||
|
| Public issue with label `coc` | Transparency preferred and **you feel safe** doing so |
|
||||||
|
|
||||||
|
We aim to acknowledge **within 48 hours** (business days, UTC).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Incident handlers 🛡️
|
||||||
|
|
||||||
|
| Name | Role | Alt‑contact |
|
||||||
|
|------|------|-------------|
|
||||||
|
| Alice Doe (`@alice`) | Core Maintainer • Security WG | `+1‑555‑0123` |
|
||||||
|
| Bob Ng (`@bob`) | UI Maintainer • Community lead | `+1‑555‑0456` |
|
||||||
|
|
||||||
|
If **any** handler is the subject of a complaint, skip them and contact another
|
||||||
|
handler directly or email `conduct@stella-ops.org` only.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Enforcement ladder ⚖️
|
||||||
|
|
||||||
|
1. **Private coaching / mediation** – first attempt to resolve misunderstandings.
|
||||||
|
2. **Warning** – written, includes corrective actions & cooling‑off period.
|
||||||
|
3. **Temporary exclusion** – mute (chat), read‑only (repo) for *N* days.
|
||||||
|
4. **Permanent ban** – removal from all official spaces + revocation of roles.
|
||||||
|
|
||||||
|
All decisions are documented **privately** (for confidentiality) but a summary
|
||||||
|
is published quarterly in the “Community Health” report.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Appeals 🔄
|
||||||
|
|
||||||
|
A sanctioned individual may appeal **once** by emailing
|
||||||
|
`appeals@stella-ops.org` within **14 days** of the decision.
|
||||||
|
Appeals are reviewed by **three maintainers not involved in the original case**
|
||||||
|
and resolved within 30 days.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · No‑retaliation policy 🛑
|
||||||
|
|
||||||
|
Retaliation against reporters **will not be tolerated** and results in
|
||||||
|
immediate progression to **Step 4** of the enforcement ladder.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Attribution & licence 📜
|
||||||
|
|
||||||
|
* Text adapted from Contributor Covenant v2.1 –
|
||||||
|
Copyright © 2014‑2024 Contributor Covenant Contributors
|
||||||
|
Licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
|
||||||
|
|
||||||
|
---
|
||||||
167
docs/12_PERFORMANCE_WORKBOOK.md
Executable file
167
docs/12_PERFORMANCE_WORKBOOK.md
Executable file
@@ -0,0 +1,167 @@
|
|||||||
|
# 12 - Performance Workbook
|
||||||
|
|
||||||
|
*Purpose* – define **repeatable, data‑driven** benchmarks that guard Stella Ops’ core pledge:
|
||||||
|
> *“P95 vulnerability feedback in ≤ 5 seconds.”*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Benchmark Scope
|
||||||
|
|
||||||
|
| Area | Included | Excluded |
|
||||||
|
|------------------|----------------------------------|---------------------------|
|
||||||
|
| SBOM‑first scan | Trivy engine w/ warmed DB | Full image unpack ≥ 300 MB |
|
||||||
|
| Delta SBOM ⭑ | Missing‑layer lookup & merge | Multi‑arch images |
|
||||||
|
| Policy eval ⭑ | YAML → JSON → rule match | Rego (until GA) |
|
||||||
|
| Feed merge | NVD JSON 2023–2025 | GHSA GraphQL (plugin) |
|
||||||
|
| Quota wait‑path | 5 s soft‑wait, 60 s hard‑wait behaviour | Paid tiers (unlimited) |
|
||||||
|
| API latency | REST `/scan`, `/layers/missing` | UI SPA calls |
|
||||||
|
|
||||||
|
⭑ = new in July 2025.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 Hardware Baseline (Reference Rig)
|
||||||
|
|
||||||
|
| Element | Spec |
|
||||||
|
|-------------|------------------------------------|
|
||||||
|
| CPU | 8 vCPU (Intel Ice‑Lake equiv.) |
|
||||||
|
| Memory | 16 GiB |
|
||||||
|
| Disk | NVMe SSD, 3 GB/s R/W |
|
||||||
|
| Network | 1 Gbit virt. switch |
|
||||||
|
| Container | Docker 25.0 + overlay2 |
|
||||||
|
| OS | Ubuntu 22.04 LTS (kernel 6.8) |
|
||||||
|
|
||||||
|
*All P95 targets assume a **single‑node** deployment on this rig unless stated.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 Phase Targets & Gates
|
||||||
|
|
||||||
|
| Phase (ID) | Target P95 | Gate (CI) | Rationale |
|
||||||
|
|-----------------------|-----------:|-----------|----------------------------------------|
|
||||||
|
| **SBOM_FIRST** | ≤ 5 s | `hard` | Core UX promise. |
|
||||||
|
| **IMAGE_UNPACK** | ≤ 10 s | `soft` | Fallback path for legacy flows. |
|
||||||
|
| **DELTA_SBOM** ⭑ | ≤ 1 s | `hard` | Needed to stay sub‑5 s for big bases. |
|
||||||
|
| **POLICY_EVAL** ⭑ | ≤ 50 ms | `hard` | Keeps gate latency invisible to users. |
|
||||||
|
| **QUOTA_WAIT** ⭑ | *soft* ≤ 5 s<br>*hard* ≤ 60 s | `hard` | Ensures graceful Free‑tier throttling. |
|
||||||
|
| **SCHED_RESCAN** | ≤ 30 s | `soft` | Nightly batch – not user‑facing. |
|
||||||
|
| **FEED_MERGE** | ≤ 60 s | `soft` | Off‑peak cron @ 01:00. |
|
||||||
|
| **API_P95** | ≤ 200 ms | `hard` | UI snappiness. |
|
||||||
|
|
||||||
|
*Gate* legend — `hard`: break CI if regression > 3 × target,
|
||||||
|
`soft`: raise warning & issue ticket.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 Test Harness
|
||||||
|
|
||||||
|
* **Runner** – `perf/run.sh`, accepts `--phase` and `--samples`.
|
||||||
|
* **Metrics** – Prometheus + `jq` extracts; aggregated via `scripts/aggregate.ts`.
|
||||||
|
* **CI** – GitLab CI job *benchmark* publishes JSON to `bench‑artifacts/`.
|
||||||
|
* **Visualisation** – Grafana dashboard *Stella‑Perf* (provisioned JSON).
|
||||||
|
|
||||||
|
> **Note** – harness mounts `/var/cache/trivy` tmpfs to avoid disk noise.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 Current Results (July 2025)
|
||||||
|
|
||||||
|
| Phase | Samples | Mean (s) | P95 (s) | Target OK? |
|
||||||
|
|---------------|--------:|---------:|--------:|-----------:|
|
||||||
|
| SBOM_FIRST | 100 | 3.7 | 4.9 | ✅ |
|
||||||
|
| IMAGE_UNPACK | 50 | 6.4 | 9.2 | ✅ |
|
||||||
|
| **DELTA_SBOM**| 100 | 0.46 | 0.83 | ✅ |
|
||||||
|
| **POLICY_EVAL** | 1 000 | 0.021 | 0.041 | ✅ |
|
||||||
|
| **QUOTA_WAIT** | 80 | 4.0* | 4.9* | ✅ |
|
||||||
|
| SCHED_RESCAN | 10 | 18.3 | 24.9 | ✅ |
|
||||||
|
| FEED_MERGE | 3 | 38.1 | 41.0 | ✅ |
|
||||||
|
| API_P95 | 20 000 | 0.087 | 0.143 | ✅ |
|
||||||
|
|
||||||
|
*Data files:* `bench-artifacts/2025‑07‑14/phase‑stats.json`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 Δ‑SBOM Micro‑Benchmark Detail
|
||||||
|
|
||||||
|
### 5.1 Scenario
|
||||||
|
|
||||||
|
1. Base image `python:3.12-slim` already scanned (all layers cached).
|
||||||
|
2. Application layer (`COPY . /app`) triggers new digest.
|
||||||
|
3. `Stella CLI` lists **7** layers, backend replies *6 hit*, *1 miss*.
|
||||||
|
4. Builder scans **only 1 layer** (~9 MiB, 217 files) & uploads delta.
|
||||||
|
|
||||||
|
### 5.2 Key Timings
|
||||||
|
|
||||||
|
| Step | Time (ms) |
|
||||||
|
|---------------------|----------:|
|
||||||
|
| `/layers/missing` | 13 |
|
||||||
|
| Trivy single layer | 655 |
|
||||||
|
| Upload delta blob | 88 |
|
||||||
|
| Backend merge + CVE | 74 |
|
||||||
|
| **Total wall‑time** | **830 ms** |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 Quota Wait‑Path Benchmark Detail
|
||||||
|
|
||||||
|
### 6.1 Scenario
|
||||||
|
|
||||||
|
1. Free‑tier token reaches **scan #200** – dashboard shows yellow banner.
|
||||||
|
|
||||||
|
### 6.2 Key Timings
|
||||||
|
|
||||||
|
| Step | Time (ms) |
|
||||||
|
|------------------------------------|----------:|
|
||||||
|
| `/quota/check` Redis LUA INCR | 0.8 |
|
||||||
|
| Soft wait sleep (server) | 5 000 |
|
||||||
|
| Hard wait sleep (server) | 60 000 |
|
||||||
|
| End‑to‑end wall‑time (soft‑hit) | 5 003 |
|
||||||
|
| End‑to‑end wall‑time (hard‑hit) | 60 004 |
|
||||||
|
|
||||||
|
---
|
||||||
|
## 7 Policy Eval Bench
|
||||||
|
|
||||||
|
### 7.1 Setup
|
||||||
|
|
||||||
|
* Policy YAML: **28** rules, mix severity & package conditions.
|
||||||
|
* Input: scan result JSON with **1 026** findings.
|
||||||
|
* Evaluator: custom rules engine (Go structs → map look‑ups).
|
||||||
|
|
||||||
|
### 7.2 Latency Histogram
|
||||||
|
|
||||||
|
```
|
||||||
|
0‑10 ms ▇▇▇▇▇▇▇▇▇▇ 38 %
|
||||||
|
10‑20 ms ▇▇▇▇▇▇▇▇▇▇ 42 %
|
||||||
|
20‑40 ms ▇▇▇▇▇▇ 17 %
|
||||||
|
40‑50 ms ▇ 3 %
|
||||||
|
```
|
||||||
|
|
||||||
|
P99 = 48 ms. Meets 50 ms gate.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 Trend Snapshot
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
_Plot generated weekly by `scripts/update‑trend.py`; shows last 12 weeks P95 per phase._
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9 Action Items
|
||||||
|
|
||||||
|
1. **Image Unpack** – Evaluate zstd for layer decompress; aim to shave 1 s.
|
||||||
|
2. **Feed Merge** – Parallelise regional XML feed parse (plugin) once stable.
|
||||||
|
3. **Rego Support** – Prototype OPA side‑car; target ≤ 100 ms eval.
|
||||||
|
4. **Concurrency** – Stress‑test 100 rps on 4‑node Redis cluster (Q4‑2025).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10 Change Log
|
||||||
|
|
||||||
|
| Date | Note |
|
||||||
|
|------------|-------------------------------------------------------------------------|
|
||||||
|
| 2025‑07‑14 | Added Δ‑SBOM & Policy Eval phases; updated targets & current results. |
|
||||||
|
| 2025‑07‑12 | First public workbook (SBOM‑first, image‑unpack, feed merge). |
|
||||||
|
|
||||||
|
---
|
||||||
209
docs/13_RELEASE_ENGINEERING_PLAYBOOK.md
Executable file
209
docs/13_RELEASE_ENGINEERING_PLAYBOOK.md
Executable file
@@ -0,0 +1,209 @@
|
|||||||
|
# 13 · Release Engineering Playbook — Stella Ops
|
||||||
|
|
||||||
|
|
||||||
|
A concise, automation‑first guide describing **how source code on `main` becomes a verifiably signed, air‑gap‑friendly release**.
|
||||||
|
It is opinionated for offline use‑cases and supply‑chain security (SLSA ≥ level 2 today, aiming for level 3).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Release Philosophy
|
||||||
|
|
||||||
|
* **Fast but fearless** – every commit on `main` must be releasable; broken builds break the build, not the team.
|
||||||
|
* **Reproducible** – anyone can rebuild byte‑identical artefacts with a single `make release` offline.
|
||||||
|
* **Secure by default** – every artefact ships with a SBOM, Cosign signature and (future) Rekor log entry.
|
||||||
|
* **Offline‑first** – all dependencies are vendored or mirrored into the internal registry; no Internet required at runtime.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 Versioning & Branching
|
||||||
|
|
||||||
|
| Branch | Purpose | Auto‑publish? |
|
||||||
|
| ------------- | ------------------------------ | --------------------------------------- |
|
||||||
|
| `main` | Always‑green development trunk | `nightly-*` images |
|
||||||
|
| `release/X.Y` | Stabilise a minor line | `stella:X.Y-rcN` |
|
||||||
|
| Tags | `X.Y.Z` = SemVer | `stella:X.Y.Z`, OUK tarball, Helm chart |
|
||||||
|
|
||||||
|
* **SemVer** – MAJOR for breaking API/CLI changes, MINOR for features, PATCH for fixes.
|
||||||
|
* Release tags are **signed** (`git tag -s`) with the Stella Ops GPG key (`0x90C4…`).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 CI/CD Overview (GitLab CI + GitLab Runner)
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph LR
|
||||||
|
A[push / MR] --> Lint
|
||||||
|
Lint --> Unit
|
||||||
|
Unit --> Build
|
||||||
|
Build --> Test-Container
|
||||||
|
Test-Container --> SBOM
|
||||||
|
SBOM --> Sign
|
||||||
|
Sign --> Publish
|
||||||
|
Publish --> E2E
|
||||||
|
Publish --> Notify
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pipeline Stages
|
||||||
|
|
||||||
|
| Stage | Key tasks |
|
||||||
|
| ------------------ | ------------------------------------------------------------------------------------------------ |
|
||||||
|
| **Lint** | ESLint, golangci‑lint, hadolint, markdown‑lint. |
|
||||||
|
| **Unit** | `dotnet test`, `go test`, Jest UI tests. |
|
||||||
|
| **Quota unit‑tests 🏷** | Validate QuotaService logic: reset at UTC, 5 s vs 60 s waits, header correctness. |
|
||||||
|
| **Build** | Multi‑arch container build (`linux/amd64`, `linux/arm64`) using **BuildKit** + `--provenance` 📌. |
|
||||||
|
| **Test‑Container** | Spin up compose file, run smoke APIs. |
|
||||||
|
| **SBOM** 📌 | Invoke **StellaOps.SBOMBuilder** to generate SPDX JSON + attach `.sbom` label to image. |
|
||||||
|
| **Sign** | Sign image with **Cosign** (`cosign sign --key cosign.key`). |
|
||||||
|
| **Publish** | Push to `registry.git.stella-ops.org`. |
|
||||||
|
| **E2E** | Kind‑based Kubernetes test incl. Zastava DaemonSet; verify sub‑5 s scan SLA. |
|
||||||
|
| **Notify** | Report to Mattermost & GitLab Slack app. |
|
||||||
|
| **OfflineToken** | Call `JwtIssuer.Generate(exp=30d)` → store `client.jwt` artefact → attach to OUK build context |
|
||||||
|
|
||||||
|
*All stages run in parallel where possible; max wall‑time < 15 min.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 Container Image Strategy
|
||||||
|
|
||||||
|
| Image | Registry Tag | Contents |
|
||||||
|
| ------------------------------ | --------------------------- | ---------------------------------------------------------------------- |
|
||||||
|
| **backend** | `stella/backend:{ver}` | ASP.NET API, plugin loader. |
|
||||||
|
| **ui** | `stella/ui:{ver}` | Pre‑built Angular SPA. |
|
||||||
|
| **runner-trivy** | `stella/runner-trivy:{ver}` | Trivy CLI + SPDX/CycloneDX 🛠. |
|
||||||
|
| **runner-grype** | `stella/runner-grype:{ver}` | Optional plug‑in scanner. |
|
||||||
|
| **🏷️ StellaOps.Registry** 📌 | `stella/registry:{ver}` | Scratch image embedding Docker Registry v2 + Cosign policy controller. |
|
||||||
|
| **🏷️ StellaOps.MutePolicies** 📌 | `stella/policies:{ver}` | Sidecar serving policy bundles. |
|
||||||
|
| **🏷️ StellaOps.Attestor** 📌 | `stella/attestor:{ver}` | SLSA provenance & Rekor signer (future). |
|
||||||
|
|
||||||
|
*Images are **`--label org.opencontainers.image.source=git.stella-ops.ru`** and include SBOMs generated at build time.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 📌 Offline Update Kit (OUK) Build & Distribution
|
||||||
|
|
||||||
|
**Purpose** – deliver updated CVE feeds & Trivy DB to air‑gapped clusters.
|
||||||
|
|
||||||
|
### 4.1 CLI Tool
|
||||||
|
|
||||||
|
*Go binary `ouk` lives in `tools/ouk/`.*
|
||||||
|
|
||||||
|
```sh
|
||||||
|
ouk fetch \
|
||||||
|
--nvd --osv \
|
||||||
|
--trivy-db --date $(date -I) \
|
||||||
|
--output ouk-$(date +%Y%m%d).tar.gz \
|
||||||
|
--sign cosign.key
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4.2 Pipeline Hook
|
||||||
|
|
||||||
|
* Runs on **first Friday** each month (cron).
|
||||||
|
* Generates tarball, signs it, uploads to **GitLab Release asset**.
|
||||||
|
* SHA‑256 + signature published alongside.
|
||||||
|
|
||||||
|
### 4.3 Activation Flow (runtime)
|
||||||
|
|
||||||
|
1. Admin uploads `.tar.gz` via **UI → Settings → Offline Updates (OUK)**.
|
||||||
|
2. Backend verifies Cosign signature & digest.
|
||||||
|
3. Files extracted into `var/lib/stella/db`.
|
||||||
|
4. Redis caches invalidated; Dashboard “Feed Age” ticks green.
|
||||||
|
5. Audit event `ouk_update` stored.
|
||||||
|
|
||||||
|
### 4.4 Token Detail
|
||||||
|
|
||||||
|
client.jwt placed under /root/ inside the tarball.
|
||||||
|
CI job fails if token expiry < 29 days (guard against stale caches).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 Artifact Signing & Transparency
|
||||||
|
|
||||||
|
| Artefact | Signer | Tool |
|
||||||
|
| ------------ | --------------- | --------------------- |
|
||||||
|
| Git tags | GPG (`0x90C4…`) | `git tag -s` |
|
||||||
|
| Containers | Cosign key pair | `cosign sign` |
|
||||||
|
| Helm Charts | prov file | `helm package --sign` |
|
||||||
|
| OUK tarballs | Cosign | `cosign sign-blob` |
|
||||||
|
|
||||||
|
**Rekor** integration is **TODO** – once the internal Rekor mirror is online (`StellaOpsAttestor`) a post‑publish job will submit transparency log entries.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 Release Checklist
|
||||||
|
|
||||||
|
1. CI pipeline green.
|
||||||
|
2. Bump `VERSION` file.
|
||||||
|
3. Tag `git tag -s X.Y.Z -m "Release X.Y.Z"` & push.
|
||||||
|
4. GitLab CI auto‑publishes images & charts.
|
||||||
|
5. Draft GitLab **Release Notes** using `tools/release-notes-gen`.
|
||||||
|
6. Verify SBOM attachment with `stella sbom verify stella/backend:X.Y.Z`.
|
||||||
|
7. Smoke‑test OUK tarball in offline lab.
|
||||||
|
8. Announce in `#stella-release` Mattermost channel.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 Hot‑fix Procedure
|
||||||
|
|
||||||
|
* Branch from latest tag → `hotfix/X.Y.Z+1-hf1`.
|
||||||
|
* Apply minimal patch, add regression test.
|
||||||
|
* CI pipeline (with reduced stages) must pass.
|
||||||
|
* Tag `X.Y.Z+1`.
|
||||||
|
* Publish only container + Helm chart; OUK not rebuilt.
|
||||||
|
* Cherry‑pick back to `main`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 Deprecation & End‑of‑Life Policy
|
||||||
|
|
||||||
|
| Feature | Deprecation notice | Removal earliest |
|
||||||
|
| ------------------------ | ------------------ | ---------------- |
|
||||||
|
| Legacy CSV policy import | 2025‑10‑01 | 2026‑04‑01 |
|
||||||
|
| Docker v1 Registry auth | 2025‑12‑01 | 2026‑06‑01 |
|
||||||
|
| In‑image Trivy DB | 2025‑12‑15 | 2026‑03‑15 |
|
||||||
|
|
||||||
|
*At least 6 months notice; removal requires major version bump.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9 📌 Non‑Commercial Usage Rules (English canonical)
|
||||||
|
|
||||||
|
1. **Free for internal security assessments** (company or personal).
|
||||||
|
2. **SaaS resale / re‑hosting prohibited** without prior written consent (AGPL §13).
|
||||||
|
3. If you distribute a fork with UI or backend modifications **you must**:
|
||||||
|
* Publish the complete modified source code.
|
||||||
|
* Retain the original Stella Ops attribution in UI footer and CLI `--version`.
|
||||||
|
4. All third‑party dependencies remain under their respective licences (MIT, Apache‑2.0, ISC, BSD).
|
||||||
|
5. Deployments in state‑regulated or classified environments must obey **applicable local regulations** governing cryptography and software distribution.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10 Best Practices Snapshot 📌
|
||||||
|
|
||||||
|
* **SBOM‑per‑image** → attach at build time; store as OCI artifact for supply‑chain introspection.
|
||||||
|
* **Provenance flag** (`--provenance=true`) in BuildKit fulfils SLSA 2 requirement.
|
||||||
|
* Use **multi‑arch, reproducible builds** (`SOURCE_DATE_EPOCH` pins timestamps).
|
||||||
|
* All pipelines enforce **Signed‑off‑by (DCO)**; CI fails if trailer missing.
|
||||||
|
* `cosign policy` ensures only images signed by the project key run in production.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 11 Contributing to Release Engineering
|
||||||
|
|
||||||
|
* Fork & create MR to `infra/release-*`.
|
||||||
|
* All infra changes require green **`integration-e2e-offline`** job.
|
||||||
|
* Discuss larger infra migrations in `#sig-release` Mattermost; decisions recorded in `ADR/` folder.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 12 Change Log (high‑level)
|
||||||
|
|
||||||
|
| Version | Date | Note |
|
||||||
|
| ------- | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| v2.1 | 2025‑07‑15 | Added OUK build/publish pipeline, internal registry image (`StellaOps.Registry`), non‑commercial usage rules extraction, SBOM stage, BuildKit provenance. |
|
||||||
|
| v2.0 | 2025‑07‑12 | Initial open‑sourcing of Release Engineering guide. |
|
||||||
|
| v1.1 | 2025‑07‑09 | Fixed inner fencing; added retention policy |
|
||||||
|
| v1.0 | 2025‑07‑09 | Initial playbook |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*(End of Release Engineering Playbook v2.1)*
|
||||||
101
docs/13_SECURITY_POLICY.md
Executable file
101
docs/13_SECURITY_POLICY.md
Executable file
@@ -0,0 +1,101 @@
|
|||||||
|
# Stella Ops Security Policy & Responsible Disclosure
|
||||||
|
*Version 3 · 2025‑07‑15*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 · Supported versions 🗓️
|
||||||
|
|
||||||
|
| Release line | Status | Security fix window |
|
||||||
|
|--------------|--------|---------------------|
|
||||||
|
| **v0.1 α** (late 2025) | *Upcoming* | 90 days after GA of v0.2 |
|
||||||
|
| **v0.2 β** (Q1 2026) | *Planned* | 6 months after GA of v0.3 |
|
||||||
|
| **v0.3 β** (Q2 2026) | *Planned* | 6 months after GA of v0.4 |
|
||||||
|
| **v0.4 RC** (Q3 2026) | *Planned* | Until v1.0 GA |
|
||||||
|
| **v1.0 GA** (Q4 2026) | *Future LTS* | 24 months from release |
|
||||||
|
|
||||||
|
Pre‑GA lines receive **critical** and **high**‑severity fixes only.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · How to report a vulnerability 🔒
|
||||||
|
|
||||||
|
| Channel | PGP‑encrypted? | Target SLA |
|
||||||
|
|---------|---------------|-----------|
|
||||||
|
| `security@stella-ops.org` | **Yes** – PGP key: [`/keys/#pgp`](../keys/#pgp) | 72 h acknowledgement |
|
||||||
|
| Matrix DM → `@sec‑bot:libera.chat` | Optional | 72 h acknowledgement |
|
||||||
|
| Public issue with label `security` | No (for non‑confidential flaws) | 7 d acknowledgement |
|
||||||
|
|
||||||
|
Please include:
|
||||||
|
|
||||||
|
* Affected version(s) and environment
|
||||||
|
* Reproduction steps or PoC
|
||||||
|
* Impact assessment (data exposure, RCE, DoS, etc.)
|
||||||
|
* Preferred disclosure timeline / CVE request info
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Our disclosure process 📜
|
||||||
|
|
||||||
|
1. **Triage** – confirm the issue, assess severity, assign CVSS v4 score.
|
||||||
|
2. **Patch development** – branch created in a private mirror; PoCs kept confidential.
|
||||||
|
3. **Pre‑notification** – downstream packagers & large adopters alerted **72 h** before release.
|
||||||
|
4. **Co‑ordinated release** – patched version + advisory (GHSA + CVE) + SBOM delta.
|
||||||
|
5. **Credits** – researchers listed in release notes (opt‑in).
|
||||||
|
|
||||||
|
We aim for **30 days** from report to release for critical/high issues; medium/low may wait for the next scheduled release.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Existing safeguards ✅
|
||||||
|
|
||||||
|
| Layer | Control |
|
||||||
|
|-------|---------|
|
||||||
|
| **Release integrity** | `cosign` signatures + SPDX SBOM on every artefact |
|
||||||
|
| **Build pipeline** | Reproducible, fully declarative CI; SBOM diff verified in CI |
|
||||||
|
| **Runtime hardening** | Non‑root UID, distroless‑glibc base, SELinux/AppArmor profiles, cgroup CPU/RAM caps |
|
||||||
|
| **Access logs** | Retained **7 days**, then `sha256(ip)` hash |
|
||||||
|
| **Quota ledger** | Stores *token‑ID hash* only, no plain e‑mail/IP |
|
||||||
|
| **Air‑gap support** | Signed **Offline Update Kit** (OUK) validated before import |
|
||||||
|
| **Secure defaults** | TLS 1.3 (or stronger via plug‑in), HTTP Strict‑Transport‑Security, Content‑Security‑Policy |
|
||||||
|
| **SBOM re‑scan** | Nightly cron re‑checks previously “clean” images against fresh CVE feeds |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Cryptographic keys 🔑
|
||||||
|
|
||||||
|
| Purpose | Fingerprint | Where to fetch |
|
||||||
|
|---------|-------------|----------------|
|
||||||
|
| **PGP (sec‑team)** | `3A5C 71F3 ... 7D9B` | [`/keys/#pgp`](../keys/#pgp) |
|
||||||
|
| **Cosign release key** | `AB12 ... EF90` | [`/keys/#cosign`](../keys/#cosign) |
|
||||||
|
|
||||||
|
Verify all downloads (TLS 1.3 by default; 1.2 allowed only via a custom TLS provider such as GOST):
|
||||||
|
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cosign verify \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
registry.stella-ops.org/stella-ops/stella-ops:<VERSION>
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Private‑feed mirrors 🌐
|
||||||
|
|
||||||
|
The **Feedser (vulnerability ingest/merge/export service)** provides signed JSON and Trivy DB snapshots that merge:
|
||||||
|
|
||||||
|
* OSV + GHSA
|
||||||
|
* (optional) NVD 2.0, CNNVD, CNVD, ENISA, JVN and BDU regionals
|
||||||
|
|
||||||
|
The snapshot ships in every Offline Update Kit and is validated with an in‑toto SLSA attestation at import time.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Hall of Thanks 🏆
|
||||||
|
|
||||||
|
We are grateful to the researchers who help keep Stella Ops safe:
|
||||||
|
|
||||||
|
| Release | Researcher | Handle / Org |
|
||||||
|
| ------- | ------------------ | ------------ |
|
||||||
|
| *empty* | *(your name here)* | |
|
||||||
|
|
||||||
|
---
|
||||||
112
docs/14_GLOSSARY_OF_TERMS.md
Executable file
112
docs/14_GLOSSARY_OF_TERMS.md
Executable file
@@ -0,0 +1,112 @@
|
|||||||
|
# 14 · Glossary of Terms — Stella Ops
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 0 Purpose
|
||||||
|
A concise, single‑page **“what does that acronym actually mean?”** reference for
|
||||||
|
developers, DevOps engineers, IT managers and auditors who are new to the
|
||||||
|
Stella Ops documentation set.
|
||||||
|
|
||||||
|
*If you meet a term in any Stella Ops doc that is **not** listed here, please
|
||||||
|
open a PR and append it alphabetically.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## A – C
|
||||||
|
|
||||||
|
| Term | Short definition | Links / notes |
|
||||||
|
|------|------------------|---------------|
|
||||||
|
| **ADR** | *Architecture Decision Record* – lightweight Markdown file that captures one irreversible design decision. | ADR template lives at `/docs/adr/` |
|
||||||
|
| **AIRE** | *AI Risk Evaluator* – optional Plus/Pro plug‑in that suggests mute rules using an ONNX model. | Commercial feature |
|
||||||
|
| **Azure‑Pipelines** | CI/CD service in Microsoft Azure DevOps. | Recipe in Pipeline Library |
|
||||||
|
| **BDU** | Russian (FSTEC) national vulnerability database: *База данных уязвимостей*. | Merged with NVD by Feedser (vulnerability ingest/merge/export service) |
|
||||||
|
| **BuildKit** | Modern Docker build engine with caching and concurrency. | Needed for layer cache patterns |
|
||||||
|
| **CI** | *Continuous Integration* – automated build/test pipeline. | Stella integrates via CLI |
|
||||||
|
| **Cosign** | Open‑source Sigstore tool that signs & verifies container images **and files**. | Images & OUK tarballs |
|
||||||
|
| **CWV / CLS** | *Core Web Vitals* metric – Cumulative Layout Shift. | UI budget ≤ 0.1 |
|
||||||
|
| **CycloneDX** | Open SBOM (BOM) standard alternative to SPDX. | Planned report format plug‑in |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## D – G
|
||||||
|
|
||||||
|
| Term | Definition | Notes |
|
||||||
|
|------|------------|-------|
|
||||||
|
| **Digest (image)** | SHA‑256 hash uniquely identifying a container image or layer. | Pin digests for reproducible builds |
|
||||||
|
| **Docker‑in‑Docker (DinD)** | Running Docker daemon inside a CI container. | Used in GitHub / GitLab recipes |
|
||||||
|
| **DTO** | *Data Transfer Object* – C# record serialised to JSON. | Schemas in doc 11 |
|
||||||
|
| **Feedser** | Vulnerability ingest/merge/export service consolidating OSV, GHSA, NVD 2.0, CNNVD, CNVD, ENISA, JVN and BDU feeds into the canonical MongoDB store and export artifacts. | Cron default `0 1 * * *` |
|
||||||
|
| **FSTEC** | Russian regulator issuing SOBIT certificates. | Pro GA target |
|
||||||
|
| **Gitea** | Self‑hosted Git service – mirrors GitHub repo. | OSS hosting |
|
||||||
|
| **GOST TLS** | TLS cipher‑suites defined by Russian GOST R 34.10‑2012 / 34.11‑2012. | Provided by `OpenSslGost` or CryptoPro |
|
||||||
|
| **Grype** | Alternative OSS vulnerability scanner; can be hot‑loaded as plug‑in. | Scanner interface `IScannerRunner` |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## H – L
|
||||||
|
|
||||||
|
| Term | Definition | Notes |
|
||||||
|
|------|------------|-------|
|
||||||
|
| **Helm** | Kubernetes package manager (charts). | Beta chart under `/charts/core` |
|
||||||
|
| **Hot‑load** | Runtime discovery & loading of plug‑ins **without restart**. | Cosign‑signed DLLs |
|
||||||
|
| **Hyperfine** | CLI micro‑benchmark tool used in Performance Workbook. | Outputs CSV |
|
||||||
|
| **JWT** | *JSON Web Token* – bearer auth token issued by OpenIddict. | Scope `scanner`, `admin`, `ui` |
|
||||||
|
| **K3s / RKE2** | Lightweight Kubernetes distributions (Rancher). | Supported in K8s guide |
|
||||||
|
| **Kubernetes NetworkPolicy** | K8s resource controlling pod traffic. | Redis/Mongo isolation |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## M – O
|
||||||
|
|
||||||
|
| Term | Definition | Notes |
|
||||||
|
|------|------------|-------|
|
||||||
|
| **Mongo (optional)** | Document DB storing > 180 day history and audit logs. | Off by default in Core |
|
||||||
|
| **Mute rule** | JSON object that suppresses specific CVEs until expiry. | Schema `mute-rule‑1.json` |
|
||||||
|
| **NVD** | US‑based *National Vulnerability Database*. | Primary CVE source |
|
||||||
|
| **ONNX** | Portable neural‑network model format; used by AIRE. | Runs in‑process |
|
||||||
|
| **OpenIddict** | .NET library that implements OAuth2 / OIDC in Stella backend. | Embedded IdP |
|
||||||
|
| **OUK** | *Offline Update Kit* – signed tarball with images + feeds for air‑gap. | Admin guide #24 |
|
||||||
|
| **OTLP** | *OpenTelemetry Protocol* – exporter for traces & metrics. | `/metrics` endpoint |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## P – S
|
||||||
|
|
||||||
|
| Term | Definition | Notes |
|
||||||
|
|------|------------|-------|
|
||||||
|
| **P95** | 95th‑percentile latency metric. | Target ≤ 5 s SBOM path |
|
||||||
|
| **PDF SAR** | *Security Assessment Report* PDF produced by Pro edition. | Cosign‑signed |
|
||||||
|
| **Plug‑in** | Hot‑loadable DLL implementing a Stella contract (`IScannerRunner`, `ITlsProvider`, etc.). | Signed with Cosign |
|
||||||
|
| **Problem Details** | RFC 7807 JSON error format returned by API. | See API ref §0 |
|
||||||
|
| **Redis** | In‑memory datastore used for queue + cache. | Port 6379 |
|
||||||
|
| **Rekor** | Sigstore transparency log; future work for signature anchoring. | Road‑map P4 |
|
||||||
|
| **RPS** | *Requests Per Second*. | Backend perf budget 40 rps |
|
||||||
|
| **SBOM** | *Software Bill of Materials* – inventory of packages in an image. | Trivy JSON v2 |
|
||||||
|
| **Stella CLI** | Lightweight CLI that submits SBOMs for vulnerability scanning. | See CI recipes |
|
||||||
|
| **Seccomp** | Linux syscall filter JSON profile. | Backend shipped non‑root |
|
||||||
|
| **SLA** | *Service‑Level Agreement* – 24 h / 1‑ticket for Pro. | SRE runbook |
|
||||||
|
| **Span<T>** | .NET ref‑like struct for zero‑alloc slicing. | Allowed with benchmarks |
|
||||||
|
| **Stryker.NET** | Mutation testing runner used on critical libs. | Coverage ≥ 60 % |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## T – Z
|
||||||
|
|
||||||
|
| Term | Definition | Notes |
|
||||||
|
|------|------------|-------|
|
||||||
|
| **Trivy** | OSS CVE scanner powering the default `IScannerRunner`. | CLI pinned 0.64 |
|
||||||
|
| **Trivy‑srv** | Long‑running Trivy server exposing gRPC API; speeds up remote scans. | Variant A |
|
||||||
|
| **UI tile** | Dashboard element showing live metric (scans today, feed age, etc.). | Angular Signals |
|
||||||
|
| **WebSocket** | Full‑duplex channel (`/ws/scan`, `/ws/stats`) for UI real‑time. | Used by tiles |
|
||||||
|
| **Zastava** | Lightweight agent that inventories running containers and can enforce kills. | |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 11 Change log
|
||||||
|
|
||||||
|
| Version | Date | Notes |
|
||||||
|
|---------|------|-------|
|
||||||
|
| **v1.0** | 2025‑07‑12 | First populated glossary – 52 terms covering Core docs. |
|
||||||
|
|
||||||
|
*(End of Glossary v1.0)*
|
||||||
234
docs/15_UI_GUIDE.md
Executable file
234
docs/15_UI_GUIDE.md
Executable file
@@ -0,0 +1,234 @@
|
|||||||
|
# 15 - Pragmatic UI Guide --- **Stella Ops**
|
||||||
|
|
||||||
|
# Stella Ops Web UI
|
||||||
|
|
||||||
|
A fast, modular single‑page application for controlling scans, policies, offline updates and platform‑wide settings.
|
||||||
|
Built for sub‑second feedback, dark‑mode by default, and **no external CDNs** – everything ships inside the anonymous internal registry.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Fast Facts
|
||||||
|
|
||||||
|
| Aspect | Detail |
|
||||||
|
| ----------------- | -------------------------------------------------------------------------- |
|
||||||
|
| Tech Stack | **Angular {{ angular }}** + Vite dev server |
|
||||||
|
| Styling | **Tailwind CSS** |
|
||||||
|
| State | Angular Signals + RxJS |
|
||||||
|
| API Client | OpenAPI v3 generated services (Axios) |
|
||||||
|
| Auth | OAuth2 /OIDC (tokens from backend or external IdP) |
|
||||||
|
| i18n | JSON bundles – **`/locales/{lang}.json`** (English, Russian shipped) |
|
||||||
|
| Offline Updates 📌 | UI supports “OUK” tarball upload to refresh NVD / Trivy DB when air‑gapped |
|
||||||
|
| Build Artifacts | (`ui/dist/`) pushed to `registry.git.stella-ops.org/ui:${SHA}` |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 Navigation Map
|
||||||
|
|
||||||
|
```
|
||||||
|
Dashboard
|
||||||
|
└─ Scans
|
||||||
|
├─ Active
|
||||||
|
├─ History
|
||||||
|
└─ Reports
|
||||||
|
└─ Policies 📌
|
||||||
|
├─ Editor (YAML / Rego) 📌
|
||||||
|
├─ Import / Export 📌
|
||||||
|
└─ History
|
||||||
|
└─ Settings
|
||||||
|
├─ SBOM Format 📌
|
||||||
|
├─ Registry 📌
|
||||||
|
├─ Offline Updates (OUK) 📌
|
||||||
|
├─ Themes (Light / Dark / System) 📌
|
||||||
|
└─ Advanced
|
||||||
|
└─ Plugins 🛠
|
||||||
|
└─ Help / About
|
||||||
|
```
|
||||||
|
|
||||||
|
*The **Offline Updates (OUK)** node under **Settings** is new.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 Technology Overview
|
||||||
|
|
||||||
|
### 2.1 Build & Deployment
|
||||||
|
|
||||||
|
1. `npm i && npm run build` → generates `dist/` (~2.1 MB gzip).
|
||||||
|
2. A CI job tags and pushes the artifact as `ui:${GIT_SHA}` to the internal registry.
|
||||||
|
3. Backend serves static assets from `/srv/ui` (mounted from the image layer).
|
||||||
|
|
||||||
|
_No external fonts or JS – true offline guarantee._
|
||||||
|
|
||||||
|
### 2.2 Runtime Boot
|
||||||
|
|
||||||
|
1. **AppConfigService** pulls `/api/v1/config/ui` (contains feature flags, default theme, enabled plugins).
|
||||||
|
2. Locale JSON fetched (`/locales/{lang}.json`, falls back to `en`).
|
||||||
|
3. Root router mounts lazy‑loaded **feature modules** in the order supplied by backend – this is how future route plugins inject pages without forking the UI.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 Feature Walk‑Throughs
|
||||||
|
|
||||||
|
### 3.1 Dashboard – Real‑Time Status
|
||||||
|
|
||||||
|
* **Δ‑SBOM heat‑map** 📌 shows how many scans used delta mode vs. full unpack.
|
||||||
|
* “Feed Age” tile turns **orange** if NVD feed is older than 24 h; reverts after an **OUK** upload 📌.
|
||||||
|
* Live WebSocket updates for scans in progress (SignalR channel).
|
||||||
|
* **Quota Tile** – shows **Scans Today / {{ quota_token }}**; turns yellow at **≤ 10% remaining** (≈ 90% used),
|
||||||
|
red at {{ quota_token }} .
|
||||||
|
* **Token Expiry Tile** – shows days left on *client.jwt* (offline only);
|
||||||
|
turns orange at < 7 days.
|
||||||
|
|
||||||
|
### 3.2 Scans Module
|
||||||
|
|
||||||
|
| View | What you can do |
|
||||||
|
| ----------- | ------------------------------------------------------------------------------------------------- |
|
||||||
|
| **Active** | Watch progress bar (ETA ≤ 5 s) – newly added **Format** and **Δ** badges appear beside each item. |
|
||||||
|
| **History** | Filter by repo, tag, policy result (pass/block/soft‑fail). |
|
||||||
|
| **Reports** | Click row → HTML or PDF report rendered by backend (`/report/{digest}/html`). |
|
||||||
|
|
||||||
|
### 3.3 📌 Policies Module (new)
|
||||||
|
|
||||||
|
*Embedded **Monaco** editor with YAML + Rego syntax highlighting.*
|
||||||
|
|
||||||
|
| Tab | Capability |
|
||||||
|
| ------------------- | ------------------------------------------------------------------------------------------------ |
|
||||||
|
| **Editor** | Write or paste `scan-policy.yaml` or inline Rego snippet. Schema validation shown inline. |
|
||||||
|
| **Import / Export** | Buttons map to `/policy/import` and `/policy/export`. Accepts `.yaml`, `.rego`, `.zip` (bundle). |
|
||||||
|
| **History** | Immutable audit log; diff viewer highlights rule changes. |
|
||||||
|
|
||||||
|
#### 3.3.1 YAML → Rego Bridge
|
||||||
|
|
||||||
|
If you paste YAML but enable **Strict Mode** (toggle), backend converts to Rego under the hood, stores both representations, and shows a side‑by‑side diff.
|
||||||
|
|
||||||
|
### 3.4 📌 Settings Enhancements
|
||||||
|
|
||||||
|
| Setting | Details |
|
||||||
|
| --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| **SBOM Format** | Dropdown – *Trivy JSON*, *SPDX JSON*, *CycloneDX JSON*. |
|
||||||
|
| **Registry**                | Displays pull URL (`registry.git.stella-ops.org`) and Cosign key fingerprint.                                                                                                |
|
||||||
|
| **Offline Updates (OUK)** 📌 | Upload **`ouk*.tar.gz`** produced by the Offline Update Kit CLI. Backend unpacks, verifies SHA‑256 checksum & Cosign signature, then reloads Redis caches without restart. |
|
||||||
|
| **Theme** | Light, Dark, or Auto (system). |
|
||||||
|
|
||||||
|
#### 3.4.1 OUK Upload Screen 📌
|
||||||
|
|
||||||
|
*Page path:* **Settings → Offline Updates (OUK)**
|
||||||
|
*Components:*
|
||||||
|
|
||||||
|
1. **Drop Zone** – drag or select `.tar.gz` (max 1 GB).
|
||||||
|
2. **Progress Bar** – streaming upload with chunked HTTP.
|
||||||
|
3. **Verification Step** – backend returns status:
|
||||||
|
* *Signature valid* ✔️
|
||||||
|
* *Digest mismatch* ❌
|
||||||
|
4. **Feed Preview** – table shows *NVD date*, *OUK source build tag*, *CVE count delta*.
|
||||||
|
5. **Activate** – button issues `/feeds/activate/{id}`; on success the Dashboard “Feed Age” tile refreshes to green.
|
||||||
|
6. **History List** – previous OUK uploads with user, date, version; supports rollback.
|
||||||
|
|
||||||
|
*All upload actions are recorded in the Policies → History audit log as type `ouk_update`.*
|
||||||
|
|
||||||
|
### 3.5 Plugins Panel 🛠 (ships after UI modularisation)
|
||||||
|
|
||||||
|
Lists discovered UI plugins; each can inject routes/panels. Toggle on/off without reload.
|
||||||
|
|
||||||
|
### 3.6 Settings → **Quota & Tokens** (new)
|
||||||
|
|
||||||
|
* View current **Client‑JWT claims** (tier, maxScansPerDay, expiry).
|
||||||
|
* **Generate Offline Token** – admin‑only button → POST `/token/offline` (UI wraps the API).
|
||||||
|
* Upload new token file for manual refresh.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 i18n & l10n
|
||||||
|
|
||||||
|
* JSON files under `/locales`.
|
||||||
|
* Russian (`ru`) ships first‑class, translated security terms align with **GOST R ISO/IEC 27002‑2020**.
|
||||||
|
* “Offline Update Kit” surfaces as **“Оффлайн‑обновление базы уязвимостей”** in Russian locale.
|
||||||
|
* Community can add locales by uploading a new JSON via Plugins Panel once 🛠 ships.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 Accessibility
|
||||||
|
|
||||||
|
* WCAG 2.1 AA conformance targeted.
|
||||||
|
* All color pairs pass contrast (checked by `vite-plugin-wcag`).
|
||||||
|
* Keyboard navigation fully supported; focus outlines visible in both themes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 Theming 📌
|
||||||
|
|
||||||
|
| Layer | How to change |
|
||||||
|
| --------------- | ------------------------------------------------------------ |
|
||||||
|
| Tailwind | Palette variables under `tailwind.config.js > theme.colors`. |
|
||||||
|
| Runtime toggle | Stored in `localStorage.theme`, synced across tabs. |
|
||||||
|
| Plugin override | Future route plugins may expose additional palettes 🛠. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 Extensibility Hooks
|
||||||
|
|
||||||
|
| Area | Contract | Example |
|
||||||
|
| ------------- | ---------------------------------------- | ---------------------------------------------- |
|
||||||
|
| New route | `window.stella.registerRoute()` | “Secrets” scanner plugin adds `/secrets` page. |
|
||||||
|
| External link | `window.stella.addMenuLink(label, href)` | “Docs” link opens corporate Confluence. |
|
||||||
|
| Theme | `window.stella.registerTheme()` | High‑contrast palette for accessibility. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 Road‑Map Tags
|
||||||
|
|
||||||
|
| Feature | Status |
|
||||||
|
| ------------------------- | ------ |
|
||||||
|
| Policy Editor (YAML) | ✅ |
|
||||||
|
| Inline Rego validation | 🛠 |
|
||||||
|
| OUK Upload UI | ✅ |
|
||||||
|
| Plugin Marketplace UI | 🚧 |
|
||||||
|
| SLSA Verification banner | 🛠 |
|
||||||
|
| Rekor Transparency viewer | 🚧 |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9 Non‑Commercial Usage Rules 📌
|
||||||
|
|
||||||
|
*(Extracted & harmonised from the Russian UI help page so that English docs remain licence‑complete.)*
|
||||||
|
|
||||||
|
1. **Free for internal security assessments.**
|
||||||
|
2. Commercial resale or SaaS re‑hosting **prohibited without prior written consent** under AGPL §13.
|
||||||
|
3. If you distribute a fork **with UI modifications**, you **must**:
|
||||||
|
* Make the complete source code (including UI assets) publicly available.
|
||||||
|
* Retain original project attribution in footer.
|
||||||
|
4. All dependencies listed in `ui/package.json` remain under their respective OSS licences (MIT, Apache 2.0, ISC).
|
||||||
|
5. Use in government‑classified environments must comply with **applicable local regulations** governing cryptography and software distribution.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10 Troubleshooting Tips
|
||||||
|
|
||||||
|
| Symptom | Cause | Remedy |
|
||||||
|
| ----------------------------------- | ----------------------------------- | ----------------------------------------------------------------- |
|
||||||
|
| **White page** after login | `ui/dist/` hash mismatch | Clear browser cache; backend auto‑busts on version change. |
|
||||||
|
| Policy editor shows “Unknown field” | YAML schema drift | Sync your policy file to latest sample in *Settings → Templates*. |
|
||||||
|
| **OUK upload fails** at 99 % | Tarball built with outdated OUK CLI | Upgrade CLI (`ouk --version`) and rebuild package. |
|
||||||
|
| Icons look broken in Safari | *SVG `mask` unsupported* | Use Safari 17+ or switch to PNG icon set in Settings > Advanced. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 11 Contributing
|
||||||
|
|
||||||
|
* Run `npm run dev` and open `http://localhost:5173`.
|
||||||
|
* Ensure `ng lint` and `ng test` pass before PR.
|
||||||
|
* Sign the **DCO** in your commit footer (`Signed-off-by`).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 12 Change Log
|
||||||
|
|
||||||
|
| Version | Date | Highlights |
|
||||||
|
| ------- | ---------- | ---------- |
|
||||||
|
| v2.4 | 2025‑07‑15 | **Added full OUK Offline Update upload flow** – navigation node, Settings panel, dashboard linkage, audit hooks. |
|
||||||
|
| v2.3 | 2025‑07‑14 | Added Policies module, SBOM Format & Registry settings, theming toggle, Δ‑SBOM indicators, extracted non‑commercial usage rules. |
|
||||||
|
| v2.2 | 2025‑07‑12 | Added user tips/workflows, CI notes, DevSecOps section, troubleshooting, screenshots placeholders. |
|
||||||
|
| v2.1 | 2025‑07‑12 | Removed PWA/Service‑worker; added oidc‑client‑ts; simplified roadmap |
|
||||||
|
| v2.0 | 2025‑07‑12 | Accessibility, Storybook, perf budgets, security rules |
|
||||||
|
| v1.1 | 2025‑07‑11 | Original OSS‑only guide |
|
||||||
|
|
||||||
|
*(End of Pragmatic UI Guide v2.4)*
|
||||||
201
docs/17_SECURITY_HARDENING_GUIDE.md
Executable file
201
docs/17_SECURITY_HARDENING_GUIDE.md
Executable file
@@ -0,0 +1,201 @@
|
|||||||
|
# 17 · Security Hardening Guide — **Stella Ops**
|
||||||
|
*(v2.0 — 12 Jul 2025)*
|
||||||
|
|
||||||
|
> **Audience** — Site‑reliability and platform teams deploying **the open‑source Core** in production or restricted networks.
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Table of Contents
|
||||||
|
|
||||||
|
1. Threat model (summary)
|
||||||
|
2. Host‑OS baseline
|
||||||
|
3. Container & runtime hardening
|
||||||
|
4. Network‑plane guidance
|
||||||
|
5. Secrets & key management
|
||||||
|
6. Image, SBOM & plug‑in supply‑chain controls
|
||||||
|
7. Logging, monitoring & audit
|
||||||
|
8. Update & patch strategy
|
||||||
|
9. Incident‑response workflow
|
||||||
|
10. Pen‑testing & continuous assurance
|
||||||
|
11. Contacts & vulnerability disclosure
|
||||||
|
12. Change log
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 Threat model (summary)
|
||||||
|
|
||||||
|
| Asset | Threats | Mitigations |
|
||||||
|
| -------------------- | --------------------- | ---------------------------------------------------------------------- |
|
||||||
|
| SBOMs & scan results | Disclosure, tamper | TLS‑in‑transit, read‑only Redis volume, RBAC, Cosign‑verified plug‑ins |
|
||||||
|
| Backend container | RCE, code‑injection | Distroless image, non‑root UID, read‑only FS, seccomp + `CAP_DROP:ALL` |
|
||||||
|
| Update artefacts | Supply‑chain attack | Cosign‑signed images & SBOMs, enforced by admission controller |
|
||||||
|
| Admin credentials | Phishing, brute force | OAuth 2.0 with 12‑h token TTL, optional mTLS |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 Host‑OS baseline checklist
|
||||||
|
|
||||||
|
| Item | Recommended setting |
|
||||||
|
| ------------- | --------------------------------------------------------- |
|
||||||
|
| OS | Ubuntu 22.04 LTS (kernel ≥ 5.15) or Alma 9 |
|
||||||
|
| Patches | `unattended‑upgrades` or vendor‑equivalent enabled |
|
||||||
|
| Filesystem | `noexec,nosuid` on `/tmp`, `/var/tmp` |
|
||||||
|
| Docker Engine | v24.*, API socket root‑owned (`0660`) |
|
||||||
|
| Auditd | Watch `/etc/docker`, `/usr/bin/docker*` and Compose files |
|
||||||
|
| Time sync | `chrony` or `systemd‑timesyncd` |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 Container & runtime hardening
|
||||||
|
|
||||||
|
### 3.1 Docker Compose reference (`compose-core.yml`)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
backend:
|
||||||
|
image: registry.stella-ops.org/stella-ops/stella-ops:<PINNED_TAG_OR_DIGEST>
|
||||||
|
user: "101:101" # non‑root
|
||||||
|
read_only: true
|
||||||
|
security_opt:
|
||||||
|
- "no-new-privileges:true"
|
||||||
|
- "seccomp:./seccomp-backend.json"
|
||||||
|
cap_drop: [ALL]
|
||||||
|
tmpfs:
|
||||||
|
      - /tmp:size=64m,noexec,nosuid
|
||||||
|
environment:
|
||||||
|
- ASPNETCORE_URLS=https://+:8080
|
||||||
|
- TLSPROVIDER=OpenSslGost
|
||||||
|
depends_on: [redis]
|
||||||
|
networks: [core-net]
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "wget", "-qO-", "https://localhost:8080/health"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
|
||||||
|
redis:
|
||||||
|
image: redis:7.2-alpine
|
||||||
|
command: ["redis-server", "--requirepass", "${REDIS_PASS}", "--rename-command", "FLUSHALL", ""]
|
||||||
|
user: "redis"
|
||||||
|
read_only: true
|
||||||
|
cap_drop: [ALL]
|
||||||
|
tmpfs:
|
||||||
|
- /data
|
||||||
|
networks: [core-net]
|
||||||
|
|
||||||
|
networks:
|
||||||
|
core-net:
|
||||||
|
driver: bridge
|
||||||
|
```
|
||||||
|
|
||||||
|
No dedicated “Redis” or “Mongo” sub‑nets are declared; the single bridge network suffices for the default stack.
|
||||||
|
|
||||||
|
### 3.2 Kubernetes deployment highlights
|
||||||
|
|
||||||
|
Use a separate NetworkPolicy that only allows egress from backend to Redis :6379.
|
||||||
|
securityContext: runAsNonRoot, readOnlyRootFilesystem, allowPrivilegeEscalation: false, drop all capabilities.
|
||||||
|
PodDisruptionBudget of minAvailable: 1.
|
||||||
|
Optionally add CosignVerified=true label enforced by an admission controller (e.g. Kyverno or Connaisseur).
|
||||||
|
|
||||||
|
## 4 Network‑plane guidance
|
||||||
|
|
||||||
|
| Plane | Recommendation |
|
||||||
|
| ------------------ | -------------------------------------------------------------------------- |
|
||||||
|
| North‑south | Terminate TLS 1.2+ (OpenSSL‑GOST default). Use LetsEncrypt or internal CA. |
|
||||||
|
| East‑west | Compose bridge or K8s ClusterIP only; no public Redis/Mongo ports. |
|
||||||
|
| Ingress controller | Limit methods to GET, POST, PATCH (no TRACE). |
|
||||||
|
| Rate‑limits | 40 rps default; tune ScannerPool.Workers and ingress limit‑req to match. |
|
||||||
|
|
||||||
|
## 5 Secrets & key management
|
||||||
|
|
||||||
|
| Secret | Storage | Rotation |
|
||||||
|
| --------------------------------- | ---------------------------------- | ----------------------------- |
|
||||||
|
| **Client‑JWT (offline)** | `/var/lib/stella/tokens/client.jwt` (root : 600) | **30 days** – provided by each OUK |
|
||||||
|
| REDIS_PASS | Docker/K8s secret | 90 days |
|
||||||
|
| OAuth signing key | /keys/jwt.pem (read‑only mount) | 180 days |
|
||||||
|
| Cosign public key | /keys/cosign.pub baked into image; | change on every major release |
|
||||||
|
| Trivy DB mirror token (if remote) | Secret + read‑only | 30 days |
|
||||||
|
|
||||||
|
Never bake secrets into images; always inject at runtime.
|
||||||
|
|
||||||
|
> **Operational tip:** schedule a cron reminding ops 5 days before
|
||||||
|
> `client.jwt` expiry. The backend also emits a Prometheus metric
|
||||||
|
> `stella_quota_token_days_remaining`.
|
||||||
|
|
||||||
|
## 6 Image, SBOM & plug‑in supply‑chain controls
|
||||||
|
|
||||||
|
* Images — Pull by digest not latest; verify:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cosign verify ghcr.io/stellaops/backend@sha256:<DIGEST> \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub
|
||||||
|
```
|
||||||
|
|
||||||
|
* SBOM — Each release ships an SPDX file; store alongside images for audit.
|
||||||
|
* Third‑party plug‑ins — Place in /plugins/; backend will:
|
||||||
|
* Validate Cosign signature.
|
||||||
|
* Check [StellaPluginVersion("major.minor")].
|
||||||
|
* Refuse to start if Security.DisablePluginUnsigned=false (default).
|
||||||
|
|
||||||
|
## 7 Logging, monitoring & audit
|
||||||
|
|
||||||
|
| Control | Implementation |
|
||||||
|
| ------------ | ----------------------------------------------------------------- |
|
||||||
|
| Log format | Serilog JSON; ship via Fluent‑Bit to ELK or Loki |
|
||||||
|
| Metrics | Prometheus /metrics endpoint; default Grafana dashboard in infra/ |
|
||||||
|
| Audit events | Redis stream audit; export daily to SIEM |
|
||||||
|
| Alert rules | Feed age ≥ 48 h, P95 wall‑time > 5 s, Redis used memory > 75 % |
|
||||||
|
|
||||||
|
### 7.1 Feedser authorization audits
|
||||||
|
|
||||||
|
- Enable the Authority integration for Feedser (`authority.enabled=true`). Keep
|
||||||
|
`authority.allowAnonymousFallback` set to `true` only during migration and plan
|
||||||
|
to disable it before **2025-12-31 UTC** so the `/jobs*` surface always demands
|
||||||
|
a bearer token.
|
||||||
|
- Store the Authority client secret using Docker/Kubernetes secrets and point
|
||||||
|
`authority.clientSecretFile` at the mounted path; the value is read at startup
|
||||||
|
and never logged.
|
||||||
|
- Watch the `Feedser.Authorization.Audit` logger. Each entry contains the HTTP
|
||||||
|
status, subject, client ID, scopes, remote IP, and a boolean `bypass` flag
|
||||||
|
showing whether a network bypass CIDR allowed the request. Configure your SIEM
|
||||||
|
to alert when unauthenticated requests (`status=401`) appear with
|
||||||
|
`bypass=true`, or when unexpected scopes invoke job triggers.
|
||||||
|
|
||||||
|
## 8 Update & patch strategy
|
||||||
|
|
||||||
|
| Layer | Cadence | Method |
|
||||||
|
| -------------------- | -------------------------------------------------------- | ------------------------------ |
|
||||||
|
| Backend & CLI images | Monthly or CVE‑driven docker pull + docker compose up -d |
|
||||||
|
| Trivy DB | 24 h scheduler via Feedser (vulnerability ingest/merge/export service) | configurable via Feedser scheduler options |
|
||||||
|
| Docker Engine | vendor LTS | distro package manager |
|
||||||
|
| Host OS | security repos enabled | unattended‑upgrades |
|
||||||
|
|
||||||
|
## 9 Incident‑response workflow
|
||||||
|
|
||||||
|
* Detect — PagerDuty alert from Prometheus or SIEM.
|
||||||
|
* Contain — Stop affected Backend container; isolate Redis RDB snapshot.
|
||||||
|
* Eradicate — Pull verified images, redeploy, rotate secrets.
|
||||||
|
* Recover — Restore RDB, replay SBOMs if history lost.
|
||||||
|
* Review — Post‑mortem within 72 h; create follow‑up issues.
|
||||||
|
* Escalate P1 incidents to <security@stella‑ops.org> (24 × 7).
|
||||||
|
|
||||||
|
|
||||||
|
## 10 Pen‑testing & continuous assurance
|
||||||
|
|
||||||
|
| Control | Frequency | Tool/Runner |
|
||||||
|
|----------------------|-----------------------|-------------------------------------------|
|
||||||
|
| OWASP ZAP baseline | Each merge to `main` | GitHub Action `zap-baseline-scan` |
|
||||||
|
| Dependency scanning | Per pull request | Trivy FS + Dependabot |
|
||||||
|
| External red‑team | Annual or pre‑GA | CREST‑accredited third‑party |
|
||||||
|
|
||||||
|
## 11 Vulnerability disclosure & contact
|
||||||
|
|
||||||
|
* Preferred channel: security@stella‑ops.org (GPG key on website).
|
||||||
|
* Coordinated disclosure reward: public credit and swag (no monetary bounty at this time).
|
||||||
|
|
||||||
|
## 12 Change log
|
||||||
|
|
||||||
|
| Version | Date | Notes |
|
||||||
|
| ------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| v2.0 | 2025‑07‑12 | Full overhaul: host‑OS baseline, supply‑chain signing, removal of unnecessary sub‑nets, role‑based contact e‑mail, K8s guidance. |
|
||||||
|
| v1.1 | 2025‑07‑09 | Minor fence fixes. |
|
||||||
|
| v1.0 | 2025‑07‑09 | Original draft. |
|
||||||
169
docs/18_CODING_STANDARDS.md
Executable file
169
docs/18_CODING_STANDARDS.md
Executable file
@@ -0,0 +1,169 @@
|
|||||||
|
# 18 · Coding Standards & Contributor Guide — **Stella Ops**
|
||||||
|
*(v2.0 — 12 Jul 2025 · supersedes v1.0)*
|
||||||
|
|
||||||
|
> **Audience** — Anyone sending a pull‑request to the open‑source Core.
|
||||||
|
> **Goal** — Keep the code‑base composed of small files, plug‑in‑friendly, DI‑consistent, and instantly readable.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 Why read this?
|
||||||
|
|
||||||
|
* Cuts review time → quicker merges.
|
||||||
|
* Guarantees code is **hot‑load‑safe** for run‑time plug‑ins.
|
||||||
|
* Prevents style churn and merge conflicts.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 High‑level principles
|
||||||
|
|
||||||
|
1. **SOLID first** – especially Interface & Dependency Inversion.
|
||||||
|
2. **100‑line rule** – any file > 100 physical lines must be split or refactored.
|
||||||
|
3. **Contract‑level ownership** – public abstractions live in lightweight *Contracts* libraries; impl classes live in runtime projects.
|
||||||
|
4. **Single Composition Root** – all DI wiring happens in **`StellaOps.Web/Program.cs`** and in each plug‑in’s `IoCConfigurator`; nothing else calls `IServiceCollection.BuildServiceProvider`.
|
||||||
|
5. **No Service Locator** – constructor injection only; static `ServiceProvider` is banned.
|
||||||
|
6. **Fail‑fast startup** – configuration validated before the web‑host listens.
|
||||||
|
7. **Hot‑load compatible** – no static singletons that survive plug‑in unload; avoid `Assembly.LoadFrom` outside the built‑in plug‑in loader.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 Repository layout (flat, July‑2025)
|
||||||
|
|
||||||
|
```text
|
||||||
|
src/
|
||||||
|
├─ backend/
|
||||||
|
│ ├─ StellaOps.Web/ # ASP.NET host + composition root
|
||||||
|
│ ├─ StellaOps.Common/ # Serilog, Result<T>, helpers
|
||||||
|
│ ├─ StellaOps.Contracts/ # DTO + interface contracts (no impl)
|
||||||
|
│ ├─ StellaOps.Configuration/ # Options + validation
|
||||||
|
│ ├─ StellaOps.Localization/
|
||||||
|
│ ├─ StellaOps.PluginLoader/ # Cosign verify, hot‑load
|
||||||
|
│ ├─ StellaOps.Scanners.Trivy/ # First‑party scanner
|
||||||
|
│ ├─ StellaOps.TlsProviders.OpenSsl/
|
||||||
|
│ └─ … (additional runtime projects)
|
||||||
|
├─ plugins-sdk/ # Templated contracts & abstractions
|
||||||
|
└─ frontend/ # Angular workspace
|
||||||
|
tests/ # Mirrors src structure 1‑to‑1
|
||||||
|
```
|
||||||
|
|
||||||
|
There are no folders named “Module” and no nested solutions.
|
||||||
|
|
||||||
|
## 3 Naming & style conventions
|
||||||
|
|
||||||
|
| Element | Rule | Example |
|
||||||
|
| ------------------------------------------------------------------------------- | --------------------------------------- | ------------------------------- |
|
||||||
|
| Namespaces | File‑scoped, StellaOps.<Area> | namespace StellaOps.Scanners; |
|
||||||
|
| Interfaces | I prefix, PascalCase | IScannerRunner |
|
||||||
|
| Classes / records | PascalCase | ScanRequest, TrivyRunner |
|
||||||
|
| Private fields | camelCase (no leading underscore) | redisCache, httpClient |
|
||||||
|
| Constants | SCREAMING_SNAKE_CASE | const int MAX_RETRIES = 3; |
|
||||||
|
| Async methods | End with Async | Task<ScanResult> ScanAsync() |
|
||||||
|
| File length | ≤ 100 lines incl. using & braces | enforced by dotnet format check |
|
||||||
|
| Using directives | Outside namespace, sorted, no wildcards | — |
|
||||||
|
|
||||||
|
Static analyzers (.editorconfig, StyleCop.Analyzers package) enforce the above.
|
||||||
|
|
||||||
|
## 4 Dependency‑injection policy
|
||||||
|
|
||||||
|
Composition root – exactly one per process:
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
builder.Services
|
||||||
|
.AddStellaCore() // extension methods from each runtime project
|
||||||
|
.AddPluginLoader("/Plugins", cfg); // hot‑load signed DLLs
|
||||||
|
```
|
||||||
|
|
||||||
|
Plug‑ins register additional services via the IoCConfigurator convention described in the Plug‑in SDK Guide, §5.
|
||||||
|
Never resolve services manually (provider.GetService<T>()) outside the composition root; tests may use WebApplicationFactory or ServiceProvider.New() helpers.
|
||||||
|
Scoped lifetime is default; singletons only for stateless, thread‑safe helpers.
|
||||||
|
|
||||||
|
## 5 Project organisation rules
|
||||||
|
|
||||||
|
Contracts vs. Runtime – public DTO & interfaces live in <Area>.Contracts; implementation lives in sibling project.
|
||||||
|
Feature folders – inside each runtime project group classes by use‑case, e.g.
|
||||||
|
|
||||||
|
```text
|
||||||
|
├─ Scan/
|
||||||
|
│ ├─ ScanService.cs
|
||||||
|
│ └─ ScanController.cs
|
||||||
|
├─ Feed/
|
||||||
|
└─ Tls/
|
||||||
|
```
|
||||||
|
|
||||||
|
Tests – mirror the structure under tests/ one‑to‑one; no test code inside production projects.
|
||||||
|
|
||||||
|
## 6 C# language features
|
||||||
|
|
||||||
|
Nullable reference types enabled.
|
||||||
|
record for immutable DTOs.
|
||||||
|
Pattern matching encouraged; avoid long switch‑cascades.
|
||||||
|
Span<T> & Memory<T> OK when perf‑critical, but measure first.
|
||||||
|
Use await foreach over manual paginator loops.
|
||||||
|
|
||||||
|
## 7 Error‑handling template
|
||||||
|
|
||||||
|
```csharp
public async Task<IActionResult> PostScan([FromBody] ScanRequest req)
{
    if (!ModelState.IsValid) return BadRequest(ModelState);

    try
    {
        ScanResult result = await scanService.ScanAsync(req);
        if (result.Quota != null)
        {
            Response.Headers.TryAdd("X-Stella-Quota-Remaining", result.Quota.Remaining.ToString());
            Response.Headers.TryAdd("X-Stella-Reset", result.Quota.ResetUtc.ToString("o"));
        }
        return Ok(result);
    }
    catch (Exception ex)
    {
        logger.LogError(ex, "Scan failed for request {RequestId}", HttpContext.TraceIdentifier);
        return Problem(title: "Scan failed", statusCode: StatusCodes.Status500InternalServerError);
    }
}
```
|
||||||
|
|
||||||
|
RFC 7807 ProblemDetails for all non‑200s.
|
||||||
|
Capture structured logs with Serilog’s message‑template syntax.
|
||||||
|
|
||||||
|
## 8 Async & threading
|
||||||
|
|
||||||
|
* All I/O is async; no .Result / .Wait().
|
||||||
|
* Library code: ConfigureAwait(false).
|
||||||
|
* Limit concurrency via Channel<T> or Parallel.ForEachAsync, never raw Task.Run loops.
|
||||||
|
|
||||||
|
## 9 Testing rules
|
||||||
|
|
||||||
|
| Layer | Framework | Coverage gate |
|
||||||
|
| ------------------------ | ------------------------ | -------------------------- |
|
||||||
|
| Unit | xUnit + FluentAssertions | ≥ 80 % line, ≥ 60 % branch |
|
||||||
|
| Integration | Testcontainers | Real Redis & Trivy |
|
||||||
|
| Mutation (critical libs) | Stryker.NET | ≥ 60 % score |
|
||||||
|
|
||||||
|
One test project per runtime/contract project; naming <Project>.Tests.
|
||||||
|
|
||||||
|
## 10 Static analysis & formatting
|
||||||
|
|
||||||
|
* dotnet format must exit clean (CI gate).
|
||||||
|
* StyleCop.Analyzers + Roslyn‑Security‑Guard run on every PR.
|
||||||
|
* CodeQL workflow runs nightly on main.
|
||||||
|
|
||||||
|
## 11 Commit & PR checklist
|
||||||
|
|
||||||
|
* Conventional Commit prefix (feat:, fix:, etc.).
|
||||||
|
* dotnet format & dotnet test both green.
|
||||||
|
* Added or updated XML‑doc comments for public APIs.
|
||||||
|
* File count & length comply with 100‑line rule.
|
||||||
|
* If new public contract → update relevant markdown doc & JSON‑Schema.
|
||||||
|
|
||||||
|
## 12 Common pitfalls
|
||||||
|
|
||||||
|
|Symptom| Root cause | Fix
|
||||||
|
|-------|-------------|-------------------
|
||||||
|
|InvalidOperationException: Cannot consume scoped service...| Mis‑matched DI lifetimes| Use scoped everywhere unless truly stateless
|
||||||
|
|Hot‑reload plug‑in crash| Static singleton caching plugin types| Store nothing static; rely on DI scopes
|
||||||
|
|
||||||
|
|File exceeds the 100‑line rule |Large handlers or utils |Split into private helpers or new class
|
||||||
|
|
||||||
|
## 13 Change log
|
||||||
|
|
||||||
|
| Version | Date | Notes |
|
||||||
|
| ------- | ---------- | -------------------------------------------------------------------------------------------------- |
|
||||||
|
| v2.0 | 2025‑07‑12 | Updated DI policy, 100‑line rule, new repo layout, camelCase fields, removed “Module” terminology. |
|
||||||
|
| 1.0 | 2025‑07‑09 | Original standards. |
|
||||||
109
docs/19_TEST_SUITE_OVERVIEW.md
Executable file
109
docs/19_TEST_SUITE_OVERVIEW.md
Executable file
@@ -0,0 +1,109 @@
|
|||||||
|
# Automated Test‑Suite Overview
|
||||||
|
|
||||||
|
This document enumerates **every automated check** executed by the Stella Ops
|
||||||
|
CI pipeline, from unit level to chaos experiments. It is intended for
|
||||||
|
contributors who need to extend coverage or diagnose failures.
|
||||||
|
|
||||||
|
> **Build parameters** – values such as `{{ dotnet }}` (runtime) and
|
||||||
|
> `{{ angular }}` (UI framework) are injected at build time.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Layer map
|
||||||
|
|
||||||
|
| Layer | Tooling | Entry‑point | Frequency |
|
||||||
|
|-------|---------|-------------|-----------|
|
||||||
|
| **1. Unit** | `xUnit` (<code>dotnet test</code>) | `*.Tests.csproj` | per PR / push |
|
||||||
|
| **2. Property‑based** | `FsCheck` | `SbomPropertyTests` | per PR |
|
||||||
|
| **3. Integration (API)** | `Testcontainers` suite | `test/Api.Integration` | per PR + nightly |
|
||||||
|
| **4. Integration (DB-merge)** | in-memory Mongo + Redis | `Feedser.Integration` (vulnerability ingest/merge/export service) | per PR |
|
||||||
|
| **5. Contract (gRPC)** | `Buf breaking` | `buf.yaml` files | per PR |
|
||||||
|
| **6. Front‑end unit** | `Jest` | `ui/src/**/*.spec.ts` | per PR |
|
||||||
|
| **7. Front‑end E2E** | `Playwright` | `ui/e2e/**` | nightly |
|
||||||
|
| **8. Lighthouse perf / a11y** | `lighthouse-ci` (Chrome headless) | `ui/dist/index.html` | nightly |
|
||||||
|
| **9. Load** | `k6` scripted scenarios | `k6/*.js` | nightly |
|
||||||
|
| **10. Chaos CPU / OOM** | `pumba` | Docker Compose overlay | weekly |
|
||||||
|
| **11. Dependency scanning** | `Trivy fs` + `dotnet list package --vuln` | root | per PR |
|
||||||
|
| **12. License compliance** | `LicenceFinder` | root | per PR |
|
||||||
|
| **13. SBOM reproducibility** | `in‑toto attestation` diff | GitLab job | release tags |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quality gates
|
||||||
|
|
||||||
|
| Metric | Budget | Gate |
|
||||||
|
|--------|--------|------|
|
||||||
|
| API unit coverage | ≥ 85 % lines | PR merge |
|
||||||
|
| API response P95 | ≤ 120 ms | nightly alert |
|
||||||
|
| Δ‑SBOM warm scan P95 (4 vCPU) | ≤ 5 s | nightly alert |
|
||||||
|
| Lighthouse performance score | ≥ 90 | nightly alert |
|
||||||
|
| Lighthouse accessibility score | ≥ 95 | nightly alert |
|
||||||
|
| k6 sustained RPS drop | < 5 % vs baseline | nightly alert |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Local runner
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# minimal run: unit + property + frontend tests
|
||||||
|
./scripts/dev-test.sh
|
||||||
|
|
||||||
|
# full stack incl. Playwright and lighthouse
|
||||||
|
./scripts/dev-test.sh --full
|
||||||
|
```
|
||||||
|
|
||||||
|
The script spins up MongoDB/Redis via Testcontainers and requires:
|
||||||
|
|
||||||
|
* Docker ≥ 25
|
||||||
|
* Node 20 (for Jest/Playwright)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Feedser OSV↔GHSA parity fixtures
|
||||||
|
|
||||||
|
The Feedser connector suite includes a regression test (`OsvGhsaParityRegressionTests`)
|
||||||
|
that checks a curated set of GHSA identifiers against OSV responses. The fixture
|
||||||
|
snapshots live in `src/StellaOps.Feedser.Source.Osv.Tests/Fixtures/` and are kept
|
||||||
|
deterministic so the parity report remains reproducible.
|
||||||
|
|
||||||
|
To refresh the fixtures when GHSA/OSV payloads change:
|
||||||
|
|
||||||
|
1. Ensure outbound HTTPS access to `https://api.osv.dev` and `https://api.github.com`.
|
||||||
|
2. Run `UPDATE_PARITY_FIXTURES=1 dotnet test src/StellaOps.Feedser.Source.Osv.Tests/StellaOps.Feedser.Source.Osv.Tests.csproj`.
|
||||||
|
3. Commit the regenerated `osv-ghsa.*.json` files that the test emits (raw snapshots and canonical advisories).
|
||||||
|
|
||||||
|
The regen flow logs `[Parity]` messages and normalises `recordedAt` timestamps so the
|
||||||
|
fixtures stay stable across machines.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## CI job layout
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
subgraph fast-path
|
||||||
|
U[xUnit] --> P[FsCheck] --> I1[Testcontainer API]
|
||||||
|
end
|
||||||
|
|
||||||
|
I1 --> FE[Jest]
|
||||||
|
FE --> E2E[Playwright]
|
||||||
|
E2E --> Lighthouse
|
||||||
|
Lighthouse --> INTEG2[Feedser]
|
||||||
|
INTEG2 --> LOAD[k6]
|
||||||
|
LOAD --> CHAOS[pumba]
|
||||||
|
CHAOS --> RELEASE[Attestation diff]
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Adding a new test layer
|
||||||
|
|
||||||
|
1. Extend `scripts/dev-test.sh` so local contributors get the layer by default.
|
||||||
|
2. Add a dedicated GitLab job in `.gitlab-ci.yml` (stage `test` or `nightly`).
|
||||||
|
3. Register the job in `docs/19_TEST_SUITE_OVERVIEW.md` *and* list its metric
|
||||||
|
in `docs/metrics/README.md`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Last updated {{ "now" | date: "%Y‑%m‑%d" }}*
|
||||||
|
|
||||||
169
docs/21_INSTALL_GUIDE.md
Executable file
169
docs/21_INSTALL_GUIDE.md
Executable file
@@ -0,0 +1,169 @@
|
|||||||
|
# Stella Ops — Installation Guide (Docker & Air‑Gap)
|
||||||
|
|
||||||
|
<!--
|
||||||
|
This file is processed by the Eleventy build.
|
||||||
|
Do **not** hard‑code versions or quota numbers; inherit from
|
||||||
|
docs/_includes/CONSTANTS.md instead.
|
||||||
|
{{ dotnet }} → ".NET 10 LTS"
|
||||||
|
{{ angular }} → "20"
|
||||||
|
-->
|
||||||
|
|
||||||
|
> **Status — public α not yet published.**
|
||||||
|
> The commands below will work as soon as the first image is tagged
|
||||||
|
> `registry.stella-ops.org/stella-ops/stella-ops:0.1.0-alpha`
|
||||||
|
> (target date: **late 2025**). Track progress on the
|
||||||
|
> [road‑map](/roadmap/).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 · Prerequisites
|
||||||
|
|
||||||
|
| Item | Minimum | Notes |
|
||||||
|
|------|---------|-------|
|
||||||
|
| Linux | Ubuntu 22.04 LTS / Alma 9 | x86‑64 or arm64 |
|
||||||
|
| CPU / RAM | 2 vCPU / 2 GiB | Laptop baseline |
|
||||||
|
| Disk | 10 GiB SSD | SBOM + vuln DB cache |
|
||||||
|
| Docker | **Engine 25 + Compose v2** | `docker -v` |
|
||||||
|
| TLS | OpenSSL 1.1 + | Self‑signed cert generated at first run |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Connected‑host install (Docker Compose)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Make a working directory
|
||||||
|
mkdir stella && cd stella
|
||||||
|
|
||||||
|
# 2. Download the signed Compose bundle + example .env
|
||||||
|
curl -LO https://get.stella-ops.org/releases/latest/.env.example
|
||||||
|
curl -LO https://get.stella-ops.org/releases/latest/.env.example.sig
|
||||||
|
curl -LO https://get.stella-ops.org/releases/latest/docker-compose.infrastructure.yml
|
||||||
|
curl -LO https://get.stella-ops.org/releases/latest/docker-compose.infrastructure.yml.sig
|
||||||
|
curl -LO https://get.stella-ops.org/releases/latest/docker-compose.stella-ops.yml
|
||||||
|
curl -LO https://get.stella-ops.org/releases/latest/docker-compose.stella-ops.yml.sig
|
||||||
|
|
||||||
|
# 3. Verify provenance (Cosign public key is stable)
|
||||||
|
cosign verify-blob \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
--signature .env.example.sig \
|
||||||
|
.env.example
|
||||||
|
|
||||||
|
cosign verify-blob \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
--signature docker-compose.infrastructure.yml.sig \
|
||||||
|
docker-compose.infrastructure.yml
|
||||||
|
|
||||||
|
cosign verify-blob \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
--signature docker-compose.stella-ops.yml.sig \
|
||||||
|
docker-compose.stella-ops.yml
|
||||||
|
|
||||||
|
# 4. Copy .env.example → .env and edit secrets
|
||||||
|
cp .env.example .env
|
||||||
|
$EDITOR .env
|
||||||
|
|
||||||
|
# 5. Launch databases (MongoDB + Redis)
|
||||||
|
docker compose --env-file .env -f docker-compose.infrastructure.yml up -d
|
||||||
|
|
||||||
|
# 6. Launch Stella Ops (first run pulls ~50 MB merged vuln DB)
|
||||||
|
docker compose --env-file .env -f docker-compose.stella-ops.yml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
*Default login:* `admin / changeme`
|
||||||
|
UI: `https://<host>:8443` (self‑signed certificate)
|
||||||
|
|
||||||
|
> **Pinning best‑practice** – in production environments replace
|
||||||
|
> `stella-ops:latest` with the immutable digest printed by
|
||||||
|
> `docker images --digests`.
|
||||||
|
|
||||||
|
### 1.1 · Feedser authority configuration
|
||||||
|
|
||||||
|
The Feedser container reads configuration from `etc/feedser.yaml` plus
|
||||||
|
`FEEDSER_` environment variables. To enable the new Authority integration:
|
||||||
|
|
||||||
|
1. Add the following keys to `.env` (replace values for your environment):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
FEEDSER_AUTHORITY__ENABLED=true
|
||||||
|
FEEDSER_AUTHORITY__ALLOWANONYMOUSFALLBACK=true # temporary rollout only
|
||||||
|
FEEDSER_AUTHORITY__ISSUER="https://authority.internal"
|
||||||
|
FEEDSER_AUTHORITY__AUDIENCES__0="api://feedser"
|
||||||
|
FEEDSER_AUTHORITY__REQUIREDSCOPES__0="feedser.jobs.trigger"
|
||||||
|
FEEDSER_AUTHORITY__CLIENTID="feedser-jobs"
|
||||||
|
FEEDSER_AUTHORITY__CLIENTSECRETFILE="/run/secrets/feedser_authority_client"
|
||||||
|
FEEDSER_AUTHORITY__BYPASSNETWORKS__0="127.0.0.1/32"
|
||||||
|
FEEDSER_AUTHORITY__BYPASSNETWORKS__1="::1/128"
|
||||||
|
```
|
||||||
|
|
||||||
|
Store the client secret outside source control (Docker secrets, mounted file,
|
||||||
|
or Kubernetes Secret). Feedser loads the secret during post-configuration, so
|
||||||
|
the value never needs to appear in the YAML template.
|
||||||
|
|
||||||
|
2. Redeploy Feedser:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose --env-file .env -f docker-compose.stella-ops.yml up -d feedser
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Tail the logs: `docker compose logs -f feedser`. Successful `/jobs*` calls now
|
||||||
|
emit `Feedser.Authorization.Audit` entries listing subject, client ID, scopes,
|
||||||
|
remote IP, and whether the bypass CIDR allowed the call. 401 denials always log
|
||||||
|
`bypassAllowed=false` so unauthenticated cron jobs are easy to catch.
|
||||||
|
|
||||||
|
> **Enforcement deadline** – keep `FEEDSER_AUTHORITY__ALLOWANONYMOUSFALLBACK=true`
|
||||||
|
> only while validating the rollout. Set it to `false` (and restart Feedser)
|
||||||
|
> before **2025-12-31 UTC** to require tokens in production.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Optional: request a free quota token
|
||||||
|
|
||||||
|
Anonymous installs allow **{{ quota_anon }} scans per UTC day**.
|
||||||
|
Email `token@stella-ops.org` to receive a signed JWT that raises the limit to
|
||||||
|
**{{ quota_token }} scans/day**. Insert it into `.env`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
STELLA_JWT="paste‑token‑here"
|
||||||
|
docker compose --env-file .env -f docker-compose.stella-ops.yml \
|
||||||
|
exec stella-ops stella set-jwt "$STELLA_JWT"
|
||||||
|
```
|
||||||
|
|
||||||
|
> The UI shows a reminder at 200 scans and throttles above the limit but will
|
||||||
|
> **never block** your pipeline.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Air‑gapped install (Offline Update Kit)
|
||||||
|
|
||||||
|
When running on an isolated network use the **Offline Update Kit (OUK)**:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Download & verify on a connected host
|
||||||
|
curl -LO https://get.stella-ops.org/ouk/stella-ops-offline-kit-v0.1a.tgz
|
||||||
|
curl -LO https://get.stella-ops.org/ouk/stella-ops-offline-kit-v0.1a.tgz.sig
|
||||||
|
|
||||||
|
cosign verify-blob \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
--signature stella-ops-offline-kit-v0.1a.tgz.sig \
|
||||||
|
stella-ops-offline-kit-v0.1a.tgz
|
||||||
|
|
||||||
|
# Transfer → air‑gap → import
|
||||||
|
docker compose --env-file .env -f docker-compose.stella-ops.yml \
|
||||||
|
exec stella admin import-offline-usage-kit stella-ops-offline-kit-v0.1a.tgz
|
||||||
|
```
|
||||||
|
|
||||||
|
*Import is atomic; no service downtime.*
|
||||||
|
|
||||||
|
For details see the dedicated [Offline Kit guide](/offline/).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Next steps
|
||||||
|
|
||||||
|
* **5‑min Quick‑Start:** `/quickstart/`
|
||||||
|
* **CI recipes:** `docs/ci/20_CI_RECIPES.md`
|
||||||
|
* **Plug‑in SDK:** `/plugins/`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Generated {{ "now" | date: "%Y‑%m‑%d" }} — build tags inserted at render time.*
|
||||||
61
docs/23_FAQ_MATRIX.md
Executable file
61
docs/23_FAQ_MATRIX.md
Executable file
@@ -0,0 +1,61 @@
|
|||||||
|
# Stella Ops — Frequently Asked Questions (Matrix)
|
||||||
|
|
||||||
|
## Quick glance
|
||||||
|
|
||||||
|
| Question | Short answer |
|
||||||
|
|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| What is Stella Ops? | A lightning‑fast, SBOM‑first container‑security scanner written in **.NET {{ dotnet }}** with an **Angular {{ angular }}** web UI. |
|
||||||
|
| How fast is it? | Warm scans finish in **\< 5 s** on a 4‑vCPU runner; first scans stay **\< 30 s**. |
|
||||||
|
| Is it free? | Yes – **{{ quota_anon }} scans / day** anonymously. Requesting a free JWT lifts the limit to **{{ quota_token }}**. A gentle reminder shows at 200; exceeding the cap throttles speed but never blocks. |
|
||||||
|
| Does it run offline? | Yes — download the signed **Offline Update Kit**; see `/offline/`. |
|
||||||
|
| Can I extend it? | Yes — restart‑time plug‑ins (`ISbomMutator`, `IVulnerabilityProvider`, `IResultSink`, OPA Rego). Marketplace GA in v1.0. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Road‑map (authoritative link)
|
||||||
|
|
||||||
|
The full, always‑up‑to‑date roadmap lives at <https://stella‑ops.org/roadmap/>.
|
||||||
|
Snapshot:
|
||||||
|
|
||||||
|
| Version | Target date | Locked‑in scope (freeze at β) |
|
||||||
|
|---------|-------------|--------------------------------|
|
||||||
|
| **v0.1 α** | *Late 2025* | Δ‑SBOM engine, nightly re‑scan, Offline Kit v1, {{ quota_anon }}/ {{ quota_token }} quota |
|
||||||
|
| **v0.2 β** | Q1 2026 | *Zastava* forbidden‑image scanner, registry sweeper, SDK β |
|
||||||
|
| **v0.3 β** | Q2 2026 | YAML/Rego policy‑as‑code, SARIF output, OUK auto‑import |
|
||||||
|
| **v0.4 RC** | Q3 2026 | AI remediation advisor, LDAP/AD SSO, pluggable TLS providers |
|
||||||
|
| **v1.0 GA** | Q4 2026 | SLSA L3 provenance, signed plug‑in marketplace |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Technical matrix
|
||||||
|
|
||||||
|
| Category | Detail |
|
||||||
|
|----------|--------|
|
||||||
|
| **Core runtime** | C# 14 on **.NET {{ dotnet }}** |
|
||||||
|
| **UI stack** | **Angular {{ angular }}** + TailwindCSS |
|
||||||
|
| **Container base** | Distroless glibc (x86‑64 & arm64) |
|
||||||
|
| **Data stores** | MongoDB 7 (SBOM + findings), Redis 7 (LRU cache + quota) |
|
||||||
|
| **Release integrity** | Cosign‑signed images & TGZ, reproducible build, SPDX 2.3 SBOM |
|
||||||
|
| **Extensibility** | Plug‑ins in any .NET language (restart load); OPA Rego policies |
|
||||||
|
| **Default quotas** | Anonymous **{{ quota_anon }} scans/day** · JWT **{{ quota_token }}** |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quota enforcement (overview)
|
||||||
|
|
||||||
|
* Counters live in Redis with 24 h keys: `quota:ip:<sha256>` or `quota:tid:<hash>`.
|
||||||
|
* Soft reminder banner at 200 daily scans.
|
||||||
|
* Past the limit: first 30 excess requests delayed 5 s; afterwards 60 s.
|
||||||
|
* Behaviour is identical online and offline (validation local).
|
||||||
|
|
||||||
|
For full flow see `docs/30_QUOTA_ENFORCEMENT_FLOW1.md`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Further reading
|
||||||
|
|
||||||
|
* **Install guide:** `/install/`
|
||||||
|
* **Offline mode:** `/offline/`
|
||||||
|
* **Security policy:** `/security/`
|
||||||
|
* **Governance:** `/governance/`
|
||||||
|
* **Community chat:** Matrix `#stellaops:libera.chat`
|
||||||
94
docs/24_OFFLINE_KIT.md
Executable file
94
docs/24_OFFLINE_KIT.md
Executable file
@@ -0,0 +1,94 @@
|
|||||||
|
# Offline Update Kit (OUK) — Air‑Gap Bundle
|
||||||
|
|
||||||
|
<!--
|
||||||
|
Build‑time variable injection:
|
||||||
|
{{ quota_anon }} = 33
|
||||||
|
{{ quota_token }} = 333
|
||||||
|
{{ dotnet }} = "10 LTS"
|
||||||
|
-->
|
||||||
|
|
||||||
|
The **Offline Update Kit** packages everything Stella Ops needs to run on a
|
||||||
|
completely isolated network:
|
||||||
|
|
||||||
|
| Component | Contents |
|
||||||
|
|-----------|----------|
|
||||||
|
| **Merged vulnerability feeds** | OSV, GHSA plus optional NVD 2.0, CNNVD, CNVD, ENISA, JVN and BDU |
|
||||||
|
| **Container images** | `stella-ops`, *Zastava* sidecar (x86‑64 & arm64) |
|
||||||
|
| **Provenance** | Cosign signature, SPDX 2.3 SBOM, in‑toto SLSA attestation |
|
||||||
|
| **Delta patches** | Daily diff bundles keep size \< 350 MB |
|
||||||
|
|
||||||
|
*Scanner core:* C# 14 on **.NET {{ dotnet }}**.
|
||||||
|
*Imports are idempotent and atomic — no service downtime.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Download & verify
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -LO https://get.stella-ops.org/ouk/stella-ops-offline-kit-<DATE>.tgz
|
||||||
|
curl -LO https://get.stella-ops.org/ouk/stella-ops-offline-kit-<DATE>.tgz.sig
|
||||||
|
|
||||||
|
cosign verify-blob \
|
||||||
|
--key https://stella-ops.org/keys/cosign.pub \
|
||||||
|
--signature stella-ops-offline-kit-<DATE>.tgz.sig \
|
||||||
|
stella-ops-offline-kit-<DATE>.tgz
|
||||||
|
```
|
||||||
|
|
||||||
|
Verification prints **OK** and the SHA‑256 digest; cross‑check against the
|
||||||
|
[changelog](https://git.stella-ops.org/stella-ops/offline-kit/-/releases).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Import on the air‑gapped host
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose --env-file .env \
|
||||||
|
-f docker-compose.stella-ops.yml \
|
||||||
|
exec stella-ops \
|
||||||
|
stella admin import-offline-usage-kit stella-ops-offline-kit-<DATE>.tgz
|
||||||
|
```
|
||||||
|
|
||||||
|
* The CLI validates the Cosign signature **before** activation.
|
||||||
|
* Old feeds are kept until the new bundle is fully verified.
|
||||||
|
* Import time on a SATA SSD: ≈ 25 s for a 300 MB kit.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Delta patch workflow
|
||||||
|
|
||||||
|
1. **Connected site** fetches `stella-ouk-YYYY‑MM‑DD.delta.tgz`.
|
||||||
|
2. Transfer via any medium (USB, portable disk).
|
||||||
|
3. `stella admin import-offline-usage-kit <delta>` applies only changed CVE rows & images.
|
||||||
|
|
||||||
|
Daily deltas are **< 30 MB**; weekly roll‑up produces a fresh full kit.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Quota behaviour offline
|
||||||
|
|
||||||
|
The scanner enforces the same fair‑use limits offline:
|
||||||
|
|
||||||
|
* **Anonymous:** {{ quota_anon }} scans per UTC day
|
||||||
|
* **Free JWT:** {{ quota_token }} scans per UTC day
|
||||||
|
|
||||||
|
Soft reminder at 200 scans; throttle above the ceiling but **never block**.
|
||||||
|
See the detailed rules in
|
||||||
|
[`33_333_QUOTA_OVERVIEW.md`](33_333_QUOTA_OVERVIEW.md).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Troubleshooting
|
||||||
|
|
||||||
|
| Symptom | Explanation | Fix |
|
||||||
|
| -------------------------------------- | ---------------------------------------- | ------------------------------------- |
|
||||||
|
| `could not verify SBOM hash` | Bundle corrupted in transit | Re‑download / re‑copy |
|
||||||
|
| Import hangs at `Applying feeds…` | Low disk space in `/var/lib/stella` | Free ≥ 2 GiB before retry |
|
||||||
|
| `quota exceeded` same day after import | Import resets counters at UTC 00:00 only | Wait until next UTC day or load a JWT |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Related documentation
|
||||||
|
|
||||||
|
* **Install guide:** `/install/#air-gapped`
|
||||||
|
* **Sovereign mode rationale:** `/sovereign/`
|
||||||
|
* **Security policy:** `/security/#reporting-a-vulnerability`
|
||||||
84
docs/29_LEGAL_FAQ_QUOTA.md
Executable file
84
docs/29_LEGAL_FAQ_QUOTA.md
Executable file
@@ -0,0 +1,84 @@
|
|||||||
|
# Legal FAQ — Free‑Tier Quota & AGPL Compliance
|
||||||
|
|
||||||
|
> **Operational behaviour (limits, counters, delays) is documented in
|
||||||
|
> [`33_333_QUOTA_OVERVIEW.md`](33_333_QUOTA_OVERVIEW.md).**
|
||||||
|
> This page covers only the legal aspects of offering Stella Ops as a
|
||||||
|
> service or embedding it into another product while the free‑tier limits are
|
||||||
|
> in place.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Does enforcing a quota violate the AGPL?
|
||||||
|
|
||||||
|
**No.**
|
||||||
|
AGPL‑3.0 does not forbid implementing usage controls in the program itself.
|
||||||
|
Recipients retain the freedoms to run, study, modify and share the software.
|
||||||
|
The Stella Ops quota:
|
||||||
|
|
||||||
|
* Is enforced **solely at the service layer** (Redis counters) — the source
|
||||||
|
code implementing the quota is published under AGPL‑3.0‑or‑later.
|
||||||
|
* Never disables functionality; it introduces *time delays* only after the
|
||||||
|
free allocation is exhausted.
|
||||||
|
* Can be bypassed entirely by rebuilding from source and removing the
|
||||||
|
enforcement middleware — the licence explicitly allows such modifications.
|
||||||
|
|
||||||
|
Therefore the quota complies with §§ 0 & 2 of the AGPL.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Can I redistribute Stella Ops with the quota removed?
|
||||||
|
|
||||||
|
Yes, provided you:
|
||||||
|
|
||||||
|
1. **Publish the full corresponding source code** of your modified version
|
||||||
|
(AGPL § 13 & § 5c), and
|
||||||
|
2. Clearly indicate the changes (AGPL § 5a).
|
||||||
|
|
||||||
|
You may *retain* or *relax* the limits, or introduce your own tiering, as long
|
||||||
|
as the complete modified source is offered to every user of the service.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Embedding in a proprietary appliance
|
||||||
|
|
||||||
|
You may ship Stella Ops inside a hardware or virtual appliance **only if** the
|
||||||
|
entire combined work is distributed under **AGPL‑3.0‑or‑later** and you supply
|
||||||
|
the full source code for both the scanner and your integration glue.
|
||||||
|
|
||||||
|
Shipping an AGPL component while keeping the rest closed‑source violates
|
||||||
|
§ 13 (*“remote network interaction”*).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · SaaS redistribution
|
||||||
|
|
||||||
|
Operating a public SaaS that offers Stella Ops scans to third parties triggers
|
||||||
|
the **network‑use clause**. You must:
|
||||||
|
|
||||||
|
* Provide the complete, buildable source of **your running version** —
|
||||||
|
including quota patches or UI branding.
|
||||||
|
* Present the offer **conspicuously** (e.g. a “Source Code” footer link).
|
||||||
|
|
||||||
|
Failure to do so breaches § 13 and can terminate your licence under § 8.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Is e‑mail collection for the JWT legal?
|
||||||
|
|
||||||
|
* **Purpose limitation (GDPR Art. 5‑1 b):** address is used only to deliver the
|
||||||
|
JWT or optional release notes.
|
||||||
|
* **Data minimisation (Art. 5‑1 c):** no name, IP or marketing preferences are
|
||||||
|
required; a blank e‑mail body suffices.
|
||||||
|
* **Storage limitation (Art. 5‑1 e):** addresses are deleted or hashed after
|
||||||
|
≤ 7 days unless the sender opts into updates.
|
||||||
|
|
||||||
|
Hence the token workflow adheres to GDPR principles.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Change‑log
|
||||||
|
|
||||||
|
| Version | Date | Notes |
|
||||||
|
|---------|------|-------|
|
||||||
|
| **2.0** | 2025‑07‑16 | Removed runtime quota details; linked to new authoritative overview. |
|
||||||
|
| 1.0 | 2024‑12‑20 | Initial legal FAQ. |
|
||||||
93
docs/30_QUOTA_ENFORCEMENT_FLOW1.md
Executable file
93
docs/30_QUOTA_ENFORCEMENT_FLOW1.md
Executable file
@@ -0,0 +1,93 @@
|
|||||||
|
# Quota Enforcement — Flow Diagram (rev 2.1)
|
||||||
|
|
||||||
|
> **Scope** – this document explains *how* the free‑tier limits are enforced
|
||||||
|
> inside the scanner service. For policy rationale and legal aspects see
|
||||||
|
> [`33_333_QUOTA_OVERVIEW.md`](33_333_QUOTA_OVERVIEW.md).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 · Key parameters (rev 2.1)
|
||||||
|
|
||||||
|
| Symbol | Value | Meaning |
|
||||||
|
|--------|-------|---------|
|
||||||
|
| `L_anon` | **{{ quota_anon }}** | Daily ceiling for anonymous users |
|
||||||
|
| `L_jwt` | **{{ quota_token }}** | Daily ceiling for token holders |
|
||||||
|
| `T_warn` | `200` | Soft reminder threshold |
|
||||||
|
| `D_soft` | `5 000 ms` | Delay for *first 30* over‑quota scans |
|
||||||
|
| `D_hard` | `60 000 ms` | Delay for all scans beyond the soft window |
|
||||||
|
|
||||||
|
`L_active` is `L_jwt` if a valid token is present; else `L_anon`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Sequence diagram
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant C as Client
|
||||||
|
participant API as Scanner API
|
||||||
|
participant REDIS as Redis (quota)
|
||||||
|
C->>API: /scan
|
||||||
|
API->>REDIS: INCR quota:<key>
|
||||||
|
REDIS-->>API: new_count
|
||||||
|
alt new_count ≤ L_active
|
||||||
|
API-->>C: 202 Accepted (no delay)
|
||||||
|
else new_count ≤ L_active + 30
|
||||||
|
API->>C: wait D_soft
|
||||||
|
API-->>C: 202 Accepted
|
||||||
|
else
|
||||||
|
API->>C: wait D_hard
|
||||||
|
API-->>C: 202 Accepted
|
||||||
|
end
|
||||||
|
```
|
||||||
|
|
||||||
|
*Counters auto‑expire **24 h** after first increment (00:00 UTC reset).*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Redis key layout
|
||||||
|
|
||||||
|
| Key pattern | TTL | Description |
|
||||||
|
| ---------------------- | ---- | --------------------------------- |
|
||||||
|
| `quota:ip:<sha256>` | 24 h | Anonymous quota per *hashed* IP |
|
||||||
|
| `quota:tid:<sha256>` | 24 h | Token quota per *hashed* token‑ID |
|
||||||
|
| `quota:ip:<sha256>:ts` | 24 h | First‑seen timestamp (ISO 8601) |
|
||||||
|
|
||||||
|
Keys share a common TTL for efficient mass expiry via `redis-cli --scan`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Pseudocode (Go‑style)
|
||||||
|
|
||||||
|
```go
|
||||||
|
func gate(key string, limit int) (delay time.Duration) {
|
||||||
|
cnt, _ := rdb.Incr(ctx, key).Result()
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case cnt <= limit:
|
||||||
|
return 0 // under quota
|
||||||
|
case cnt <= limit+30:
|
||||||
|
return 5 * time.Second
|
||||||
|
default:
|
||||||
|
return 60 * time.Second
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
*The middleware applies `time.Sleep(delay)` **before** processing the scan
|
||||||
|
request; it never returns `HTTP 429` under the free tier.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Metrics & monitoring
|
||||||
|
|
||||||
|
| Metric | PromQL sample | Alert |
|
||||||
|
| ------------------------------ | ------------------------------------------ | --------------------- |
|
||||||
|
| `stella_quota_soft_hits_total` | `increase(...[5m]) > 50` | Many users near limit |
|
||||||
|
| `stella_quota_hard_hits_total` | `rate(...[1h]) > 0.1` | Potential abuse |
|
||||||
|
| Average delay per request | `histogram_quantile(0.95, sum(rate(...)))` | P95 < 1 s expected |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
*Generated {{ "now" | date: "%Y‑%m‑%d" }} — values pulled from central constants.*
|
||||||
120
docs/33_333_QUOTA_OVERVIEW.md
Executable file
120
docs/33_333_QUOTA_OVERVIEW.md
Executable file
@@ -0,0 +1,120 @@
|
|||||||
|
# Free‑Tier Quota — **{{ quota_anon }}/ {{ quota_token }} Scans per UTC Day**
|
||||||
|
|
||||||
|
Stella Ops is free for individual developers and small teams.
|
||||||
|
To avoid registry abuse the scanner enforces a **two‑tier daily quota**
|
||||||
|
— fully offline capable.
|
||||||
|
|
||||||
|
| Mode | Daily ceiling | How to obtain |
|
||||||
|
|------|---------------|---------------|
|
||||||
|
| **Anonymous** | **{{ quota_anon }} scans** | No registration. Works online or air‑gapped. |
|
||||||
|
| **Free JWT token** | **{{ quota_token }} scans** | Email `token@stella-ops.org` (blank body). Bot replies with a signed JWT. |
|
||||||
|
|
||||||
|
*Soft reminder banner appears at 200 scans. Exceeding the limit never blocks –
|
||||||
|
the CLI/UI introduce a delay, detailed below.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Token structure
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"iss": "stella-ops.org",
|
||||||
|
"sub": "free-tier",
|
||||||
|
"tid": "7d2285…", // 32‑byte random token‑ID
|
||||||
|
"tier": {{ quota_token }}, // daily scans allowed
|
||||||
|
"exp": 1767139199 // POSIX seconds (optional) – token expiry
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
* The **token‑ID (`tid`)** – not the e‑mail – is hashed *(SHA‑256 + salt)*
|
||||||
|
and stored for counter lookup.
|
||||||
|
* Verification uses the bundled public key (`keys/cosign.pub`) so **offline
|
||||||
|
hosts validate tokens locally**. An optional `exp` claim may be present;
|
||||||
|
if absent, the default is a far‑future timestamp used solely for schema
|
||||||
|
compatibility.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Enforcement algorithm (rev 2.1)
|
||||||
|
|
||||||
|
| Step | Operation | Typical latency |
|
||||||
|
| ---- | ------------------------------------------------------------------------------ | ------------------------------------ |
|
||||||
|
| 1 | `key = sha256(ip)` *or* `sha256(tid)` | < 0.1 ms |
|
||||||
|
| 2 | `count = INCR quota:<key>` in Redis (24 h TTL) | 0.2 ms (Lua) |
|
||||||
|
| 3 | If `count > limit` → `WAIT delay_ms` | first 30 × 5 000 ms → then 60 000 ms |
|
||||||
|
| 4 | Return HTTP 429 **only if** `delay > 60 s` (should never fire under free tier) | — |
|
||||||
|
|
||||||
|
*Counters reset at **00:00 UTC**.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · CLI / API integration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Example .env
|
||||||
|
docker run --rm \
|
||||||
|
-e DOCKER_HOST="$DOCKER_HOST" \ # remote‑daemon pointer
|
||||||
|
-v "$WORKSPACE/${SBOM_FILE}:/${SBOM_FILE}:ro" \ # mount SBOM under same name at container root
|
||||||
|
-e STELLA_OPS_URL="https://${STELLA_URL}" \ # where the CLI posts findings
|
||||||
|
"$STELLA_URL/registry/stella-cli:latest" \
|
||||||
|
scan --sbom "/${SBOM_FILE}" "$IMAGE"
|
||||||
|
```
|
||||||
|
|
||||||
|
*No JWT? → scanner defaults to anonymous quota.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Data retention & privacy
|
||||||
|
|
||||||
|
| Data | Retention | Purpose |
|
||||||
|
| ---------------------- | ------------------------------------ | ---------------- |
|
||||||
|
| IP hash (`quota:ip:*`) | 7 days, then salted hash only | Abuse rate‑limit |
|
||||||
|
| Token‑ID hash | Until revoked | Counter lookup |
|
||||||
|
| E‑mail (token request) | ≤ 7 days unless newsletters opted‑in | Deliver the JWT |
|
||||||
|
|
||||||
|
*No personal data leaves your infrastructure when running offline.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Common questions
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>What happens at exactly 200 scans?</summary>
|
||||||
|
|
||||||
|
> The UI/CLI shows a yellow “fair‑use reminder”.
|
||||||
|
> No throttling is applied yet.
|
||||||
|
> Once you cross the full limit, the **first 30** over‑quota scans incur a
|
||||||
|
> 5‑second delay; further excess scans delay **60 s** each.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Does the quota differ offline?</summary>
|
||||||
|
|
||||||
|
> No. Counters are evaluated locally in Redis; the same limits apply even
|
||||||
|
> without Internet access.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Can I reset counters manually?</summary>
|
||||||
|
|
||||||
|
> Yes – delete the `quota:*` keys in Redis, but we recommend letting them
|
||||||
|
> expire at midnight to keep statistics meaningful.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Revision history
|
||||||
|
|
||||||
|
| Version | Date | Notes |
|
||||||
|
| ------- | ---------- | ------------------------------------------------------------------- |
|
||||||
|
| **2.1** | 2025‑07‑16 | Consolidated into single source; delays re‑tuned (30 × 5 s → 60 s). |
|
||||||
|
| 2.0 | 2025‑04‑07 | Switched counters from Mongo to Redis. |
|
||||||
|
| 1.0 | 2024‑12‑20 | Initial free‑tier design. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Authoritative source** — any doc or website section that references quotas
|
||||||
|
*must* link to this file instead of duplicating text.
|
||||||
133
docs/40_ARCHITECTURE_OVERVIEW.md
Executable file
133
docs/40_ARCHITECTURE_OVERVIEW.md
Executable file
@@ -0,0 +1,133 @@
|
|||||||
|
# Stella Ops — High‑Level Architecture
|
||||||
|
|
||||||
|
<!--
|
||||||
|
Use constants injected at build:
|
||||||
|
{{ dotnet }} = "10 LTS"
|
||||||
|
{{ angular }} = "20"
|
||||||
|
-->
|
||||||
|
|
||||||
|
This document offers a birds‑eye view of how the major components interact,
|
||||||
|
why the system leans *monolith‑plus‑plug‑ins*, and where extension points live.
|
||||||
|
|
||||||
|
> For a *timeline* of when features arrive, see the public
|
||||||
|
> [road‑map](/roadmap/) — no version details are repeated here.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 · Guiding principles
|
||||||
|
|
||||||
|
| Principle | Rationale |
|
||||||
|
|-----------|-----------|
|
||||||
|
| **SBOM‑first** | Scan existing CycloneDX/SPDX if present; fall back to layer unpack. |
|
||||||
|
| **Δ‑processing** | Re‑analyse only changed layers; reduces P95 warm path to \< 5 s. |
|
||||||
|
| **All‑managed code** | Entire stack is 100 % managed (.NET / TypeScript); no `unsafe` blocks or native extensions — eases review and reproducible builds. |
|
||||||
|
| **Restart‑time plug‑ins** | Avoids the attack surface of runtime DLL injection; still allows custom scanners & exporters. |
|
||||||
|
| **Sovereign‑by‑design** | No mandatory outbound traffic; Offline Kit distributes feeds. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Module graph
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph TD
|
||||||
|
A(API Gateway)
|
||||||
|
B1(Scanner Core<br/>.NET latest LTS)
|
||||||
|
B2(Feedser service<br/>vuln ingest/merge/export)
|
||||||
|
B3(Policy Engine OPA)
|
||||||
|
C1(Redis 7)
|
||||||
|
C2(MongoDB 7)
|
||||||
|
D(UI SPA<br/>Angular latest version)
|
||||||
|
A -->|gRPC| B1
|
||||||
|
B1 -->|async| B2
|
||||||
|
B1 -->|OPA| B3
|
||||||
|
B1 --> C1
|
||||||
|
B1 --> C2
|
||||||
|
A -->|REST/WS| D
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Key components
|
||||||
|
|
||||||
|
| Component | Language / tech | Responsibility |
|
||||||
|
| ---------------------------- | --------------------- | ---------------------------------------------------- |
|
||||||
|
| **API Gateway** | ASP.NET Minimal API | Auth (JWT), quotas, request routing |
|
||||||
|
| **Scanner Core** | C# 12, Polly | Layer diffing, SBOM generation, vuln correlation |
|
||||||
|
| **Feedser (vulnerability ingest/merge/export service)** | C# source-gen workers | Consolidate NVD + regional CVE feeds into the canonical MongoDB store and drive JSON / Trivy DB exports |
|
||||||
|
| **Policy Engine** | OPA (Rego) | admission decisions, custom org rules |
|
||||||
|
| **Redis 7** | Key‑DB compatible | LRU cache, quota counters |
|
||||||
|
| **MongoDB 7** | WiredTiger | SBOM & findings storage |
|
||||||
|
| **Angular {{ angular }} UI** | RxJS, Tailwind | Dashboard, reports, admin UX |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Plug‑in system
|
||||||
|
|
||||||
|
* Discovered once at start‑up from `/opt/stella/plugins/**`.
|
||||||
|
* Runs under Linux user `stella‑plugin` (UID 1001).
|
||||||
|
* Extension points:
|
||||||
|
|
||||||
|
* `ISbomMutator`
|
||||||
|
* `IVulnerabilityProvider`
|
||||||
|
* `IResultSink`
|
||||||
|
* Policy files (`*.rego`)
|
||||||
|
* Each DLL is SHA‑256 hashed; digest embedded in the run report for provenance.
|
||||||
|
|
||||||
|
Hot‑plugging is deferred until after v 1.0 for security review.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Data & control flow
|
||||||
|
|
||||||
|
1. **Client** calls `/api/scan` with image reference.
|
||||||
|
2. **Gateway** enforces quota, forwards to **Scanner Core** via gRPC.
|
||||||
|
3. **Core**:
|
||||||
|
|
||||||
|
* Queries Redis for cached SBOM.
|
||||||
|
* If miss → pulls layers, generates SBOM.
|
||||||
|
* Executes plug‑ins (mutators, additional scanners).
|
||||||
|
4. **Policy Engine** evaluates `scanResult` document.
|
||||||
|
5. **Findings** stored in MongoDB; WebSocket event notifies UI.
|
||||||
|
6. **ResultSink plug‑ins** export to Slack, Splunk, JSON file, etc.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Security hardening
|
||||||
|
|
||||||
|
| Surface | Mitigation |
|
||||||
|
| ----------------- | ------------------------------------------------------------ |
|
||||||
|
| Container runtime | Distroless base, non‑root UID, seccomp + AppArmor |
|
||||||
|
| Plug‑in sandbox | Separate UID, SELinux profile, cgroup 1 CPU / 256 MiB |
|
||||||
|
| Supply chain | Cosign signatures, in‑toto SLSA Level 3 (target) |
|
||||||
|
| Secrets | `Docker secrets` or K8s `Secret` mounts; never hard‑coded |
|
||||||
|
| Quota abuse | Redis rate‑limit gates (see `30_QUOTA_ENFORCEMENT_FLOW1.md`) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 · Build & release pipeline (TL;DR)
|
||||||
|
|
||||||
|
* **Git commits** trigger CI → unit / integration / E2E tests.
|
||||||
|
* Successful merge to `main`:
|
||||||
|
|
||||||
|
* Build `.NET {{ dotnet }}` trimmed self‑contained binary.
|
||||||
|
* `docker build --sbom=spdx-json`.
|
||||||
|
* Sign image and tarball with Cosign.
|
||||||
|
* Attach SBOM + provenance; push to registry and download portal.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 · Future extraction path
|
||||||
|
|
||||||
|
Although the default deployment is a single container, each sub‑service can be
|
||||||
|
extracted:
|
||||||
|
|
||||||
|
* Feedser → standalone cron pod.
|
||||||
|
* Policy Engine → side‑car (OPA) with gRPC contract.
|
||||||
|
* ResultSink → queue worker (RabbitMQ or Azure Service Bus).
|
||||||
|
|
||||||
|
Interfaces are stable **as of v0.2 β**; extraction requires a recompilation
|
||||||
|
only, not a fork of the core.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Last updated {{ "now" | date: "%Y‑%m‑%d" }} – constants auto‑injected.*
|
||||||
101
docs/60_POLICY_TEMPLATES.md
Executable file
101
docs/60_POLICY_TEMPLATES.md
Executable file
@@ -0,0 +1,101 @@
|
|||||||
|
# Policy Templates — YAML & Rego Examples
|
||||||
|
|
||||||
|
Stella Ops lets you enforce *pass / fail* rules in two ways:
|
||||||
|
|
||||||
|
1. **YAML “quick policies”** — simple equality / inequality checks.
|
||||||
|
2. **OPA Rego modules** — full‑power logic for complex organisations.
|
||||||
|
|
||||||
|
> **Precedence:** If the same image is subject to both a YAML rule *and* a Rego
|
||||||
|
> module, the **Rego result wins**. That is, `deny` in Rego overrides any
|
||||||
|
> `allow` in YAML.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · YAML quick policy
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# file: policies/root_user.yaml
|
||||||
|
version: 1
|
||||||
|
id: root-user
|
||||||
|
description: Disallow images that run as root
|
||||||
|
severity: high
|
||||||
|
|
||||||
|
rules:
|
||||||
|
- field: ".config.user"
|
||||||
|
operator: "equals"
|
||||||
|
value: "root"
|
||||||
|
deny_message: "Image runs as root — block."
|
||||||
|
```
|
||||||
|
|
||||||
|
Place the file under `/opt/stella/plugins/policies/`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Rego example (deny on critical CVE)
|
||||||
|
|
||||||
|
```rego
|
||||||
|
# file: policies/deny_critical.rego
|
||||||
|
package stella.policy
|
||||||
|
|
||||||
|
# `deny` is a partial set: it is empty unless a rule below fires (no `default` needed)
|
||||||
|
|
||||||
|
deny[msg] {
|
||||||
|
some f
|
||||||
|
input.findings[f].severity == "critical"
|
||||||
|
msg := sprintf("Critical CVE %s – build blocked", [input.findings[f].id])
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
*Input schema* — the Rego `input` document matches the public
|
||||||
|
`ScanResult` POCO (see SDK). Use the bundled JSON schema in
|
||||||
|
`share/schemas/scanresult.schema.json` for IDE autocompletion.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Pass‑through warnings (Rego)
|
||||||
|
|
||||||
|
Return a `warn` array to surface non‑blocking messages in the UI:
|
||||||
|
|
||||||
|
```rego
|
||||||
|
package stella.policy
|
||||||
|
|
||||||
|
warn[msg] {
|
||||||
|
input.image.base == "ubuntu:16.04"
|
||||||
|
msg := "Image uses EOL Ubuntu 16.04 — please upgrade."
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Warnings decrement the **quality score** but do *not* affect the CLI exit
|
||||||
|
code.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Testing policies locally
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# run policy evaluation without pushing to DB
|
||||||
|
stella scan alpine:3.20 --policy-only \
|
||||||
|
--policies ./policies/
|
||||||
|
```
|
||||||
|
|
||||||
|
The CLI prints `PASS`, `WARN` or `DENY` plus structured JSON.
|
||||||
|
|
||||||
|
Unit‑test your Rego modules with the OPA binary:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
opa test policies/
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Developer quick‑start (plug‑ins)
|
||||||
|
|
||||||
|
Need logic beyond Rego? Implement a plug‑in via **C#/.NET {{ dotnet }}** and
|
||||||
|
the `StellaOps.SDK` NuGet:
|
||||||
|
|
||||||
|
* Tutorial: [`dev/30_PLUGIN_DEV_GUIDE.md`](dev/30_PLUGIN_DEV_GUIDE.md)
|
||||||
|
* Quick reference: `/plugins/`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Last updated {{ "now" | date: "%Y‑%m‑%d" }} — constants auto‑injected.*
|
||||||
19
docs/AGENTS.md
Normal file
19
docs/AGENTS.md
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
# Docs & Enablement Guild
|
||||||
|
|
||||||
|
## Mission
|
||||||
|
Produce and maintain offline-friendly documentation for StellaOps modules, covering architecture, configuration, operator workflows, and developer onboarding.
|
||||||
|
|
||||||
|
## Scope Highlights
|
||||||
|
- Authority docs (`docs/dev/31_AUTHORITY_PLUGIN_DEVELOPER_GUIDE.md`, upcoming `docs/11_AUTHORITY.md`).
|
||||||
|
- Feedser quickstarts, CLI guides, Offline Kit manuals.
|
||||||
|
- Release notes and migration playbooks.
|
||||||
|
|
||||||
|
## Operating Principles
|
||||||
|
- Keep guides deterministic and in sync with shipped configuration samples.
|
||||||
|
- Prefer tables/checklists for operator steps; flag security-sensitive actions.
|
||||||
|
- Update `docs/TASKS.md` whenever work items change status (TODO/DOING/REVIEW/DONE/BLOCKED).
|
||||||
|
|
||||||
|
## Coordination
|
||||||
|
- Authority Core & Plugin teams for auth-related changes.
|
||||||
|
- Security Guild for threat-model outputs and mitigations.
|
||||||
|
- DevEx for tooling diagrams and documentation pipeline.
|
||||||
190
docs/ARCHITECTURE_FEEDSER.md
Normal file
190
docs/ARCHITECTURE_FEEDSER.md
Normal file
@@ -0,0 +1,190 @@
|
|||||||
|
# ARCHITECTURE.md — **StellaOps.Feedser**
|
||||||
|
|
||||||
|
> **Goal**: Build a sovereign-ready, self-hostable **feed-merge service** that ingests authoritative vulnerability sources, normalizes and de-duplicates them into **MongoDB**, and exports **JSON** and **Trivy-compatible DB** artifacts.
|
||||||
|
> **Form factor**: Long-running **Web Service** with **REST APIs** (health, status, control) and an embedded **internal cron scheduler**. Controllable by StellaOps.Cli (# stella db ...)
|
||||||
|
> **No signing inside Feedser** (signing is a separate pipeline step).
|
||||||
|
> **Runtime SDK baseline**: .NET 10 Preview 7 (SDK 10.0.100-preview.7.25380.108) targeting `net10.0`, aligned with the deployed api.stella-ops.org service.
|
||||||
|
> **Four explicit stages**:
|
||||||
|
>
|
||||||
|
> 1. **Source Download** → raw documents.
|
||||||
|
> 2. **Parse & Normalize** → schema-validated DTOs enriched with canonical identifiers.
|
||||||
|
> 3. **Merge & Deduplicate** → precedence-aware canonical records persisted to MongoDB.
|
||||||
|
> 4. **Export** → JSON or TrivyDB (full or delta), then (externally) sign/publish.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1) Naming & Solution Layout
|
||||||
|
|
||||||
|
**Source connectors** namespace prefix: `StellaOps.Feedser.Source.*`
|
||||||
|
**Exporters**:
|
||||||
|
|
||||||
|
* `StellaOps.Feedser.Exporter.Json`
|
||||||
|
* `StellaOps.Feedser.Exporter.TrivyDb`
|
||||||
|
|
||||||
|
**Projects** (`/src`):
|
||||||
|
|
||||||
|
```
|
||||||
|
StellaOps.Feedser.WebService/ # ASP.NET Core (Minimal API, net10.0 preview) WebService + embedded scheduler
|
||||||
|
StellaOps.Feedser.Core/ # Domain models, pipelines, merge/dedupe engine, jobs orchestration
|
||||||
|
StellaOps.Feedser.Models/ # Canonical POCOs, JSON Schemas, enums
|
||||||
|
StellaOps.Feedser.Storage.Mongo/ # Mongo repositories, GridFS access, indexes, resume "flags"
|
||||||
|
StellaOps.Feedser.Source.Common/ # HTTP clients, rate-limiters, schema validators, parsers utils
|
||||||
|
StellaOps.Feedser.Source.Cve/
|
||||||
|
StellaOps.Feedser.Source.Nvd/
|
||||||
|
StellaOps.Feedser.Source.Ghsa/
|
||||||
|
StellaOps.Feedser.Source.Osv/
|
||||||
|
StellaOps.Feedser.Source.Jvn/
|
||||||
|
StellaOps.Feedser.Source.CertCc/
|
||||||
|
StellaOps.Feedser.Source.Kev/
|
||||||
|
StellaOps.Feedser.Source.Kisa/
|
||||||
|
StellaOps.Feedser.Source.CertIn/
|
||||||
|
StellaOps.Feedser.Source.CertFr/
|
||||||
|
StellaOps.Feedser.Source.CertBund/
|
||||||
|
StellaOps.Feedser.Source.Acsc/
|
||||||
|
StellaOps.Feedser.Source.Cccs/
|
||||||
|
StellaOps.Feedser.Source.Ru.Bdu/ # HTML→schema with LLM fallback (gated)
|
||||||
|
StellaOps.Feedser.Source.Ru.Nkcki/ # PDF/HTML bulletins → structured
|
||||||
|
StellaOps.Feedser.Source.Vndr.Msrc/
|
||||||
|
StellaOps.Feedser.Source.Vndr.Cisco/
|
||||||
|
StellaOps.Feedser.Source.Vndr.Oracle/
|
||||||
|
StellaOps.Feedser.Source.Vndr.Adobe/ # APSB ingest; emits vendor RangePrimitives with adobe.track/platform/priority telemetry + fixed-status provenance.
|
||||||
|
StellaOps.Feedser.Source.Vndr.Apple/
|
||||||
|
StellaOps.Feedser.Source.Vndr.Chromium/
|
||||||
|
StellaOps.Feedser.Source.Vndr.Vmware/
|
||||||
|
StellaOps.Feedser.Source.Distro.RedHat/
|
||||||
|
StellaOps.Feedser.Source.Distro.Debian/ # Fetches DSA list + detail HTML, emits EVR RangePrimitives with per-release provenance and telemetry.
|
||||||
|
StellaOps.Feedser.Source.Distro.Ubuntu/ # Ubuntu Security Notices connector (JSON index → EVR ranges with ubuntu.pocket telemetry).
|
||||||
|
StellaOps.Feedser.Source.Distro.Suse/ # CSAF fetch pipeline emitting NEVRA RangePrimitives with suse.status vendor telemetry.
|
||||||
|
StellaOps.Feedser.Source.Ics.Cisa/
|
||||||
|
StellaOps.Feedser.Source.Ics.Kaspersky/
|
||||||
|
StellaOps.Feedser.Normalization/ # Canonical mappers, validators, version-range normalization
|
||||||
|
StellaOps.Feedser.Merge/ # Identity graph, precedence, deterministic merge
|
||||||
|
StellaOps.Feedser.Exporter.Json/
|
||||||
|
StellaOps.Feedser.Exporter.TrivyDb/
|
||||||
|
StellaOps.Feedser.<Component>.Tests/ # Component-scoped unit/integration suites (Core, Storage.Mongo, Source.*, Exporter.*, WebService, etc.)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2) Runtime Shape
|
||||||
|
|
||||||
|
**Process**: single service (`StellaOps.Feedser.WebService`)
|
||||||
|
|
||||||
|
* `Program.cs`: top-level entry using **Generic Host**, **DI**, **Options** binding from `appsettings.json` + environment + optional `feedser.yaml`.
|
||||||
|
* Built-in **scheduler** (cron-like) + **job manager** with **distributed locks** in Mongo to prevent overlaps, enforce timeouts, allow cancel/kill.
|
||||||
|
* **REST APIs** for health/readiness/progress/trigger/kill/status.
|
||||||
|
|
||||||
|
**Key NuGet concepts** (indicative): `MongoDB.Driver`, `Polly` (retry/backoff), `System.Threading.Channels`, `Microsoft.Extensions.Http`, `Microsoft.Extensions.Hosting`, `Serilog`, `OpenTelemetry`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3) Data Storage — **MongoDB** (single source of truth)
|
||||||
|
|
||||||
|
**Database**: `feedser`
|
||||||
|
**Write concern**: `majority` for merge/export state, `acknowledged` for raw docs.
|
||||||
|
**Collections** (with “flags”/resume points):
|
||||||
|
|
||||||
|
* `source`
|
||||||
|
* `_id`, `name`, `type`, `baseUrl`, `auth`, `notes`.
|
||||||
|
* `source_state`
|
||||||
|
* Keys: `sourceName` (unique), `enabled`, `cursor`, `lastSuccess`, `failCount`, `backoffUntil`, `paceOverrides`, `paused`.
|
||||||
|
* Drives incremental fetch/parse/map resume and operator pause/pace controls.
|
||||||
|
* `document`
|
||||||
|
* `_id`, `sourceName`, `uri`, `fetchedAt`, `sha256`, `contentType`, `status`, `metadata`, `gridFsId`, `etag`, `lastModified`.
|
||||||
|
* Index `{sourceName:1, uri:1}` unique; optional TTL for superseded versions.
|
||||||
|
* `dto`
|
||||||
|
* `_id`, `sourceName`, `documentId`, `schemaVer`, `payload` (BSON), `validatedAt`.
|
||||||
|
* Index `{sourceName:1, documentId:1}`.
|
||||||
|
* `advisory`
|
||||||
|
* `_id`, `advisoryKey`, `title`, `summary`, `lang`, `published`, `modified`, `severity`, `exploitKnown`.
|
||||||
|
* Unique `{advisoryKey:1}` plus indexes on `modified` and `published`.
|
||||||
|
* `alias`
|
||||||
|
* `advisoryId`, `scheme`, `value` with index `{scheme:1, value:1}`.
|
||||||
|
* `affected`
|
||||||
|
* `advisoryId`, `platform`, `name`, `versionRange`, `cpe`, `purl`, `fixedBy`, `introducedVersion`.
|
||||||
|
* Index `{platform:1, name:1}`, `{advisoryId:1}`.
|
||||||
|
* `reference`
|
||||||
|
* `advisoryId`, `url`, `kind`, `sourceTag` (e.g., advisory/patch/kb).
|
||||||
|
* Flags collections: `kev_flag`, `ru_flags`, `jp_flags`, `psirt_flags` keyed by `advisoryId`.
|
||||||
|
* `merge_event`
|
||||||
|
* `_id`, `advisoryKey`, `beforeHash`, `afterHash`, `mergedAt`, `inputs` (document ids).
|
||||||
|
* `export_state`
|
||||||
|
* `_id` (`json`/`trivydb`), `baseExportId`, `baseDigest`, `lastFullDigest`, `lastDeltaDigest`, `exportCursor`, `targetRepo`, `exporterVersion`.
|
||||||
|
* `locks`
|
||||||
|
* `_id` (`jobKey`), `holder`, `acquiredAt`, `heartbeatAt`, `leaseMs`, `ttlAt` (TTL index cleans dead locks).
|
||||||
|
* `jobs`
|
||||||
|
* `_id`, `type`, `args`, `state`, `startedAt`, `endedAt`, `error`, `owner`, `heartbeatAt`, `timeoutMs`.
|
||||||
|
|
||||||
|
**GridFS buckets**: `fs.documents` for raw large payloads; referenced by `document.gridFsId`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4) Job & Scheduler Model
|
||||||
|
|
||||||
|
* Scheduler stores cron expressions per source/exporter in config; persists next-run pointers in Mongo.
|
||||||
|
* Jobs acquire locks (`locks` collection) to ensure singleton execution per source/exporter.
|
||||||
|
* Supports manual triggers via API endpoints (`POST /jobs/{type}`) and pause/resume toggles per source.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5) Connector Contracts
|
||||||
|
|
||||||
|
Connectors implement:
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
public interface IFeedConnector {
|
||||||
|
string SourceName { get; }
|
||||||
|
Task FetchAsync(IServiceProvider sp, CancellationToken ct);
|
||||||
|
Task ParseAsync(IServiceProvider sp, CancellationToken ct);
|
||||||
|
Task MapAsync(IServiceProvider sp, CancellationToken ct);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
* Fetch populates `document` rows respecting rate limits, conditional GET, and `source_state.cursor`.
|
||||||
|
* Parse validates schema (JSON Schema, XSD) and writes sanitized DTO payloads.
|
||||||
|
* Map produces canonical advisory rows + provenance entries; must be idempotent.
|
||||||
|
* Base helpers in `StellaOps.Feedser.Source.Common` provide HTTP clients, retry policies, and watermark utilities.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6) Merge & Normalization
|
||||||
|
|
||||||
|
* Canonical model stored in `StellaOps.Feedser.Models` with serialization contracts used by storage/export layers.
|
||||||
|
* `StellaOps.Feedser.Normalization` handles NEVRA/EVR/PURL range parsing, CVSS normalization, localization.
|
||||||
|
* `StellaOps.Feedser.Merge` builds alias graphs keyed by CVE first, then falls back to vendor/regional IDs.
|
||||||
|
* Precedence rules: PSIRT/OVAL overrides generic ranges; KEV only toggles exploitation; regional feeds enrich severity but don’t override vendor truth.
|
||||||
|
* Determinism enforced via canonical JSON hashing logged in `merge_event`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7) Exporters
|
||||||
|
|
||||||
|
* JSON exporter mirrors `aquasecurity/vuln-list` layout with deterministic ordering and reproducible timestamps.
|
||||||
|
* Trivy DB exporter shells out to `trivy-db build`, produces Bolt archives, and reuses unchanged blobs from the last full baseline when running in delta mode. The exporter annotates `metadata.json` with `mode`, `baseExportId`, `baseManifestDigest`, `resetBaseline`, and `delta.changedFiles[]`/`delta.removedPaths[]`, and honours `publishFull` / `publishDelta` (ORAS) plus `includeFull` / `includeDelta` (offline bundle) toggles.
|
||||||
|
* `StellaOps.Feedser.Storage.Mongo` provides cursors for delta exports based on `export_state.exportCursor` and the persisted per-file manifest (`export_state.files`).
|
||||||
|
* Export jobs produce OCI tarballs (layer media type `application/vnd.aquasec.trivy.db.layer.v1.tar+gzip`) and optionally push via ORAS; `metadata.json` accompanies each layout so mirrors can decide between full refreshes and deltas.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8) Observability
|
||||||
|
|
||||||
|
* Serilog structured logging with enrichment fields (`source`, `uri`, `stage`, `durationMs`).
|
||||||
|
* OpenTelemetry traces around fetch/parse/map/export; metrics for rate limit hits, schema failures, dedupe ratios, package size. Connector HTTP metrics are emitted via the shared `feedser.source.http.*` instruments tagged with `feedser.source=<connector>` so per-source dashboards slice on that label instead of bespoke metric names.
|
||||||
|
* Prometheus scraping endpoint served by WebService.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9) Security Considerations
|
||||||
|
|
||||||
|
* Offline-first: connectors only reach allowlisted hosts.
|
||||||
|
* BDU LLM fallback gated by config flag; logs audit trail with confidence score.
|
||||||
|
* No secrets written to logs; secrets loaded via environment or mounted files.
|
||||||
|
* Signing handled outside Feedser pipeline.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10) Deployment Notes
|
||||||
|
|
||||||
|
* Default storage MongoDB; for air-gapped, bundle Mongo image + seeded data backup.
|
||||||
|
* Horizontal scale achieved via multiple web service instances sharing Mongo locks.
|
||||||
|
* Provide `feedser.yaml` template describing sources, rate limits, and export settings.
|
||||||
67
docs/README.md
Executable file
67
docs/README.md
Executable file
@@ -0,0 +1,67 @@
|
|||||||
|
# Stella Ops
|
||||||
|
|
||||||
|
> **Self‑hosted, SBOM‑first DevSecOps platform – offline‑friendly, AGPL‑3.0, free up to {{ quota_token }} scans per UTC day (soft delay only, never blocks).**
|
||||||
|
|
||||||
|
Stella Ops lets you discover container vulnerabilities in **< 5 s** without sending a single byte outside your network.
|
||||||
|
Everything here is open‑source and versioned — when you check out a git tag, the docs match the code you are running.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🚀 Start here (first 60 minutes)
|
||||||
|
|
||||||
|
| Step | What you will learn | Doc |
|
||||||
|
|------|--------------------|-----|
|
||||||
|
| 1 ️⃣ | 90‑second elevator pitch & pillars | **[What Is Stella Ops?](01_WHAT_IS_IT.md)** |
|
||||||
|
| 2 ️⃣ | Pain points it solves | **[Why Does It Exist?](02_WHY.md)** |
|
||||||
|
| 3 ️⃣ | Install & run a scan in 10 min | **[Install Guide](21_INSTALL_GUIDE.md)** |
|
||||||
|
| 4 ️⃣ | Components & data‑flow | **[High‑Level Architecture](07_HIGH_LEVEL_ARCHITECTURE.md)** |
|
||||||
|
| 5 ️⃣ | Integrate the CLI / REST API | **[API & CLI Reference](09_API_CLI_REFERENCE.md)** |
|
||||||
|
| 6 ️⃣ | Vocabulary used throughout the docs | **[Glossary](14_GLOSSARY_OF_TERMS.md)** |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📚 Complete Table of Contents
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Click to expand the full docs index</summary>
|
||||||
|
|
||||||
|
### Overview
|
||||||
|
- **01 – [What Is Stella Ops?](01_WHAT_IS_IT.md)**
|
||||||
|
- **02 – [Why Does It Exist?](02_WHY.md)**
|
||||||
|
- **03 – [Vision & Road‑map](03_VISION.md)**
|
||||||
|
- **04 – [Feature Matrix](04_FEATURE_MATRIX.md)**
|
||||||
|
|
||||||
|
### Reference & concepts
|
||||||
|
- **05 – [System Requirements Specification](05_SYSTEM_REQUIREMENTS_SPEC.md)**
|
||||||
|
- **07 – [High‑Level Architecture](07_HIGH_LEVEL_ARCHITECTURE.md)**
|
||||||
|
- **08 – Module Specifications**
|
||||||
|
- [README](08_MODULE_SPECIFICATIONS/README.md)
|
||||||
|
- [`backend_api.md`](08_MODULE_SPECIFICATIONS/backend_api.md)
|
||||||
|
- [`zastava_scanner.md`](08_MODULE_SPECIFICATIONS/zastava_scanner.md)
|
||||||
|
- [`registry_scanner.md`](08_MODULE_SPECIFICATIONS/registry_scanner.md)
|
||||||
|
- [`nightly_scheduler.md`](08_MODULE_SPECIFICATIONS/nightly_scheduler.md)
|
||||||
|
- **09 – [API & CLI Reference](09_API_CLI_REFERENCE.md)**
|
||||||
|
- **10 – [Plug‑in SDK Guide](10_PLUGIN_SDK_GUIDE.md)**
|
||||||
|
- **11 – [Data Schemas](11_DATA_SCHEMAS.md)**
|
||||||
|
- **12 – [Performance Workbook](12_PERFORMANCE_WORKBOOK.md)**
|
||||||
|
- **13 – [Release‑Engineering Playbook](13_RELEASE_ENGINEERING_PLAYBOOK.md)**
|
||||||
|
|
||||||
|
### User & operator guides
|
||||||
|
- **14 – [Glossary](14_GLOSSARY_OF_TERMS.md)**
|
||||||
|
- **15 – [UI Guide](15_UI_GUIDE.md)**
|
||||||
|
- **17 – [Security Hardening Guide](17_SECURITY_HARDENING_GUIDE.md)**
|
||||||
|
- **18 – [Coding Standards](18_CODING_STANDARDS.md)**
|
||||||
|
- **19 – [Test‑Suite Overview](19_TEST_SUITE_OVERVIEW.md)**
|
||||||
|
- **21 – [Install Guide](21_INSTALL_GUIDE.md)**
|
||||||
|
- **22 – [CI/CD Recipes Library](ci/20_CI_RECIPES.md)**
|
||||||
|
- **23 – [FAQ](23_FAQ_MATRIX.md)**
|
||||||
|
- **24 – [Offline Update Kit Admin Guide](24_OUK_ADMIN_GUIDE.md)**
|
||||||
|
|
||||||
|
### Legal & licence
|
||||||
|
- **29 – [Legal & Quota FAQ](29_LEGAL_FAQ_QUOTA.md)**
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
© 2025 Stella Ops contributors – licensed AGPL‑3.0‑or‑later
|
||||||
12
docs/TASKS.md
Normal file
12
docs/TASKS.md
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
# Docs Guild Task Board (UTC 2025-10-10)
|
||||||
|
|
||||||
|
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||||
|
|----|--------|----------|------------|-------------|---------------|
|
||||||
|
| DOC4.AUTH-PDG | REVIEW | Docs Guild, Plugin Team | PLG6.DOC | Copy-edit `docs/dev/31_AUTHORITY_PLUGIN_DEVELOPER_GUIDE.md`, export lifecycle diagram, add LDAP RFC cross-link. | ✅ PR merged with polish; ✅ Diagram committed; ✅ Slack handoff posted. |
|
||||||
|
| DOC1.AUTH | TODO | Docs Guild, Authority Core | CORE5B.DOC | Draft `docs/11_AUTHORITY.md` covering architecture, configuration, bootstrap flows. | ✅ Architecture + config sections approved by Core; ✅ Samples reference latest options; ✅ Offline note added. |
|
||||||
|
| DOC3.Feedser-Authority | DOING (2025-10-10) | Docs Guild, DevEx | FSR4 | Polish operator/runbook sections (DOC3/DOC5) to document Feedser authority rollout, bypass logging, and enforcement checklist. | ✅ DOC3/DOC5 updated; ✅ enforcement deadline highlighted; ✅ Docs guild sign-off. |
|
||||||
|
| DOC5.Feedser-Runbook | TODO | Docs Guild | DOC3.Feedser-Authority | Produce dedicated Feedser authority audit runbook covering log fields, monitoring recommendations, and troubleshooting steps. | ✅ Runbook published; ✅ linked from DOC3/DOC5; ✅ alerting guidance included. |
|
||||||
|
| FEEDDOCS-DOCS-05-001 | DONE (2025-10-11) | Docs Guild | FEEDMERGE-ENGINE-04-001, FEEDMERGE-ENGINE-04-002 | Publish Feedser conflict resolution runbook covering precedence workflow, merge-event auditing, and Sprint 3 metrics. | ✅ `docs/ops/feedser-conflict-resolution.md` committed; ✅ metrics/log tables align with latest merge code; ✅ Ops alert guidance handed to Feedser team. |
|
||||||
|
| FEEDDOCS-DOCS-05-002 | TODO | Docs Guild, Feedser Ops | FEEDDOCS-DOCS-05-001 | Capture ops sign-off: circulate conflict runbook, tune alert thresholds, and document rollout decisions in change log. | ✅ Ops review recorded; ✅ alert thresholds finalised; ✅ change-log entry linked from runbook. |
|
||||||
|
|
||||||
|
> Update statuses (TODO/DOING/REVIEW/DONE/BLOCKED) as progress changes. Keep guides in sync with configuration samples under `etc/`.
|
||||||
18
docs/_includes/CONSTANTS.md
Executable file
18
docs/_includes/CONSTANTS.md
Executable file
@@ -0,0 +1,18 @@
|
|||||||
|
### `docs/_includes/CONSTANTS.md`
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
# Shared constants for both the technical docs (Markdown) and the marketing
|
||||||
|
# site (Nunjucks). Eleventy injects these variables into every template.
|
||||||
|
# Never hard‑code the values elsewhere — lint‑ci will block the merge.
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
dotnet: "10 LTS" # Runs on .NET 10 (LTS channel)
|
||||||
|
angular: "20" # Front‑end framework major
|
||||||
|
quota_anon: 33 # Anonymous daily scans
|
||||||
|
quota_token: 333 # Daily scans with free JWT
|
||||||
|
slowdown: "5–60 s" # Delay window after exceeding quota
|
||||||
|
|
||||||
|
# Add new keys here; update the docs linter pattern in .gitlab-ci.yml.
|
||||||
|
---
|
||||||
258
docs/ci/20_CI_RECIPES.md
Executable file
258
docs/ci/20_CI_RECIPES.md
Executable file
@@ -0,0 +1,258 @@
|
|||||||
|
# Stella Ops CI Recipes — (2025‑08‑04)
|
||||||
|
|
||||||
|
## 0 · Key variables (export these once)
|
||||||
|
|
||||||
|
| Variable | Meaning | Typical value |
|
||||||
|
| ------------- | --------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------- |
|
||||||
|
| `STELLA_URL` | Host that: ① stores the **CLI** & **SBOM‑builder** images under `/registry` **and** ② receives API calls at `https://$STELLA_URL` | `stella-ops.ci.acme.example` |
|
||||||
|
| `DOCKER_HOST` | How containers reach your Docker daemon (because we no longer mount `/var/run/docker.sock`) | `tcp://docker:2375` |
|
||||||
|
| `WORKSPACE` | Directory where the pipeline stores artefacts (SBOM file) | `$(pwd)` |
|
||||||
|
| `IMAGE` | The image you are building & scanning | `acme/backend:sha-${COMMIT_SHA}` |
|
||||||
|
| `SBOM_FILE` | Immutable SBOM name – `<image-ref>‑YYYYMMDDThhmmssZ.sbom.json` | `acme_backend_sha‑abc123‑20250804T153050Z.sbom.json` |
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export STELLA_URL="stella-ops.ci.acme.example"
|
||||||
|
export DOCKER_HOST="tcp://docker:2375" # Jenkins/Circle often expose it like this
|
||||||
|
export WORKSPACE="$(pwd)"
|
||||||
|
export IMAGE="acme/backend:sha-${COMMIT_SHA}"
|
||||||
|
export SBOM_FILE="$(echo "${IMAGE}" | tr '/:+' '__')-$(date -u +%Y%m%dT%H%M%SZ).sbom.json"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · SBOM creation strategies
|
||||||
|
|
||||||
|
### Option A – **Buildx attested SBOM** (preferred if you can use BuildKit)
|
||||||
|
|
||||||
|
You pass **two build args** so the Dockerfile can run the builder and copy the result out of the build context.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker buildx build \
|
||||||
|
--build-arg STELLA_SBOM_BUILDER="$STELLA_URL/registry/stella-sbom-builder:latest" \
|
||||||
|
--provenance=true --sbom=true \
|
||||||
|
--build-arg SBOM_FILE="$SBOM_FILE" \
|
||||||
|
-t "$IMAGE" .
|
||||||
|
```
|
||||||
|
|
||||||
|
**If you *cannot* use Buildx, use Option B below.** The older “run a builder stage inside the Dockerfile” pattern is unreliable for producing an SBOM of the final image.
|
||||||
|
|
||||||
|
```Dockerfile
|
||||||
|
|
||||||
|
ARG STELLA_SBOM_BUILDER
|
||||||
|
ARG SBOM_FILE
|
||||||
|
|
||||||
|
FROM $STELLA_SBOM_BUILDER as sbom
|
||||||
|
ARG IMAGE
|
||||||
|
ARG SBOM_FILE
|
||||||
|
RUN $STELLA_SBOM_BUILDER build --image $IMAGE --output /out/$SBOM_FILE
|
||||||
|
|
||||||
|
# ---- actual build stages … ----
|
||||||
|
FROM alpine:3.20
|
||||||
|
COPY --from=sbom /out/$SBOM_FILE / # (optional) keep or discard
|
||||||
|
|
||||||
|
# (rest of your Dockerfile)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Option B – **External builder step** (works everywhere; recommended baseline if Buildx isn’t available)
|
||||||
|
|
||||||
|
*(keep this block if your pipeline already has an image‑build step that you can’t modify)*
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run --rm \
|
||||||
|
-e DOCKER_HOST="$DOCKER_HOST" \ # let builder reach the daemon remotely
|
||||||
|
-v "$WORKSPACE:/workspace" \ # place SBOM beside the source code
|
||||||
|
"$STELLA_URL/registry/stella-sbom-builder:latest" \
|
||||||
|
build --image "$IMAGE" --output "/workspace/${SBOM_FILE}"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Scan the image & upload results
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run --rm \
|
||||||
|
-e DOCKER_HOST="$DOCKER_HOST" \ # remote‑daemon pointer
|
||||||
|
-v "$WORKSPACE/${SBOM_FILE}:/${SBOM_FILE}:ro" \ # mount SBOM under same name at container root
|
||||||
|
-e STELLA_OPS_URL="https://${STELLA_URL}" \ # where the CLI posts findings
|
||||||
|
"$STELLA_URL/registry/stella-cli:latest" \
|
||||||
|
scan --sbom "/${SBOM_FILE}" "$IMAGE"
|
||||||
|
```
|
||||||
|
|
||||||
|
The CLI returns **exit 0** if policies pass, **>0** if blocked — perfect for failing the job.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · CI templates
|
||||||
|
|
||||||
|
Below are minimal, cut‑and‑paste snippets.
|
||||||
|
**Feel free to delete Option B** if you adopt Option A.
|
||||||
|
|
||||||
|
### 3.1 Jenkins (Declarative Pipeline)
|
||||||
|
|
||||||
|
```groovy
|
||||||
|
pipeline {
|
||||||
|
agent { docker { image 'docker:25' args '--privileged' } } // gives us /usr/bin/docker
|
||||||
|
environment {
|
||||||
|
STELLA_URL = 'stella-ops.ci.acme.example'
|
||||||
|
DOCKER_HOST = 'tcp://docker:2375'
|
||||||
|
IMAGE = "acme/backend:${env.BUILD_NUMBER}"
|
||||||
|
SBOM_FILE = "acme_backend_${env.BUILD_NUMBER}-${new Date().format('yyyyMMdd\'T\'HHmmss\'Z\'', TimeZone.getTimeZone('UTC'))}.sbom.json"
|
||||||
|
}
|
||||||
|
stages {
|
||||||
|
stage('Build image + SBOM (Option A)') {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
          docker buildx build --provenance=true --sbom=true \
|
||||||
|
--build-arg STELLA_SBOM_BUILDER="$STELLA_URL/registry/stella-sbom-builder:latest" \
|
||||||
|
--build-arg SBOM_FILE="$SBOM_FILE" \
|
||||||
|
-t "$IMAGE" .
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/* ---------- Option B fallback (when you must keep the existing build step as‑is) ----------
|
||||||
|
stage('SBOM builder (Option B)') {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -e DOCKER_HOST="$DOCKER_HOST" \
|
||||||
|
-v "$WORKSPACE:/workspace" \
|
||||||
|
"$STELLA_URL/registry/stella-sbom-builder:latest" \
|
||||||
|
build --image "$IMAGE" --output "/workspace/${SBOM_FILE}"
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
------------------------------------------------------------------------------------------ */
|
||||||
|
stage('Scan & upload') {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -e DOCKER_HOST="$DOCKER_HOST" \
|
||||||
|
-v "$WORKSPACE/${SBOM_FILE}:/${SBOM_FILE}:ro" \
|
||||||
|
-e STELLA_OPS_URL="https://$STELLA_URL" \
|
||||||
|
"$STELLA_URL/registry/stella-cli:latest" \
|
||||||
|
scan --sbom "/${SBOM_FILE}" "$IMAGE"
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3.2 CircleCI `.circleci/config.yml`
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
version: 2.1
|
||||||
|
jobs:
|
||||||
|
stella_scan:
|
||||||
|
docker:
|
||||||
|
- image: cimg/base:stable # baremetal image with Docker CLI
|
||||||
|
environment:
|
||||||
|
STELLA_URL: stella-ops.ci.acme.example
|
||||||
|
DOCKER_HOST: tcp://docker:2375 # Circle’s “remote Docker” socket
|
||||||
|
steps:
|
||||||
|
- checkout
|
||||||
|
|
||||||
|
- run:
|
||||||
|
name: Compute vars
|
||||||
|
command: |
|
||||||
|
echo 'export IMAGE="acme/backend:${CIRCLE_SHA1}"' >> $BASH_ENV
|
||||||
|
echo 'export SBOM_FILE="$(echo acme/backend:${CIRCLE_SHA1} | tr "/:+" "__")-$(date -u +%Y%m%dT%H%M%SZ).sbom.json"' >> $BASH_ENV
|
||||||
|
- run:
|
||||||
|
name: Build image + SBOM (Option A)
|
||||||
|
command: |
|
||||||
|
            docker buildx build --provenance=true --sbom=true \
|
||||||
|
--build-arg STELLA_SBOM_BUILDER="$STELLA_URL/registry/stella-sbom-builder:latest" \
|
||||||
|
--build-arg SBOM_FILE="$SBOM_FILE" \
|
||||||
|
-t "$IMAGE" .
|
||||||
|
# --- Option B fallback (when you must keep the existing build step as‑is) ---
|
||||||
|
#- run:
|
||||||
|
# name: SBOM builder (Option B)
|
||||||
|
# command: |
|
||||||
|
# docker run --rm -e DOCKER_HOST="$DOCKER_HOST" \
|
||||||
|
# -v "$PWD:/workspace" \
|
||||||
|
# "$STELLA_URL/registry/stella-sbom-builder:latest" \
|
||||||
|
# build --image "$IMAGE" --output "/workspace/${SBOM_FILE}"
|
||||||
|
- run:
|
||||||
|
name: Scan
|
||||||
|
command: |
|
||||||
|
docker run --rm -e DOCKER_HOST="$DOCKER_HOST" \
|
||||||
|
-v "$PWD/${SBOM_FILE}:/${SBOM_FILE}:ro" \
|
||||||
|
-e STELLA_OPS_URL="https://$STELLA_URL" \
|
||||||
|
"$STELLA_URL/registry/stella-cli:latest" \
|
||||||
|
scan --sbom "/${SBOM_FILE}" "$IMAGE"
|
||||||
|
workflows:
|
||||||
|
stella:
|
||||||
|
jobs: [stella_scan]
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3.3 Gitea Actions `.gitea/workflows/stella.yml`
|
||||||
|
|
||||||
|
*(Gitea 1.22+ ships native Actions compatible with GitHub syntax)*
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
name: Stella Scan
|
||||||
|
on: [push]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
stella:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
STELLA_URL: ${{ secrets.STELLA_URL }}
|
||||||
|
DOCKER_HOST: tcp://docker:2375 # provided by the docker:dind service
|
||||||
|
services:
|
||||||
|
docker:
|
||||||
|
image: docker:dind
|
||||||
|
options: >-
|
||||||
|
--privileged
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Compute vars
|
||||||
|
id: vars
|
||||||
|
run: |
|
||||||
|
          echo "IMAGE=ghcr.io/${{ gitea.repository }}:${{ gitea.sha }}" >> $GITHUB_OUTPUT
|
||||||
|
          echo "SBOM_FILE=$(echo ghcr.io/${{ gitea.repository }}:${{ gitea.sha }} | tr '/:+' '__')-$(date -u +%Y%m%dT%H%M%SZ).sbom.json" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Build image + SBOM (Option A)
|
||||||
|
run: |
|
||||||
|
          docker buildx build --provenance=true --sbom=true \
|
||||||
|
--build-arg STELLA_SBOM_BUILDER="${STELLA_URL}/registry/stella-sbom-builder:latest" \
|
||||||
|
--build-arg SBOM_FILE="${{ steps.vars.outputs.SBOM_FILE }}" \
|
||||||
|
-t "${{ steps.vars.outputs.IMAGE }}" .
|
||||||
|
|
||||||
|
# --- Option B fallback (when you must keep the existing build step as‑is) ---
|
||||||
|
#- name: SBOM builder (Option B)
|
||||||
|
# run: |
|
||||||
|
# docker run --rm -e DOCKER_HOST="$DOCKER_HOST" \
|
||||||
|
# -v "$(pwd):/workspace" \
|
||||||
|
# "${STELLA_URL}/registry/stella-sbom-builder:latest" \
|
||||||
|
# build --image "${{ steps.vars.outputs.IMAGE }}" --output "/workspace/${{ steps.vars.outputs.SBOM_FILE }}"
|
||||||
|
|
||||||
|
- name: Scan
|
||||||
|
run: |
|
||||||
|
docker run --rm -e DOCKER_HOST="$DOCKER_HOST" \
|
||||||
|
-v "$(pwd)/${{ steps.vars.outputs.SBOM_FILE }}:/${{ steps.vars.outputs.SBOM_FILE }}:ro" \
|
||||||
|
-e STELLA_OPS_URL="https://${STELLA_URL}" \
|
||||||
|
"${STELLA_URL}/registry/stella-cli:latest" \
|
||||||
|
scan --sbom "/${{ steps.vars.outputs.SBOM_FILE }}" "${{ steps.vars.outputs.IMAGE }}"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Troubleshooting cheat‑sheet
|
||||||
|
|
||||||
|
| Symptom | Root cause | First things to try |
|
||||||
|
| ------------------------------------- | --------------------------- | --------------------------------------------------------------- |
|
||||||
|
| `no such host $STELLA_URL` | DNS typo or VPN outage | `ping $STELLA_URL` from runner |
|
||||||
|
| `connection refused` when CLI uploads | Port 443 blocked | open firewall / check ingress |
|
||||||
|
| `failed to stat /<sbom>.json` | SBOM wasn’t produced | Did Option A actually run builder? If not, enable Option B |
|
||||||
|
| `registry unauthorized` | Runner lacks registry creds | `docker login $STELLA_URL/registry` (store creds in CI secrets) |
|
||||||
|
| Non‑zero scan exit | Blocking vuln/licence | Open project in Ops UI → triage or waive |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Change log
|
||||||
|
|
||||||
|
* **2025‑08‑04** – Variable clean‑up, removed Docker‑socket & cache mounts, added Jenkins / CircleCI / Gitea examples, clarified Option B comment.
|
||||||
8
docs/cli/20_REFERENCE.md
Executable file
8
docs/cli/20_REFERENCE.md
Executable file
@@ -0,0 +1,8 @@
|
|||||||
|
# CLI Reference (`stella --help`)
|
||||||
|
|
||||||
|
> **Auto‑generated file — do not edit manually.**
|
||||||
|
> On every tagged release the CI pipeline runs
|
||||||
|
> `stella --help --markdown > docs/cli/20_REFERENCE.md`
|
||||||
|
> ensuring this document always matches the shipped binary.
|
||||||
|
|
||||||
|
*(The reference will appear after the first public α release.)*
|
||||||
146
docs/dev/30_PLUGIN_DEV_GUIDE.md
Executable file
146
docs/dev/30_PLUGIN_DEV_GUIDE.md
Executable file
@@ -0,0 +1,146 @@
|
|||||||
|
# Writing Plug‑ins for Stella Ops SDK *Preview 3*
|
||||||
|
|
||||||
|
> **SDK status:** *Preview 3* is compatible with the **v0.1 α** runtime.
|
||||||
|
> Interfaces freeze at **v0.2 β**; binary‑breaking changes are still possible
|
||||||
|
> until then.
|
||||||
|
|
||||||
|
| SDK NuGet | Runtime compat | Notes |
|
||||||
|
|-----------|---------------|-------|
|
||||||
|
| `StellaOps.SDK 0.2.0-preview3` | `stella-ops >= 0.1.0-alpha` | Current preview |
|
||||||
|
| `StellaOps.SDK 0.2.x‑beta` | v0.2 β (Q1 2026) | Interface **freeze** |
|
||||||
|
| `StellaOps.SDK 1.0.0` | v1.0 GA (Q4 2026) | Semantic Ver from here |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0 · Extension points
|
||||||
|
|
||||||
|
| Area | Interface / format | Example |
|
||||||
|
|------|--------------------|---------|
|
||||||
|
| SBOM mutator | `ISbomMutator` | Inject SPDX licences |
|
||||||
|
| Additional scanner | `IVulnerabilityProvider` | Rust Crates ecosystem |
|
||||||
|
| Policy engine | **OPA Rego** file | Custom pass/fail rule |
|
||||||
|
| Result exporter | `IResultSink` | Slack webhook notifier |
|
||||||
|
|
||||||
|
*Hot‑plugging (live reload) is **post‑1.0**; modules are discovered once
|
||||||
|
during service start‑up.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 · Five‑minute quick‑start (C# /.NET {{ dotnet }})
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dotnet new classlib -n SlackExporter
|
||||||
|
cd SlackExporter
|
||||||
|
dotnet add package StellaOps.SDK --version 0.2.0-preview3
|
||||||
|
```
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
using System.Net.Http.Json;
|
||||||
|
using StellaOps.Plugin;
|
||||||
|
|
||||||
|
public sealed class SlackSink : IResultSink
|
||||||
|
{
|
||||||
|
private readonly string _webhook =
|
||||||
|
Environment.GetEnvironmentVariable("SLACK_WEBHOOK")
|
||||||
|
?? throw new InvalidOperationException("Missing SLACK_WEBHOOK");
|
||||||
|
|
||||||
|
public string Name => "Slack Notifier";
|
||||||
|
|
||||||
|
public async Task ExportAsync(ScanResult result, CancellationToken ct)
|
||||||
|
{
|
||||||
|
var payload = new
|
||||||
|
{
|
||||||
|
text = $":rotating_light: *{result.Image}* " +
|
||||||
|
$"→ {result.Findings.Count} findings (max {result.MaxSeverity})"
|
||||||
|
};
|
||||||
|
|
||||||
|
using var client = new HttpClient();
|
||||||
|
await client.PostAsJsonAsync(_webhook, payload, ct);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dotnet publish -c Release -o out
|
||||||
|
sudo mkdir -p /opt/stella/plugins/Slack
|
||||||
|
sudo cp out/SlackExporter.dll /opt/stella/plugins/Slack/
|
||||||
|
sudo systemctl restart stella-ops
|
||||||
|
```
|
||||||
|
|
||||||
|
Start‑up log:
|
||||||
|
|
||||||
|
```
|
||||||
|
[PluginLoader] Loaded 1 plug‑in:
|
||||||
|
• Slack Notifier
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 · Packaging rules
|
||||||
|
|
||||||
|
| Item | Rule |
|
||||||
|
| ------ | ----------------------------------------- |
|
||||||
|
| Folder | `/opt/stella/plugins/<NiceName>/` |
|
||||||
|
| DLLs | Your plug‑in + non‑GAC deps |
|
||||||
|
| Config | Env‑vars or `settings.yaml` |
|
||||||
|
| SBOM | Optional `addon.spdx.json` for provenance |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 · Security sandbox
|
||||||
|
|
||||||
|
* Runs as Linux user **`stella‑plugin` (UID 1001)**.
|
||||||
|
* SELinux/AppArmor profile blocks inbound traffic; outbound :80/443 only.
|
||||||
|
* cgroup default: **1 CPU / 256 MiB** (adjustable).
|
||||||
|
* SHA‑256 of every DLL is embedded in the run report.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 · Debugging
|
||||||
|
|
||||||
|
| Technique | Command |
|
||||||
|
| ----------------- | ---------------------------------- |
|
||||||
|
| Verbose core log | `STELLA_LOG=debug` |
|
||||||
|
| Per‑plug‑in log | Inject `ILogger<YourClass>` |
|
||||||
|
| Dry‑run (no fail) | `--plugin-mode warn` |
|
||||||
|
| Hot reload | *Not supported* (planned post‑1.0) |
|
||||||
|
|
||||||
|
Logs: `/var/log/stella-ops/plugins/YYYY‑MM‑DD.log`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5 · Interface reference (Preview 3)
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
namespace StellaOps.Plugin
|
||||||
|
{
|
||||||
|
public interface ISbomMutator
|
||||||
|
{
|
||||||
|
string Name { get; }
|
||||||
|
Task<SoftwareBillOfMaterials> MutateAsync(
|
||||||
|
SoftwareBillOfMaterials sbom,
|
||||||
|
CancellationToken ct = default);
|
||||||
|
}
|
||||||
|
|
||||||
|
public interface IVulnerabilityProvider
|
||||||
|
{
|
||||||
|
string Ecosystem { get; }
|
||||||
|
Task<IReadOnlyList<Vulnerability>> QueryAsync(
|
||||||
|
PackageReference p, CancellationToken ct = default);
|
||||||
|
}
|
||||||
|
|
||||||
|
public interface IResultSink
|
||||||
|
{
|
||||||
|
string Name { get; }
|
||||||
|
Task ExportAsync(
|
||||||
|
ScanResult result, CancellationToken ct = default);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Full POCO docs: [https://git.stella-ops.org/stella-ops/sdk/-/tree/main/docs/api](https://git.stella-ops.org/stella-ops/sdk/-/tree/main/docs/api).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Last updated {{ "now" | date: "%Y‑%m‑%d" }} – constants auto‑injected.*
|
||||||
|
|
||||||
157
docs/dev/31_AUTHORITY_PLUGIN_DEVELOPER_GUIDE.md
Normal file
157
docs/dev/31_AUTHORITY_PLUGIN_DEVELOPER_GUIDE.md
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
# Authority Plug-in Developer Guide
|
||||||
|
|
||||||
|
> **Status:** Ready for Docs/DOC4 editorial review as of 2025-10-10. Content aligns with PLG6 acceptance criteria and references stable Authority primitives.
|
||||||
|
|
||||||
|
## 1. Overview
|
||||||
|
Authority plug-ins extend the **StellaOps Authority** service with custom identity providers, credential stores, and client-management logic. Unlike Feedser plug-ins (which ingest or export advisories), Authority plug-ins participate directly in authentication flows:
|
||||||
|
|
||||||
|
- **Use cases:** integrate corporate directories (LDAP/AD), delegate to external IDPs, enforce bespoke password/lockout policies, or add client provisioning automation.
|
||||||
|
- **Constraints:** plug-ins load only during service start (no hot-reload), must function without outbound internet access, and must emit deterministic results for identical configuration and input data.
|
||||||
|
- **Ship targets:** target the same .NET 10 preview as the host, honour offline-first requirements, and provide clear diagnostics so operators can triage issues from `/ready`.
|
||||||
|
|
||||||
|
## 2. Architecture Snapshot
|
||||||
|
Authority hosts follow a deterministic plug-in lifecycle. The flow below can be rendered as a sequence diagram in the final authored documentation, but all touchpoints are described here for offline viewers:
|
||||||
|
|
||||||
|
1. **Configuration load** – `AuthorityPluginConfigurationLoader` resolves YAML manifests under `etc/authority.plugins/`.
|
||||||
|
2. **Assembly discovery** – the shared `PluginHost` scans `PluginBinaries/Authority` for `StellaOps.Authority.Plugin.*.dll` assemblies.
|
||||||
|
3. **Registrar execution** – each assembly is searched for `IAuthorityPluginRegistrar` implementations. Registrars bind options, register services, and optionally queue bootstrap tasks.
|
||||||
|
4. **Runtime** – the host resolves `IIdentityProviderPlugin` instances, uses capability metadata to decide which OAuth grants to expose, and invokes health checks for readiness endpoints.
|
||||||
|
|
||||||
|
**Data persistence primer:** the standard Mongo-backed plugin stores users in collections named `authority_users_<pluginName>` and lockout metadata in embedded documents. Additional plugins must document their storage layout and provide deterministic collection naming to honour the Offline Kit replication process.
|
||||||
|
|
||||||
|
## 3. Capability Metadata
|
||||||
|
Capability flags let the host reason about what your plug-in supports:
|
||||||
|
|
||||||
|
- Declare capabilities in your descriptor using the string constants from `AuthorityPluginCapabilities` (`password`, `mfa`, `clientProvisioning`, `bootstrap`). The configuration loader now validates these tokens and rejects unknown values at startup.
|
||||||
|
- `AuthorityIdentityProviderCapabilities.FromCapabilities` projects those strings into strongly typed booleans (`SupportsPassword`, etc.). Authority Core will use these flags when wiring flows such as the password grant. Built-in plugins (e.g., Standard) will fail fast or force-enable required capabilities if the descriptor is misconfigured, so keep manifests accurate.
|
||||||
|
- Typical configuration (`etc/authority.plugins/standard.yaml`):
|
||||||
|
```yaml
|
||||||
|
plugins:
|
||||||
|
descriptors:
|
||||||
|
standard:
|
||||||
|
assemblyName: "StellaOps.Authority.Plugin.Standard"
|
||||||
|
capabilities:
|
||||||
|
- password
|
||||||
|
- bootstrap
|
||||||
|
```
|
||||||
|
- Only declare a capability if the plug-in genuinely implements it. For example, if `SupportsClientProvisioning` is `true`, the plug-in must supply a working `IClientProvisioningStore`.
|
||||||
|
|
||||||
|
**Operational reminder:** the Authority host surfaces capability summaries during startup (see `AuthorityIdentityProviderRegistry` log lines). Use those logs during smoke tests to ensure manifests align with expectations.
|
||||||
|
|
||||||
|
**Configuration path normalisation:** Manifest-relative paths (e.g., `tokenSigning.keyDirectory: "../keys"`) are resolved against the YAML file location and environment variables are expanded before validation. Plug-ins should expect to receive an absolute, canonical path when options are injected.
|
||||||
|
|
||||||
|
## 4. Project Scaffold
|
||||||
|
- Target **.NET 10 preview**, enable nullable, treat warnings as errors, and mark Authority plug-ins with `<IsAuthorityPlugin>true</IsAuthorityPlugin>`.
|
||||||
|
- Minimum references:
|
||||||
|
- `StellaOps.Authority.Plugins.Abstractions` (contracts & capability helpers)
|
||||||
|
- `StellaOps.Plugin` (hosting/DI helpers)
|
||||||
|
- `StellaOps.Auth.*` libraries as needed for shared token utilities (optional today).
|
||||||
|
- Example `.csproj` (trimmed from `StellaOps.Authority.Plugin.Standard`):
|
||||||
|
```xml
|
||||||
|
<Project Sdk="Microsoft.NET.Sdk">
|
||||||
|
<PropertyGroup>
|
||||||
|
<TargetFramework>net10.0</TargetFramework>
|
||||||
|
<Nullable>enable</Nullable>
|
||||||
|
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
|
||||||
|
<IsAuthorityPlugin>true</IsAuthorityPlugin>
|
||||||
|
</PropertyGroup>
|
||||||
|
<ItemGroup>
|
||||||
|
<ProjectReference Include="..\StellaOps.Authority.Plugins.Abstractions\StellaOps.Authority.Plugins.Abstractions.csproj" />
|
||||||
|
<ProjectReference Include="..\..\StellaOps.Plugin\StellaOps.Plugin.csproj" />
|
||||||
|
</ItemGroup>
|
||||||
|
</Project>
|
||||||
|
```
|
||||||
|
(Add other references—e.g., MongoDB driver, shared auth libraries—according to your implementation.)
|
||||||
|
|
||||||
|
## 5. Implementing `IAuthorityPluginRegistrar`
|
||||||
|
- Create a parameterless registrar class that returns your plug-in type name via `PluginType`.
|
||||||
|
- Use `AuthorityPluginRegistrationContext` to:
|
||||||
|
- Bind options (`AddOptions<T>(pluginName).Bind(...)`).
|
||||||
|
- Register singletons for stores/enrichers using manifest metadata.
|
||||||
|
- Register any hosted bootstrap tasks (e.g., seed admin users).
|
||||||
|
- Always validate configuration inside `PostConfigure` and throw meaningful `InvalidOperationException` to fail fast during startup.
|
||||||
|
- Use the provided `ILoggerFactory` from DI; avoid static loggers or console writes.
|
||||||
|
- Example skeleton:
|
||||||
|
```csharp
|
||||||
|
internal sealed class MyPluginRegistrar : IAuthorityPluginRegistrar
|
||||||
|
{
|
||||||
|
public string PluginType => "my-custom";
|
||||||
|
|
||||||
|
public void Register(AuthorityPluginRegistrationContext context)
|
||||||
|
{
|
||||||
|
var name = context.Plugin.Manifest.Name;
|
||||||
|
|
||||||
|
context.Services.AddOptions<MyPluginOptions>(name)
|
||||||
|
.Bind(context.Plugin.Configuration)
|
||||||
|
.PostConfigure(opts => opts.Validate(name));
|
||||||
|
|
||||||
|
context.Services.AddSingleton<IIdentityProviderPlugin>(sp =>
|
||||||
|
new MyIdentityProvider(context.Plugin, sp.GetRequiredService<MyCredentialStore>(),
|
||||||
|
sp.GetRequiredService<MyClaimsEnricher>(),
|
||||||
|
sp.GetRequiredService<ILogger<MyIdentityProvider>>()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 6. Identity Provider Surface
|
||||||
|
- Implement `IIdentityProviderPlugin` to expose:
|
||||||
|
- `IUserCredentialStore` for password validation and user CRUD.
|
||||||
|
- `IClaimsEnricher` to append roles/attributes onto issued principals.
|
||||||
|
- Optional `IClientProvisioningStore` for machine-to-machine clients.
|
||||||
|
- `AuthorityIdentityProviderCapabilities` to advertise supported flows.
|
||||||
|
- Password guidance:
|
||||||
|
- Prefer Argon2 (Security Guild upcoming recommendation); Standard plug-in currently ships PBKDF2 with easy swap via `IPasswordHasher`.
|
||||||
|
- Enforce password policies before hashing to avoid storing weak credentials.
|
||||||
|
- Health checks should probe backing stores (e.g., Mongo `ping`) and return `AuthorityPluginHealthResult` so `/ready` can surface issues.
|
||||||
|
- When supporting additional factors (e.g., TOTP), implement `SupportsMfa` and document the enrolment flow for resource servers.
|
||||||
|
|
||||||
|
## 7. Configuration & Secrets
|
||||||
|
- Authority looks for manifests under `etc/authority.plugins/`. Each YAML file maps directly to a plug-in name.
|
||||||
|
- Support environment overrides using `STELLAOPS_AUTHORITY_PLUGINS__DESCRIPTORS__<NAME>__...`.
|
||||||
|
- Never store raw secrets in git: allow operators to supply them via `.local.yaml`, environment variables, or injected secret files. Document which keys are mandatory.
|
||||||
|
- Validate configuration as soon as the registrar runs; use explicit error messages to guide operators. The Standard plug-in now enforces complete bootstrap credentials (username + password) and positive lockout windows via `StandardPluginOptions.Validate`.
|
||||||
|
- Cross-reference bootstrap workflows with `docs/ops/authority_bootstrap.md` (to be published alongside CORE6) so operators can reuse the same payload formats for manual provisioning.
|
||||||
|
|
||||||
|
## 8. Logging, Metrics, and Diagnostics
|
||||||
|
- Always log via the injected `ILogger<T>`; include `pluginName` and correlation IDs where available.
|
||||||
|
- Activity/metric names should align with `AuthorityTelemetry` constants (`service.name=stellaops-authority`).
|
||||||
|
- Expose additional diagnostics via structured logging rather than writing custom HTTP endpoints; the host will integrate these into `/health` and `/ready`.
|
||||||
|
- Emit metrics with stable names (`auth.plugins.<pluginName>.*`) when introducing custom instrumentation; coordinate with the Observability guild to reserve prefixes.
|
||||||
|
|
||||||
|
## 9. Testing & Tooling
|
||||||
|
- Unit tests: use Mongo2Go (or similar) to exercise credential stores without hitting production infrastructure (`StandardUserCredentialStoreTests` is a template).
|
||||||
|
- Determinism: fix timestamps to UTC and sort outputs consistently; avoid random GUIDs unless stable.
|
||||||
|
- Smoke tests: launch `dotnet run --project src/StellaOps.Authority/StellaOps.Authority` with your plug-in under `PluginBinaries/Authority` and verify `/ready`.
|
||||||
|
- Example verification snippet:
|
||||||
|
```csharp
|
||||||
|
[Fact]
|
||||||
|
public async Task VerifyPasswordAsync_ReturnsSuccess()
|
||||||
|
{
|
||||||
|
var store = CreateCredentialStore();
|
||||||
|
await store.UpsertUserAsync(new AuthorityUserRegistration("alice", "Pa55!", null, null, false,
|
||||||
|
Array.Empty<string>(), new Dictionary<string, string?>()), CancellationToken.None);
|
||||||
|
|
||||||
|
var result = await store.VerifyPasswordAsync("alice", "Pa55!", CancellationToken.None);
|
||||||
|
Assert.True(result.Succeeded);
|
||||||
|
Assert.True(result.User?.Roles.Count == 0);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 10. Packaging & Delivery
|
||||||
|
- Output assembly should follow `StellaOps.Authority.Plugin.<Name>.dll` so the host’s search pattern picks it up.
|
||||||
|
- Place the compiled DLL plus dependencies under `PluginBinaries/Authority` for offline deployments; include hashes/signatures in release notes (Security Guild guidance forthcoming).
|
||||||
|
- Document any external prerequisites (e.g., CA cert bundle) in your plug-in README.
|
||||||
|
- Update `etc/authority.plugins/<plugin>.yaml` samples and include deterministic SHA256 hashes for optional bootstrap payloads when distributing Offline Kit artefacts.
|
||||||
|
|
||||||
|
## 11. Checklist & Handoff
|
||||||
|
- ✅ Capabilities declared and validated in automated tests.
|
||||||
|
- ✅ Bootstrap workflows documented (if `bootstrap` capability used) and repeatable.
|
||||||
|
- ✅ Local smoke test + unit/integration suites green (`dotnet test`).
|
||||||
|
- ✅ Operational docs updated: configuration keys, secrets guidance, troubleshooting.
|
||||||
|
- Submit the developer guide update referencing PLG6/DOC4 and tag DevEx + Docs reviewers for sign-off.
|
||||||
|
|
||||||
|
---
|
||||||
|
**Next documentation actions:**
|
||||||
|
- Add rendered architectural diagram (PlantUML/mermaid) reflecting the lifecycle above once the Docs toolkit pipeline is ready.
|
||||||
|
- Reference the LDAP RFC (`docs/rfcs/authority-plugin-ldap.md`) in the capability section once review completes.
|
||||||
|
- Sync terminology with `docs/11_AUTHORITY.md` when that chapter is published to keep glossary terms consistent.
|
||||||
91
docs/dev/32_AUTH_CLIENT_GUIDE.md
Normal file
91
docs/dev/32_AUTH_CLIENT_GUIDE.md
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
# StellaOps Auth Client — Integration Guide
|
||||||
|
|
||||||
|
> **Status:** Drafted 2025-10-10 as part of LIB5. Consumer teams (Feedser, CLI, Agent) should review before wiring the new options into their configuration surfaces.
|
||||||
|
|
||||||
|
The `StellaOps.Auth.Client` library provides a resilient OpenID Connect client for services and tools that talk to **StellaOps Authority**. LIB5 introduced configurable HTTP retry/backoff policies and an offline-fallback window so downstream components stay deterministic even when Authority is briefly unavailable.
|
||||||
|
|
||||||
|
This guide explains how to consume the new settings, when to toggle them, and how to test your integration.
|
||||||
|
|
||||||
|
## 1. Registering the client
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
services.AddStellaOpsAuthClient(options =>
|
||||||
|
{
|
||||||
|
options.Authority = configuration["StellaOps:Authority:Url"]!;
|
||||||
|
options.ClientId = configuration["StellaOps:Authority:ClientId"]!;
|
||||||
|
options.ClientSecret = configuration["StellaOps:Authority:ClientSecret"];
|
||||||
|
options.DefaultScopes.Add("feedser.jobs.trigger");
|
||||||
|
|
||||||
|
options.EnableRetries = true;
|
||||||
|
options.RetryDelays.Clear();
|
||||||
|
options.RetryDelays.Add(TimeSpan.FromMilliseconds(500));
|
||||||
|
options.RetryDelays.Add(TimeSpan.FromSeconds(2));
|
||||||
|
|
||||||
|
options.AllowOfflineCacheFallback = true;
|
||||||
|
options.OfflineCacheTolerance = TimeSpan.FromMinutes(5);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Reminder:** `AddStellaOpsAuthClient` binds the options via `IOptionsMonitor<T>` so changes picked up from configuration reloads will be applied to future HTTP calls without restarting the host.
|
||||||
|
|
||||||
|
## 2. Resilience options
|
||||||
|
|
||||||
|
| Option | Default | Notes |
|
||||||
|
|--------|---------|-------|
|
||||||
|
| `EnableRetries` | `true` | When disabled, the shared Polly policy is a no-op and HTTP calls will fail fast. |
|
||||||
|
| `RetryDelays` | `1s, 2s, 5s` | Edit in ascending order; zero/negative entries are ignored. Clearing the list and leaving it empty keeps the defaults. |
|
||||||
|
| `AllowOfflineCacheFallback` | `true` | When `true`, stale discovery/JWKS responses are reused within the tolerance window if Authority is unreachable. |
|
||||||
|
| `OfflineCacheTolerance` | `00:10:00` | Added to the normal cache lifetime. E.g. a 10 minute JWKS cache plus 5 minute tolerance keeps keys for 15 minutes if Authority is offline. |
|
||||||
|
|
||||||
|
The HTTP retry policy handles:
|
||||||
|
|
||||||
|
- 5xx responses
|
||||||
|
- 429 responses
|
||||||
|
- Transient transport failures (`HttpRequestException`, timeouts, aborted sockets)
|
||||||
|
|
||||||
|
Retries emit warnings via the `StellaOps.Auth.Client.HttpRetry` logger. Tune the delay values to honour your deployment’s SLOs.
|
||||||
|
|
||||||
|
## 3. Configuration mapping
|
||||||
|
|
||||||
|
Suggested configuration keys (coordinate with consuming teams before finalising):
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
StellaOps:
|
||||||
|
Authority:
|
||||||
|
Url: "https://authority.stella-ops.local"
|
||||||
|
ClientId: "feedser"
|
||||||
|
ClientSecret: "change-me"
|
||||||
|
AuthClient:
|
||||||
|
EnableRetries: true
|
||||||
|
RetryDelays:
|
||||||
|
- "00:00:01"
|
||||||
|
- "00:00:02"
|
||||||
|
- "00:00:05"
|
||||||
|
AllowOfflineCacheFallback: true
|
||||||
|
OfflineCacheTolerance: "00:10:00"
|
||||||
|
```
|
||||||
|
|
||||||
|
Environment variable binding follows the usual double-underscore rules, e.g.
|
||||||
|
|
||||||
|
```
|
||||||
|
STELLAOPS__AUTHORITY__AUTHCLIENT__RETRYDELAYS__0=00:00:02
|
||||||
|
STELLAOPS__AUTHORITY__AUTHCLIENT__OFFLINECACHETOLERANCE=00:05:00
|
||||||
|
```
|
||||||
|
|
||||||
|
CLI and Feedser teams should expose these knobs once they adopt the auth client.
|
||||||
|
|
||||||
|
## 4. Testing recommendations
|
||||||
|
|
||||||
|
1. **Unit tests:** assert option binding by configuring `StellaOpsAuthClientOptions` via a `ConfigurationBuilder` and ensuring `Validate()` normalises the retry delays and scope list.
|
||||||
|
2. **Offline fallback:** simulate an unreachable Authority by swapping `HttpMessageHandler` to throw `HttpRequestException` after priming the discovery/JWKS caches. Verify that tokens are still issued until the tolerance expires.
|
||||||
|
3. **Observability:** watch for `StellaOps.Auth.Client.HttpRetry` warnings in your logs. Excessive retries mean the upstream Authority cluster needs attention.
|
||||||
|
4. **Determinism:** keep retry delays deterministic. Avoid random jitter—operators can introduce jitter at the infrastructure layer if desired.
|
||||||
|
|
||||||
|
## 5. Rollout checklist
|
||||||
|
|
||||||
|
- [ ] Update consuming service/CLI configuration schema to include the new settings.
|
||||||
|
- [ ] Document recommended defaults for offline (air-gapped) versus connected deployments.
|
||||||
|
- [ ] Extend smoke tests to cover Authority outage scenarios.
|
||||||
|
- [ ] Coordinate with Docs Guild so user-facing quickstarts reference the new knobs.
|
||||||
|
|
||||||
|
Once Feedser and CLI integrate these changes, we can mark LIB5 **DONE**; further packaging work is deferred until the backlog reintroduces it.
|
||||||
33
docs/dev/authority-rate-limit-tuning-outline.md
Normal file
33
docs/dev/authority-rate-limit-tuning-outline.md
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
# Authority Rate Limit Tuning Outline (2025-10-11)
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
- Drive the remaining work on SEC3.B (Security Guild) and PLG6.DOC (Docs Guild) by capturing the agreed baseline for Authority rate limits and related documentation deliverables.
|
||||||
|
- Provide a single reference for lockout + rate limit interplay so Docs can lift accurate copy into `docs/security/rate-limits.md` and `docs/dev/31_AUTHORITY_PLUGIN_DEVELOPER_GUIDE.md`.
|
||||||
|
|
||||||
|
## Baseline Configuration
|
||||||
|
- `/token`: fixed window, permitLimit 30, window 60s, queueLimit 0. Reduce to 10/60s for untrusted IP ranges; raise to 60/60s only with compensating controls (WAF + active monitoring).
|
||||||
|
- `/authorize`: permitLimit 60, window 60s, queueLimit 10. Intended for interactive browser flows; lowering below 30 requires UX review.
|
||||||
|
- `/internal/*`: disabled by default; recommended 5/60s with queueLimit 0 when bootstrap API exposed.
|
||||||
|
- Configuration path: `authority.security.rateLimiting.<endpoint>` (e.g., `token.permitLimit`). YAML/ENV bindings follow the standard options hierarchy.
|
||||||
|
- Retry metadata: middleware stamps `Retry-After` along with tags `authority.client_id`, `authority.remote_ip`, `authority.endpoint`. Docs should highlight these for operator dashboards.
|
||||||
|
|
||||||
|
## Parameter Matrix
|
||||||
|
| Scenario | permitLimit | window | queueLimit | Notes |
|
||||||
|
|----------|-------------|--------|------------|-------|
|
||||||
|
| Default production | 30 | 60s | 0 | Works with anonymous quota (33 scans/day). |
|
||||||
|
| High-trust clustered IPs | 60 | 60s | 5 | Requires `authorize_rate_limit_hits` alert ≤ 1% sustained. |
|
||||||
|
| Air-gapped lab | 10 | 120s | 0 | Emphasise reduced concurrency + manual queue draining. |
|
||||||
|
| Incident lockdown | 5 | 300s | 0 | Pair with lockout lowering to 3 attempts. |
|
||||||
|
|
||||||
|
## Lockout Interplay
|
||||||
|
- Ensure Docs explain difference between rate limit (per IP/client) vs lockout (per subject). Provide table mapping retry-after headers to recommended support scripts.
|
||||||
|
- Security Guild to define alert thresholds: trigger SOC ticket when 429 rate > 25% for 5 minutes or when limiter emits >100 events/hour per client.
|
||||||
|
|
||||||
|
## Observability
|
||||||
|
- Surface metrics: `aspnetcore_rate_limiting_rejections_total{limiter="authority-token"}` and custom log tags from `AuthorityRateLimiterMetadataMiddleware`.
|
||||||
|
- Recommend dashboard sections: request volume vs. rejections, top offending clientIds, per-endpoint heatmap.
|
||||||
|
|
||||||
|
## Action Items
|
||||||
|
1. Security Guild (SEC3.B): incorporate matrix + alert rules into `docs/security/rate-limits.md`, add YAML examples for override blocks, and cross-link lockout policy doc.
|
||||||
|
2. Docs Guild (PLG6.DOC): update developer guide section 9 with the middleware sequence and reference this outline for retry metadata + tuning guidance.
|
||||||
|
3. Authority Core: validate appsettings sample includes the `security.rateLimiting` block with comments and link back to published doc once ready.
|
||||||
99
docs/dev/merge_semver_playbook.md
Normal file
99
docs/dev/merge_semver_playbook.md
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
# Feedser SemVer Merge Playbook (Sprint 1–2)
|
||||||
|
|
||||||
|
This playbook describes how the merge layer and connector teams should emit the new SemVer primitives introduced in Sprint 1–2, how those primitives become normalized version rules, and how downstream jobs query them deterministically.
|
||||||
|
|
||||||
|
## 1. What landed in Sprint 1–2
|
||||||
|
|
||||||
|
- `RangePrimitives.SemVer` now infers a canonical `style` (`range`, `exact`, `lt`, `lte`, `gt`, `gte`) and captures `exactValue` when the constraint is a single version.
|
||||||
|
- `NormalizedVersionRule` documents the analytics-friendly projection of each `AffectedPackage` coverage entry and is persisted alongside legacy `versionRanges`.
|
||||||
|
- `AdvisoryProvenance.decisionReason` records whether merge resolution favored precedence, freshness, or a tie-breaker comparison.
|
||||||
|
|
||||||
|
See `src/StellaOps.Feedser.Models/CANONICAL_RECORDS.md` for the full schema and field descriptions.
|
||||||
|
|
||||||
|
## 2. Mapper pattern
|
||||||
|
|
||||||
|
Connectors should emit SemVer primitives as soon as they can normalize a vendor constraint. The helper `SemVerPrimitiveExtensions.ToNormalizedVersionRule` turns those primitives into the persisted rules:
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
var primitive = new SemVerPrimitive(
|
||||||
|
introduced: "1.2.3",
|
||||||
|
introducedInclusive: true,
|
||||||
|
@fixed: "2.0.0",
|
||||||
|
fixedInclusive: false,
|
||||||
|
lastAffected: null,
|
||||||
|
lastAffectedInclusive: false,
|
||||||
|
constraintExpression: ">=1.2.3 <2.0.0",
|
||||||
|
exactValue: null);
|
||||||
|
|
||||||
|
var rule = primitive.ToNormalizedVersionRule(notes: "nvd:CVE-2025-1234");
|
||||||
|
// rule => scheme=semver, type=range, min=1.2.3, minInclusive=true, max=2.0.0, maxInclusive=false
|
||||||
|
```
|
||||||
|
|
||||||
|
Emit the resulting rule inside `AffectedPackage.NormalizedVersions` while continuing to populate `AffectedVersionRange.RangeExpression` for backward compatibility.
|
||||||
|
|
||||||
|
## 3. Merge dedupe flow
|
||||||
|
|
||||||
|
During merge, feed all package candidates through `NormalizedVersionRuleComparer.Instance` prior to persistence. The comparer orders by scheme → type → min → minInclusive → max → maxInclusive → value → notes, guaranteeing consistent document layout and making `$unwind` pipelines deterministic.
|
||||||
|
|
||||||
|
If multiple connectors emit identical constraints, the merge layer should:
|
||||||
|
|
||||||
|
1. Combine provenance entries (preserving one per source).
|
||||||
|
2. Preserve a single normalized rule instance (thanks to `NormalizedVersionRuleEqualityComparer.Instance`).
|
||||||
|
3. Attach `decisionReason="precedence"` if one source overrides another.
|
||||||
|
|
||||||
|
## 4. Example Mongo pipeline
|
||||||
|
|
||||||
|
Use the following aggregation to locate advisories that affect a specific SemVer:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
db.advisories.aggregate([
|
||||||
|
{ $match: { "affectedPackages.type": "semver", "affectedPackages.identifier": "pkg:npm/lodash" } },
|
||||||
|
{ $unwind: "$affectedPackages" },
|
||||||
|
{ $unwind: "$affectedPackages.normalizedVersions" },
|
||||||
|
{ $match: {
|
||||||
|
$or: [
|
||||||
|
{ "affectedPackages.normalizedVersions.type": "exact",
|
||||||
|
"affectedPackages.normalizedVersions.value": "4.17.21" },
|
||||||
|
{ "affectedPackages.normalizedVersions.type": "range",
|
||||||
|
"affectedPackages.normalizedVersions.min": { $lte: "4.17.21" },
|
||||||
|
"affectedPackages.normalizedVersions.max": { $gt: "4.17.21" } },
|
||||||
|
{ "affectedPackages.normalizedVersions.type": "gte",
|
||||||
|
"affectedPackages.normalizedVersions.min": { $lte: "4.17.21" } },
|
||||||
|
{ "affectedPackages.normalizedVersions.type": "lte",
|
||||||
|
"affectedPackages.normalizedVersions.max": { $gte: "4.17.21" } }
|
||||||
|
]
|
||||||
|
}},
|
||||||
|
{ $project: { advisoryKey: 1, title: 1, "affectedPackages.identifier": 1 } }
|
||||||
|
]);
|
||||||
|
```
|
||||||
|
|
||||||
|
Pair this query with the indexes listed in [Normalized Versions Query Guide](mongo_indices.md).
|
||||||
|
|
||||||
|
## 5. Recommended indexes
|
||||||
|
|
||||||
|
| Collection | Index | Purpose |
|
||||||
|
|------------|-------|---------|
|
||||||
|
| `advisory` | `{ "affectedPackages.identifier": 1, "affectedPackages.normalizedVersions.scheme": 1, "affectedPackages.normalizedVersions.type": 1 }` (compound, multikey) | Speeds up `$match` on identifier + rule style. |
|
||||||
|
| `advisory` | `{ "affectedPackages.normalizedVersions.value": 1 }` (sparse) | Optimizes lookups for exact version hits. |
|
||||||
|
|
||||||
|
Coordinate with the Storage team when enabling these indexes so deployment windows account for collection size.
|
||||||
|
|
||||||
|
## 6. Dual-write rollout
|
||||||
|
|
||||||
|
Follow the operational checklist in `docs/ops/migrations/SEMVER_STYLE.md`. The summary:
|
||||||
|
|
||||||
|
1. **Dual write (now)** – emit both legacy `versionRanges` and the new `normalizedVersions`.
|
||||||
|
2. **Backfill** – follow the storage migration in `docs/ops/migrations/SEMVER_STYLE.md` to rewrite historical advisories before switching consumers.
|
||||||
|
3. **Verify** – run the aggregation above (with `explain("executionStats")`) to ensure the new indexes are used.
|
||||||
|
4. **Cutover** – after consumers switch to normalized rules, mark the old `rangeExpression` as deprecated.
|
||||||
|
|
||||||
|
## 7. Checklist for connectors & merge
|
||||||
|
|
||||||
|
- [ ] Populate `SemVerPrimitive` for every SemVer-friendly constraint.
|
||||||
|
- [ ] Call `ToNormalizedVersionRule` and store the result.
|
||||||
|
- [ ] Emit provenance masks covering both `versionRanges[].primitives.semver` and `normalizedVersions[]`.
|
||||||
|
- [ ] Ensure merge deduping relies on the canonical comparer.
|
||||||
|
- [ ] Capture merge decisions via `decisionReason`.
|
||||||
|
- [ ] Confirm integration tests include fixtures with normalized rules and SemVer styles.
|
||||||
|
|
||||||
|
For deeper query examples and maintenance tasks, continue with [Normalized Versions Query Guide](mongo_indices.md).
|
||||||
106
docs/dev/mongo_indices.md
Normal file
106
docs/dev/mongo_indices.md
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
# Normalized Versions Query Guide
|
||||||
|
|
||||||
|
This guide complements the Sprint 1–2 normalized versions rollout. It documents recommended indexes and aggregation patterns for querying `AffectedPackage.normalizedVersions`.
|
||||||
|
|
||||||
|
## 1. Recommended indexes
|
||||||
|
|
||||||
|
When `feedser.storage.enableSemVerStyle` is enabled, advisories expose a flattened
|
||||||
|
`normalizedVersions` array at the document root. Create these indexes in `mongosh`
|
||||||
|
after the migration completes (adjust collection name if you use a prefix):
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
db.advisories.createIndex(
|
||||||
|
{
|
||||||
|
"normalizedVersions.packageId": 1,
|
||||||
|
"normalizedVersions.scheme": 1,
|
||||||
|
"normalizedVersions.type": 1
|
||||||
|
},
|
||||||
|
{ name: "advisory_normalizedVersions_pkg_scheme_type" }
|
||||||
|
);
|
||||||
|
|
||||||
|
db.advisories.createIndex(
|
||||||
|
{ "normalizedVersions.value": 1 },
|
||||||
|
{ name: "advisory_normalizedVersions_value", sparse: true }
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
- The compound index accelerates `$match` stages that filter by package identifier and rule style without unwinding `affectedPackages`.
|
||||||
|
- The sparse index keeps storage costs low while supporting pure exact-version lookups (type `exact`).
|
||||||
|
|
||||||
|
The storage bootstrapper creates the same indexes automatically when the feature flag is enabled.
|
||||||
|
|
||||||
|
## 2. Query patterns
|
||||||
|
|
||||||
|
### 2.1 Determine if a specific version is affected
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
db.advisories.aggregate([
|
||||||
|
{ $match: { "normalizedVersions.packageId": "pkg:npm/lodash" } },
|
||||||
|
{ $unwind: "$normalizedVersions" },
|
||||||
|
{ $match: {
|
||||||
|
$or: [
|
||||||
|
{ "normalizedVersions.type": "exact",
|
||||||
|
"normalizedVersions.value": "4.17.21" },
|
||||||
|
{ "normalizedVersions.type": "range",
|
||||||
|
"normalizedVersions.min": { $lte: "4.17.21" },
|
||||||
|
"normalizedVersions.max": { $gt: "4.17.21" } },
|
||||||
|
{ "normalizedVersions.type": "gte",
|
||||||
|
"normalizedVersions.min": { $lte: "4.17.21" } },
|
||||||
|
{ "normalizedVersions.type": "lte",
|
||||||
|
"normalizedVersions.max": { $gte: "4.17.21" } }
|
||||||
|
]
|
||||||
|
}},
|
||||||
|
{ $project: { advisoryKey: 1, title: 1, "normalizedVersions.packageId": 1 } }
|
||||||
|
]);
|
||||||
|
```
|
||||||
|
|
||||||
|
Use this pipeline during Sprint 2 staging validation runs. Invoke `explain("executionStats")` to confirm the compound index is selected.
|
||||||
|
|
||||||
|
### 2.2 Locate advisories missing normalized rules
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
db.advisories.aggregate([
|
||||||
|
{ $match: { $or: [
|
||||||
|
{ "normalizedVersions": { $exists: false } },
|
||||||
|
{ "normalizedVersions": { $size: 0 } }
|
||||||
|
] } },
|
||||||
|
{ $project: { advisoryKey: 1, affectedPackages: 1 } }
|
||||||
|
]);
|
||||||
|
```
|
||||||
|
|
||||||
|
Run this query after backfill jobs to identify gaps that still rely solely on `rangeExpression`.
|
||||||
|
|
||||||
|
### 2.3 Deduplicate overlapping rules
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
db.advisories.aggregate([
|
||||||
|
{ $unwind: "$normalizedVersions" },
|
||||||
|
{ $group: {
|
||||||
|
_id: {
|
||||||
|
identifier: "$normalizedVersions.packageId",
|
||||||
|
scheme: "$normalizedVersions.scheme",
|
||||||
|
type: "$normalizedVersions.type",
|
||||||
|
min: "$normalizedVersions.min",
|
||||||
|
minInclusive: "$normalizedVersions.minInclusive",
|
||||||
|
max: "$normalizedVersions.max",
|
||||||
|
maxInclusive: "$normalizedVersions.maxInclusive",
|
||||||
|
value: "$normalizedVersions.value"
|
||||||
|
},
|
||||||
|
advisories: { $addToSet: "$advisoryKey" },
|
||||||
|
notes: { $addToSet: "$normalizedVersions.notes" }
|
||||||
|
}},
|
||||||
|
{ $match: { "advisories.1": { $exists: true } } },
|
||||||
|
{ $sort: { "_id.identifier": 1, "_id.type": 1 } }
|
||||||
|
]);
|
||||||
|
```
|
||||||
|
|
||||||
|
Use this to confirm the merge dedupe logic keeps only one normalized rule per unique constraint.
|
||||||
|
|
||||||
|
## 3. Operational checklist
|
||||||
|
|
||||||
|
- [ ] Create the indexes in staging before toggling dual-write in production.
|
||||||
|
- [ ] Capture explain plans and attach them to the release notes.
|
||||||
|
- [ ] Notify downstream services that consume advisory snapshots about the new `normalizedVersions` array.
|
||||||
|
- [ ] Update export fixtures once dedupe verification passes.
|
||||||
|
|
||||||
|
Additional background and mapper examples live in [Feedser SemVer Merge Playbook](merge_semver_playbook.md).
|
||||||
123
docs/license-jwt-quota.md
Executable file
123
docs/license-jwt-quota.md
Executable file
@@ -0,0 +1,123 @@
|
|||||||
|
---
|
||||||
|
title: Offline JWT licence & daily‑run quota
|
||||||
|
description: How Stella‑Ops enforces a **runs‑per‑day** limit in fully air‑gapped deployments.
|
||||||
|
nav:
|
||||||
|
order: 36
|
||||||
|
---
|
||||||
|
|
||||||
|
# JWT‑based daily‑run licence (offline‑capable)
|
||||||
|
|
||||||
|
When *Stella‑Ops* scanners operate entirely **offline**, they cannot phone home
|
||||||
|
for metering.
|
||||||
|
Instead, the backend accepts a **signed JSON Web Token (JWT)** that states the
|
||||||
|
**maximum number of scans per UTC day**.
|
||||||
|
If no token is supplied, a _grace quota_ of **33 runs/24 h** applies.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1 Token contents
|
||||||
|
|
||||||
|
| Claim | Purpose | Example |
|
||||||
|
|-------|---------|---------|
|
||||||
|
| `sub` | Customer / licensee identifier | `"f47ac10b…"` |
|
||||||
|
| `iat` | Issued‑at timestamp | `1722566400` |
|
||||||
|
| `exp` | Absolute licence expiry | `2025‑12‑31T23:59:59Z` |
|
||||||
|
| `tier` | **Max scans per UTC day** | `{{ quota_token }}` |
|
||||||
|
| `tid` | Token identifier (32‑byte) | `"7d2285..."` |
|
||||||
|
| `pkg` | Product SKU / edition | `"stella‑core"` |
|
||||||
|
|
||||||
|
Tokens are signed with **RS256** and verified locally using the bundled public key.
|
||||||
|
Only the public key ships inside the container; the private key never leaves
|
||||||
|
the build pipeline.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2 Obtaining a token
|
||||||
|
|
||||||
|
1. **Request** → `POST /register { email:"alice@example.org" }`
|
||||||
|
2. Service hashes the e‑mail (SHA‑256), stores it, and issues a JWT (60 days by default).
|
||||||
|
3. Token is e‑mailed to you.
|
||||||
|
|
||||||
|
A new request for the same e‑mail returns the **same** token until it nears
|
||||||
|
expiry, avoiding quota “top‑ups” by re‑registration.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3 Supplying the token to an air‑gapped stack
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# recommended
|
||||||
|
docker run \
|
||||||
|
-v /opt/stella/license/alice.jwt:/run/secrets/stella_license.jwt:ro \
|
||||||
|
stella‑ops
|
||||||
|
```
|
||||||
|
|
||||||
|
Other supported paths:
|
||||||
|
|
||||||
|
| Method | Mount point | Hot‑reload |
|
||||||
|
| ------------- | ------------------------ | ----------- |
|
||||||
|
| Docker secret | `/run/secrets/…` | ✓ (inotify) |
|
||||||
|
| Bind‑mounted | user‑chosen path (above) | ✓ |
|
||||||
|
| Env variable | `STELLA_LICENSE_JWT` | ✗ restart |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4 Quota‑enforcement algorithm
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
Start --> Verify[Verify JWT signature]
|
||||||
|
  Verify -->|Invalid| Deny1[Run in non-licensed mode]
|
||||||
|
Verify --> Load[load today's counter UTC]
|
||||||
|
Load -->|SUM of last 24h scans < daily_quota| Permit[allow scan, add scan]
|
||||||
|
Permit --> End
|
||||||
|
Load -->|SUM of last 24h scans ≥ daily_quota| Deny1
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## 5 Renewal procedure
|
||||||
|
|
||||||
|
| Scenario | Action |
|
||||||
|
| -------------- | --------------------------------------------------------------------------------- |
|
||||||
|
| More capacity  | Request a new token with a higher `tier` value; replace file – **no restart needed**  |
|
||||||
|
| Licence expiry | Same as above; new `exp` date |
|
||||||
|
| Key rotation | Container image ships new public key(s); older tokens still verify |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6 Fallback limits
|
||||||
|
|
||||||
|
| Situation | Daily quota |
|
||||||
|
| ----------------------- | ----------------------------------- |
|
||||||
|
| Valid JWT present       | value of the `tier` claim ({{ quota_token }}) |
|
||||||
|
| No JWT | **33** |
|
||||||
|
| JWT expired (if used) | treated as **anonymous** unless policy enforces hard‑fail |
|
||||||
|
| Token signature invalid | **0** (reject) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7 Threat‑model highlights (future work / optional hardening)
|
||||||
|
|
||||||
|
| Threat | Mitigation |
|
||||||
|
| --------------------------- | ---------------------------------------------------------------------- |
|
||||||
|
| Copy token & DB to 2nd node | Bind `sub`/`tid` to host fingerprint (TPM EK) – optional enterprise control |
|
||||||
|
| Counter DB rollback | Hash‑chain + monotonic clock – optional enterprise control |
|
||||||
|
| Flooding single node | Redis‑backed cluster rate‑limit (30 hits / 60 s) + edge Nginx (20 r/s) |
|
||||||
|
| Key compromise | Rotate RS256 key‑pair, ship new pubkey, re‑sign tokens |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8 Anonymous (33 runs) mode
|
||||||
|
|
||||||
|
Offline PoCs without registration still work:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose exec stella-ops stella-jwt reload # reloads, discovers no token
|
||||||
|
```
|
||||||
|
|
||||||
|
…but **production deployments *must* register** to unlock real‑world quotas and
|
||||||
|
receive security advisories via e‑mail.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Last updated: 2025‑08‑02*
|
||||||
97
docs/ops/authority-backup-restore.md
Normal file
97
docs/ops/authority-backup-restore.md
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
# Authority Backup & Restore Runbook
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
- **Applies to:** StellaOps Authority deployments running the official `ops/authority/docker-compose.authority.yaml` stack or equivalent Kubernetes packaging.
|
||||||
|
- **Artifacts covered:** MongoDB (`stellaops-authority` database), Authority configuration (`etc/authority.yaml`), plugin manifests under `etc/authority.plugins/`, and signing key material stored in the `authority-keys` volume (defaults to `/app/keys` inside the container).
|
||||||
|
- **Frequency:** Run the full procedure prior to upgrades, before rotating keys, and at least once per 24 h in production. Store snapshots in an encrypted, access-controlled vault.
|
||||||
|
|
||||||
|
## Inventory Checklist
|
||||||
|
| Component | Location (compose default) | Notes |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| Mongo data | `mongo-data` volume (`/var/lib/docker/volumes/.../mongo-data`) | Contains all Authority collections (`AuthorityUser`, `AuthorityClient`, `AuthorityToken`, etc.). |
|
||||||
|
| Configuration | `etc/authority.yaml` | Mounted read-only into the container at `/etc/authority.yaml`. |
|
||||||
|
| Plugin manifests | `etc/authority.plugins/*.yaml` | Includes `standard.yaml` with `tokenSigning.keyDirectory`. |
|
||||||
|
| Signing keys | `authority-keys` volume -> `/app/keys` | Path is derived from `tokenSigning.keyDirectory` (defaults to `../keys` relative to the manifest). |
|
||||||
|
|
||||||
|
> **TIP:** Confirm the deployed key directory via `tokenSigning.keyDirectory` in `etc/authority.plugins/standard.yaml`; some installations relocate keys to `/var/lib/stellaops/authority/keys`.
|
||||||
|
|
||||||
|
## Hot Backup (no downtime)
|
||||||
|
1. **Create output directory:** `mkdir -p backup/$(date +%Y-%m-%d)` on the host.
|
||||||
|
2. **Dump Mongo:**
|
||||||
|
```bash
|
||||||
|
docker compose -f ops/authority/docker-compose.authority.yaml exec mongo \
|
||||||
|
mongodump --archive=/dump/authority-$(date +%Y%m%dT%H%M%SZ).gz \
|
||||||
|
--gzip --db stellaops-authority
|
||||||
|
docker compose -f ops/authority/docker-compose.authority.yaml cp \
|
||||||
|
mongo:/dump/authority-$(date +%Y%m%dT%H%M%SZ).gz backup/
|
||||||
|
```
|
||||||
|
The `mongodump` archive preserves indexes and can be restored with `mongorestore --archive --gzip`.
|
||||||
|
3. **Capture configuration + manifests:**
|
||||||
|
```bash
|
||||||
|
cp etc/authority.yaml backup/
|
||||||
|
rsync -a etc/authority.plugins/ backup/authority.plugins/
|
||||||
|
```
|
||||||
|
4. **Export signing keys:** the compose file maps `authority-keys` to a local Docker volume. Snapshot it without stopping the service:
|
||||||
|
```bash
|
||||||
|
docker run --rm \
|
||||||
|
-v authority-keys:/keys \
|
||||||
|
-v "$(pwd)/backup:/backup" \
|
||||||
|
busybox tar czf /backup/authority-keys-$(date +%Y%m%dT%H%M%SZ).tar.gz -C /keys .
|
||||||
|
```
|
||||||
|
5. **Checksum:** generate SHA-256 digests for every file and store them alongside the artefacts.
|
||||||
|
6. **Encrypt & upload:** wrap the backup folder using your secrets management standard (e.g., age, GPG) and upload to the designated offline vault.
|
||||||
|
|
||||||
|
## Cold Backup (planned downtime)
|
||||||
|
1. Notify stakeholders and drain traffic (CLI clients should refresh tokens afterwards).
|
||||||
|
2. Stop services:
|
||||||
|
```bash
|
||||||
|
docker compose -f ops/authority/docker-compose.authority.yaml down
|
||||||
|
```
|
||||||
|
3. Back up volumes directly using `tar`:
|
||||||
|
```bash
|
||||||
|
docker run --rm -v mongo-data:/data -v "$(pwd)/backup:/backup" \
|
||||||
|
busybox tar czf /backup/mongo-data-$(date +%Y%m%d).tar.gz -C /data .
|
||||||
|
docker run --rm -v authority-keys:/keys -v "$(pwd)/backup:/backup" \
|
||||||
|
busybox tar czf /backup/authority-keys-$(date +%Y%m%d).tar.gz -C /keys .
|
||||||
|
```
|
||||||
|
4. Copy configuration + manifests as in the hot backup (steps 3–6).
|
||||||
|
5. Restart services and verify health:
|
||||||
|
```bash
|
||||||
|
docker compose -f ops/authority/docker-compose.authority.yaml up -d
|
||||||
|
curl -fsS http://localhost:8080/ready
|
||||||
|
```
|
||||||
|
|
||||||
|
## Restore Procedure
|
||||||
|
1. **Provision clean volumes:** remove existing volumes if you’re rebuilding a node (`docker volume rm mongo-data authority-keys`), then recreate the compose stack so empty volumes exist.
|
||||||
|
2. **Restore Mongo:**
|
||||||
|
```bash
|
||||||
|
docker compose exec -T mongo mongorestore --archive --gzip --drop < backup/authority-YYYYMMDDTHHMMSSZ.gz
|
||||||
|
```
|
||||||
|
Use `--drop` to replace collections; omit if doing a partial restore.
|
||||||
|
3. **Restore configuration/manifests:** copy `authority.yaml` and `authority.plugins/*` into place before starting the Authority container.
|
||||||
|
4. **Restore signing keys:** untar into the mounted volume:
|
||||||
|
```bash
|
||||||
|
docker run --rm -v authority-keys:/keys -v "$(pwd)/backup:/backup" \
|
||||||
|
busybox tar xzf /backup/authority-keys-YYYYMMDD.tar.gz -C /keys
|
||||||
|
```
|
||||||
|
Ensure private key files remain readable only by the service (`find /keys -type f -exec chmod 600 {} +`; directories keep their execute bit so the path stays traversable).
|
||||||
|
5. **Start services & validate:**
|
||||||
|
```bash
|
||||||
|
docker compose up -d
|
||||||
|
curl -fsS http://localhost:8080/health
|
||||||
|
```
|
||||||
|
6. **Validate JWKS and tokens:** call `/jwks` and issue a short-lived token via the CLI to confirm key material matches expectations.
|
||||||
|
|
||||||
|
## Disaster Recovery Notes
|
||||||
|
- **Air-gapped replication:** replicate archives via the Offline Update Kit transport channels; never attach USB devices without scanning.
|
||||||
|
- **Retention:** maintain 30 daily snapshots + 12 monthly archival copies. Rotate encryption keys annually.
|
||||||
|
- **Key compromise:** if signing keys are suspected compromised, restore from the latest clean backup, rotate via OPS3 (key rotation tooling), and publish a revocation notice.
|
||||||
|
- **Mongo version:** keep dump/restore images pinned to the deployment version (compose uses `mongo:7`). Restoring across major versions requires a compatibility review.
|
||||||
|
|
||||||
|
## Verification Checklist
|
||||||
|
- [ ] `/ready` reports all identity providers ready.
|
||||||
|
- [ ] OAuth flows issue tokens signed by the restored keys.
|
||||||
|
- [ ] `PluginRegistrationSummary` logs expected providers on startup.
|
||||||
|
- [ ] Revocation manifest export (`dotnet run --project src/StellaOps.Authority`) succeeds.
|
||||||
|
- [ ] Monitoring dashboards show metrics resuming (see OPS5 deliverables).
|
||||||
|
|
||||||
174
docs/ops/authority-grafana-dashboard.json
Normal file
174
docs/ops/authority-grafana-dashboard.json
Normal file
@@ -0,0 +1,174 @@
|
|||||||
|
{
|
||||||
|
"title": "StellaOps Authority - Token & Access Monitoring",
|
||||||
|
"uid": "authority-token-monitoring",
|
||||||
|
"schemaVersion": 38,
|
||||||
|
"version": 1,
|
||||||
|
"editable": true,
|
||||||
|
"timezone": "",
|
||||||
|
"graphTooltip": 0,
|
||||||
|
"time": {
|
||||||
|
"from": "now-6h",
|
||||||
|
"to": "now"
|
||||||
|
},
|
||||||
|
"templating": {
|
||||||
|
"list": [
|
||||||
|
{
|
||||||
|
"name": "datasource",
|
||||||
|
"type": "datasource",
|
||||||
|
"query": "prometheus",
|
||||||
|
"refresh": 1,
|
||||||
|
"hide": 0,
|
||||||
|
"current": {}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"panels": [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"title": "Token Requests – Success vs Failure",
|
||||||
|
"type": "timeseries",
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${datasource}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"unit": "req/s",
|
||||||
|
"displayName": "{{grant_type}} ({{status}})"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"refId": "A",
|
||||||
|
"expr": "sum by (grant_type, status) (rate(http_server_duration_seconds_count{service_name=\"stellaops-authority\", http_route=\"/token\"}[5m]))",
|
||||||
|
"legendFormat": "{{grant_type}} {{status}}"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"displayMode": "table",
|
||||||
|
"placement": "bottom"
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "multi"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"title": "Rate Limiter Rejections",
|
||||||
|
"type": "timeseries",
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${datasource}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"unit": "req/s",
|
||||||
|
"displayName": "{{limiter}}"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"refId": "A",
|
||||||
|
"expr": "sum by (limiter) (rate(aspnetcore_rate_limiting_rejections_total{service_name=\"stellaops-authority\"}[5m]))",
|
||||||
|
"legendFormat": "{{limiter}}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"title": "Bypass Events (5m)",
|
||||||
|
"type": "stat",
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${datasource}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"unit": "short",
|
||||||
|
"color": {
|
||||||
|
"mode": "thresholds"
|
||||||
|
},
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{ "color": "green", "value": null },
|
||||||
|
{ "color": "orange", "value": 1 },
|
||||||
|
{ "color": "red", "value": 5 }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"refId": "A",
|
||||||
|
"expr": "sum(rate(log_messages_total{message_template=\"Granting StellaOps bypass for remote {RemoteIp}; required scopes {RequiredScopes}.\"}[5m]))"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"options": {
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": ["last"],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"orientation": "horizontal",
|
||||||
|
"textMode": "auto"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 4,
|
||||||
|
"title": "Lockout Events (15m)",
|
||||||
|
"type": "stat",
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${datasource}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"unit": "short",
|
||||||
|
"color": {
|
||||||
|
"mode": "thresholds"
|
||||||
|
},
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{ "color": "green", "value": null },
|
||||||
|
{ "color": "orange", "value": 5 },
|
||||||
|
{ "color": "red", "value": 10 }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"refId": "A",
|
||||||
|
"expr": "sum(rate(log_messages_total{message_template=\"Plugin {PluginName} denied access for {Username} due to lockout (retry after {RetryAfter}).\"}[15m]))"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"options": {
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": ["last"],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"orientation": "horizontal",
|
||||||
|
"textMode": "auto"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"title": "Trace Explorer Shortcut",
|
||||||
|
"type": "text",
|
||||||
|
"options": {
|
||||||
|
"mode": "markdown",
|
||||||
|
"content": "[Open Trace Explorer](#/explore?left={\"datasource\":\"tempo\",\"queries\":[{\"query\":\"{service.name=\\\"stellaops-authority\\\", span_name=~\\\"authority.token.*\\\"}\",\"refId\":\"A\"}]})"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"links": []
|
||||||
|
}
|
||||||
81
docs/ops/authority-monitoring.md
Normal file
81
docs/ops/authority-monitoring.md
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
# Authority Monitoring & Alerting Playbook
|
||||||
|
|
||||||
|
## Telemetry Sources
|
||||||
|
- **Traces:** Activity source `StellaOps.Authority` emits spans for every token flow (`authority.token.validate_*`, `authority.token.handle_*`, `authority.token.validate_access`). Key tags include `authority.endpoint`, `authority.grant_type`, `authority.username`, `authority.client_id`, and `authority.identity_provider`.
|
||||||
|
- **Metrics:** OpenTelemetry instrumentation (`AddAspNetCoreInstrumentation`, `AddHttpClientInstrumentation`, custom meter `StellaOps.Authority`) exports:
|
||||||
|
- `http.server.request.duration` histogram (`http_route`, `http_status_code`, `authority.endpoint` tag via `aspnetcore` enrichment).
|
||||||
|
- `process.runtime.gc.*`, `process.runtime.dotnet.*` (from `AddRuntimeInstrumentation`).
|
||||||
|
- **Logs:** Serilog writes structured events to stdout. Notable templates:
|
||||||
|
- `"Password grant verification failed ..."` and `"Plugin {PluginName} denied access ... due to lockout"` (lockout spike detector).
|
||||||
|
- `"Granting StellaOps bypass for remote {RemoteIp}"` (bypass usage).
|
||||||
|
- `"Rate limit exceeded for path {Path} from {RemoteIp}"` (limiter alerts).
|
||||||
|
|
||||||
|
## Prometheus Metrics to Collect
|
||||||
|
| Metric | Query | Purpose |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| `token_requests_total` | `sum by (grant_type, status) (rate(http_server_duration_seconds_count{service_name="stellaops-authority", http_route="/token"}[5m]))` | Token issuance volume per grant type (`grant_type` comes via `authority.grant_type` span attribute → Exemplars in Grafana). |
|
||||||
|
| `token_failure_ratio` | `sum(rate(http_server_duration_seconds_count{service_name="stellaops-authority", http_route="/token", http_status_code=~"4..|5.."}[5m])) / sum(rate(http_server_duration_seconds_count{service_name="stellaops-authority", http_route="/token"}[5m]))` | Alert when > 5 % for 10 min. |
|
||||||
|
| `authorize_rate_limit_hits` | `sum(rate(aspnetcore_rate_limiting_rejections_total{service_name="stellaops-authority", limiter="authority-token"}[5m]))` | Detect rate limiting saturations (requires OTEL ASP.NET rate limiter exporter). |
|
||||||
|
| `lockout_events` | `sum by (plugin) (rate(log_messages_total{app="stellaops-authority", level="Warning", message_template="Plugin {PluginName} denied access for {Username} due to lockout (retry after {RetryAfter})."}[5m]))` | Derived from Loki/Promtail log counter. |
|
||||||
|
| `bypass_usage_total` | `sum(rate(log_messages_total{app="stellaops-authority", level="Information", message_template="Granting StellaOps bypass for remote {RemoteIp}; required scopes {RequiredScopes}."}[5m]))` | Track trusted bypass invocations. |
|
||||||
|
|
||||||
|
> **Exporter note:** Enable `aspnetcore` meters (`dotnet-counters` name `Microsoft.AspNetCore.Hosting`), or configure the OpenTelemetry Collector `metrics` pipeline with `metric_statements` to remap histogram counts into the shown series.
|
||||||
|
|
||||||
|
## Alert Rules
|
||||||
|
1. **Token Failure Surge**
|
||||||
|
- _Expression_: `token_failure_ratio > 0.05`
|
||||||
|
- _For_: `10m`
|
||||||
|
- _Labels_: `severity="critical"`
|
||||||
|
- _Annotations_: Include `topk(5, sum by (authority_identity_provider) (increase(authority_token_rejections_total[10m])))` as diagnostic hint (requires span → metric transformation).
|
||||||
|
2. **Lockout Spike**
|
||||||
|
- _Expression_: `sum(rate(log_messages_total{message_template="Plugin {PluginName} denied access for {Username} due to lockout (retry after {RetryAfter})."}[15m])) > 10`
|
||||||
|
- _For_: `15m`
|
||||||
|
- Investigate credential stuffing; consider temporarily tightening `RateLimiting.Token`.
|
||||||
|
3. **Bypass Threshold**
|
||||||
|
- _Expression_: `sum(rate(log_messages_total{message_template="Granting StellaOps bypass for remote {RemoteIp}; required scopes {RequiredScopes}."}[5m])) > 1`
|
||||||
|
- _For_: `5m`
|
||||||
|
- Alert severity `warning` — verify the calling host list.
|
||||||
|
4. **Rate Limiter Saturation**
|
||||||
|
- _Expression_: `sum(rate(aspnetcore_rate_limiting_rejections_total{service_name="stellaops-authority"}[5m])) > 0`
|
||||||
|
- Escalate if sustained for 5 min; confirm trusted clients aren’t misconfigured.
|
||||||
|
|
||||||
|
## Grafana Dashboard
|
||||||
|
- Import `docs/ops/authority-grafana-dashboard.json` to provision baseline panels:
|
||||||
|
- **Token Success vs Failure** – stacked rate visualization split by grant type.
|
||||||
|
- **Rate Limiter Hits** – bar chart showing `authority-token` and `authority-authorize`.
|
||||||
|
- **Bypass & Lockout Events** – dual-stat panel using Loki-derived counters.
|
||||||
|
- **Trace Explorer Link** – panel links to `StellaOps.Authority` span search pre-filtered by `authority.grant_type`.
|
||||||
|
|
||||||
|
## Collector Configuration Snippets
|
||||||
|
```yaml
|
||||||
|
receivers:
|
||||||
|
otlp:
|
||||||
|
protocols:
|
||||||
|
http:
|
||||||
|
exporters:
|
||||||
|
prometheus:
|
||||||
|
endpoint: "0.0.0.0:9464"
|
||||||
|
processors:
|
||||||
|
batch:
|
||||||
|
attributes/token_grant:
|
||||||
|
actions:
|
||||||
|
- key: grant_type
|
||||||
|
action: upsert
|
||||||
|
from_attribute: authority.grant_type
|
||||||
|
service:
|
||||||
|
pipelines:
|
||||||
|
metrics:
|
||||||
|
receivers: [otlp]
|
||||||
|
processors: [attributes/token_grant, batch]
|
||||||
|
exporters: [prometheus]
|
||||||
|
logs:
|
||||||
|
receivers: [otlp]
|
||||||
|
processors: [batch]
|
||||||
|
exporters: [loki]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Operational Checklist
|
||||||
|
- [ ] Confirm `STELLAOPS_AUTHORITY__OBSERVABILITY__EXPORTERS` enables OTLP in production builds.
|
||||||
|
- [ ] Ensure Promtail captures container stdout with Serilog structured formatting.
|
||||||
|
- [ ] Periodically validate alert noise by running load tests that trigger the rate limiter.
|
||||||
|
- [ ] Include dashboard JSON in Offline Kit for air-gapped clusters; update version header when metrics change.
|
||||||
130
docs/ops/feedser-conflict-resolution.md
Normal file
130
docs/ops/feedser-conflict-resolution.md
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
# Feedser Conflict Resolution Runbook (Sprint 3)
|
||||||
|
|
||||||
|
This runbook equips Feedser operators to detect, triage, and resolve advisory conflicts now that the Sprint 3 merge engine landed (`AdvisoryPrecedenceMerger`, merge-event hashing, and telemetry counters). It builds on the canonical rules defined in `src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md` and the metrics/logging instrumentation delivered this sprint.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1. Precedence Model (recap)
|
||||||
|
|
||||||
|
- **Default ranking:** `GHSA -> NVD -> OSV`, with distro/vendor PSIRTs outranking ecosystem feeds (`AdvisoryPrecedenceDefaults`). Use `feedser:merge:precedence:ranks` to override per source when incident response requires it.
|
||||||
|
- **Freshness override:** if a lower-ranked source is >= 48 hours newer for a freshness-sensitive field (title, summary, affected ranges, references, credits), it wins. Every override stamps `provenance[].decisionReason = freshness`.
|
||||||
|
- **Tie-breakers:** when precedence and freshness tie, the engine falls back to (1) primary source order, (2) shortest normalized text, (3) lowest stable hash. Merge-generated provenance records set `decisionReason = tie-breaker`.
|
||||||
|
- **Audit trail:** each merged advisory receives a `merge` provenance entry listing the participating sources plus a `merge_event` record with canonical before/after SHA-256 hashes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Telemetry Shipped This Sprint
|
||||||
|
|
||||||
|
| Instrument | Type | Key Tags | Purpose |
|
||||||
|
|------------|------|----------|---------|
|
||||||
|
| `feedser.merge.operations` | Counter | `inputs` | Total precedence merges executed. |
|
||||||
|
| `feedser.merge.overrides` | Counter | `primary_source`, `suppressed_source`, `primary_rank`, `suppressed_rank` | Field-level overrides chosen by precedence. |
|
||||||
|
| `feedser.merge.range_overrides` | Counter | `advisory_key`, `package_type`, `primary_source`, `suppressed_source`, `primary_range_count`, `suppressed_range_count` | Package range overrides emitted by `AffectedPackagePrecedenceResolver`. |
|
||||||
|
| `feedser.merge.conflicts` | Counter | `type` (`severity`, `precedence_tie`), `reason` (`mismatch`, `primary_missing`, `equal_rank`) | Conflicts requiring operator review. |
|
||||||
|
| `feedser.merge.identity_conflicts` | Counter | `scheme`, `alias_value`, `advisory_count` | Alias collisions surfaced by the identity graph. |
|
||||||
|
|
||||||
|
### Structured logs
|
||||||
|
|
||||||
|
- `AdvisoryOverride` (EventId 1000) - logs merge suppressions with alias/provenance counts.
|
||||||
|
- `PackageRangeOverride` (EventId 1001) - logs package-level precedence decisions.
|
||||||
|
- `PrecedenceConflict` (EventId 1002) - logs mismatched severity or equal-rank scenarios.
|
||||||
|
- `Alias collision ...` (no EventId) - emitted when `feedser.merge.identity_conflicts` increments.
|
||||||
|
|
||||||
|
Expect all logs at `Information`. Ensure OTEL exporters include the scope `StellaOps.Feedser.Merge`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Detection & Alerting
|
||||||
|
|
||||||
|
1. **Dashboard panels**
|
||||||
|
- `feedser.merge.conflicts` - table grouped by `type/reason`. Alert when > 0 in a 15 minute window.
|
||||||
|
- `feedser.merge.range_overrides` - stacked bar by `package_type`. Spikes highlight vendor PSIRT overrides over registry data.
|
||||||
|
- `feedser.merge.overrides` with `primary_source|suppressed_source` - catches unexpected precedence flips (e.g., OSV overtaking GHSA).
|
||||||
|
- `feedser.merge.identity_conflicts` - single-stat; alert when alias collisions occur more than once per day.
|
||||||
|
2. **Log based alerts**
|
||||||
|
- `eventId=1002` with `reason="equal_rank"` - indicates precedence table gaps; page merge owners.
|
||||||
|
- `eventId=1002` with `reason="mismatch"` - severity disagreement; open connector bug if sustained.
|
||||||
|
3. **Job health**
|
||||||
|
- `stellaops-cli db merge` exit code `1` signifies unresolved conflicts. Pipe to automation that captures logs and notifies #feedser-ops.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Triage Workflow
|
||||||
|
|
||||||
|
1. **Confirm job context**
|
||||||
|
- `stellaops-cli db merge` (CLI) or `POST /jobs/merge:reconcile` (API) to rehydrate the merge job. Use `--verbose` to stream structured logs during triage.
|
||||||
|
2. **Inspect metrics**
|
||||||
|
- Correlate spikes in `feedser.merge.conflicts` with `primary_source`/`suppressed_source` tags from `feedser.merge.overrides`.
|
||||||
|
3. **Pull structured logs**
|
||||||
|
- Example (vector output):
|
||||||
|
```
|
||||||
|
jq 'select(.EventId.Name=="PrecedenceConflict") | {advisory: .State[0].Value, type: .ConflictType, reason: .Reason, primary: .PrimarySources, suppressed: .SuppressedSources}' stellaops-feedser.log
|
||||||
|
```
|
||||||
|
4. **Review merge events**
|
||||||
|
- `mongosh`:
|
||||||
|
```javascript
|
||||||
|
use feedser;
|
||||||
|
db.merge_event.find({ advisoryKey: "CVE-2025-1234" }).sort({ mergedAt: -1 }).limit(5);
|
||||||
|
```
|
||||||
|
- Compare `beforeHash` vs `afterHash` to confirm the merge actually changed canonical output.
|
||||||
|
5. **Interrogate provenance**
|
||||||
|
- `db.advisories.findOne({ advisoryKey: "CVE-2025-1234" }, { title: 1, severity: 1, provenance: 1, "affectedPackages.provenance": 1 })`
|
||||||
|
- Check `provenance[].decisionReason` values (`precedence`, `freshness`, `tie-breaker`) to understand why the winning field was chosen.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Conflict Classification Matrix
|
||||||
|
|
||||||
|
| Signal | Likely Cause | Immediate Action |
|
||||||
|
|--------|--------------|------------------|
|
||||||
|
| `reason="mismatch"` with `type="severity"` | Upstream feeds disagree on CVSS vector/severity. | Verify which feed is freshest; if correctness is known, adjust connector mapping or precedence override. |
|
||||||
|
| `reason="primary_missing"` | Higher-ranked source lacks the field entirely. | Backfill connector data or temporarily allow lower-ranked source via precedence override. |
|
||||||
|
| `reason="equal_rank"` | Two feeds share the same precedence rank (custom config or missing entry). | Update `feedser:merge:precedence:ranks` to break the tie; restart merge job. |
|
||||||
|
| Rising `feedser.merge.range_overrides` for a package type | Vendor PSIRT now supplies richer ranges. | Validate connectors emit `decisionReason="precedence"` and update dashboards to treat registry ranges as fallback. |
|
||||||
|
| `feedser.merge.identity_conflicts` > 0 | Alias scheme mapping produced collisions (duplicate CVE <-> advisory pairs). | Inspect `Alias collision` log payload; reconcile the alias graph by adjusting connector alias output. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6. Resolution Playbook
|
||||||
|
|
||||||
|
1. **Connector data fix**
|
||||||
|
- Re-run the offending connector stages (`stellaops-cli db fetch --source ghsa --stage map` etc.).
|
||||||
|
- Once fixed, rerun merge and verify `decisionReason` reflects `freshness` or `precedence` as expected.
|
||||||
|
2. **Temporary precedence override**
|
||||||
|
- Edit `etc/feedser.yaml`:
|
||||||
|
```yaml
|
||||||
|
feedser:
|
||||||
|
merge:
|
||||||
|
precedence:
|
||||||
|
ranks:
|
||||||
|
osv: 1
|
||||||
|
ghsa: 0
|
||||||
|
```
|
||||||
|
- Restart Feedser workers; confirm tags in `feedser.merge.overrides` show the new ranks.
|
||||||
|
- Document the override with expiry in the change log.
|
||||||
|
3. **Alias remediation**
|
||||||
|
- Update connector mapping rules to weed out duplicate aliases (e.g., skip GHSA aliases that mirror CVE IDs).
|
||||||
|
- Flush cached alias graphs if necessary (`db.alias_graph.drop()` is destructive — coordinate with Storage before issuing).
|
||||||
|
4. **Escalation**
|
||||||
|
- If override metrics spike due to upstream regression, open an incident with Security Guild, referencing merge logs and `merge_event` IDs.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7. Validation Checklist
|
||||||
|
|
||||||
|
- [ ] Merge job rerun returns exit code `0`.
|
||||||
|
- [ ] `feedser.merge.conflicts` baseline returns to zero after corrective action.
|
||||||
|
- [ ] Latest `merge_event` entry shows expected hash delta.
|
||||||
|
- [ ] Affected advisory document shows updated `provenance[].decisionReason`.
|
||||||
|
- [ ] Ops change log updated with incident summary, config overrides, and rollback plan.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8. Reference Material
|
||||||
|
|
||||||
|
- Canonical conflict rules: `src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md`.
|
||||||
|
- Merge engine internals: `src/StellaOps.Feedser.Merge/Services/AdvisoryPrecedenceMerger.cs`.
|
||||||
|
- Metrics definitions: `src/StellaOps.Feedser.Merge/Services/AdvisoryMergeService.cs` (identity conflicts) and `AdvisoryPrecedenceMerger`.
|
||||||
|
- Storage audit trail: `src/StellaOps.Feedser.Merge/Services/MergeEventWriter.cs`, `src/StellaOps.Feedser.Storage.Mongo/MergeEvents`.
|
||||||
|
|
||||||
|
Keep this runbook synchronized with future sprint notes and update alert thresholds as baseline volumes change.
|
||||||
104
docs/ops/feedser-cve-kev-operations.md
Normal file
104
docs/ops/feedser-cve-kev-operations.md
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
# Feedser CVE & KEV Connector Operations
|
||||||
|
|
||||||
|
This playbook equips operators with the steps required to roll out and monitor the CVE Services and CISA KEV connectors across environments.
|
||||||
|
|
||||||
|
## 1. CVE Services Connector (`source:cve:*`)
|
||||||
|
|
||||||
|
### 1.1 Prerequisites
|
||||||
|
|
||||||
|
- CVE Services API credentials (organisation ID, user ID, API key) with access to the JSON 5 API.
|
||||||
|
- Network egress to `https://cveawg.mitre.org` (or a mirrored endpoint) from the Feedser workers.
|
||||||
|
- Updated `feedser.yaml` (or the matching environment variables) with the following section:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
feedser:
|
||||||
|
sources:
|
||||||
|
cve:
|
||||||
|
baseEndpoint: "https://cveawg.mitre.org/api/"
|
||||||
|
apiOrg: "ORG123"
|
||||||
|
apiUser: "user@example.org"
|
||||||
|
apiKeyFile: "/var/run/secrets/feedser/cve-api-key"
|
||||||
|
pageSize: 200
|
||||||
|
maxPagesPerFetch: 5
|
||||||
|
initialBackfill: "30.00:00:00"
|
||||||
|
requestDelay: "00:00:00.250"
|
||||||
|
failureBackoff: "00:10:00"
|
||||||
|
```
|
||||||
|
|
||||||
|
> ℹ️ Store the API key outside source control. When using `apiKeyFile`, mount the secret file into the container/host; alternatively supply `apiKey` via `FEEDSER_SOURCES__CVE__APIKEY`.
|
||||||
|
|
||||||
|
### 1.2 Smoke Test (staging)
|
||||||
|
|
||||||
|
1. Deploy the updated configuration and restart the Feedser service so the connector picks up the credentials.
|
||||||
|
2. Trigger one end-to-end cycle:
|
||||||
|
- Feedser CLI: `stella db jobs run source:cve:fetch --and-then source:cve:parse --and-then source:cve:map`
|
||||||
|
- REST fallback: `POST /jobs/run { "kind": "source:cve:fetch", "chain": ["source:cve:parse", "source:cve:map"] }`
|
||||||
|
3. Observe the following metrics (exported via OTEL meter `StellaOps.Feedser.Source.Cve`):
|
||||||
|
- `cve.fetch.attempts`, `cve.fetch.success`, `cve.fetch.failures`, `cve.fetch.unchanged`
|
||||||
|
- `cve.parse.success`, `cve.parse.failures`, `cve.parse.quarantine`
|
||||||
|
- `cve.map.success`
|
||||||
|
4. Verify the MongoDB advisory store contains fresh CVE advisories (`advisoryKey` prefix `cve/`) and that the source cursor (`source_states` collection) advanced.
|
||||||
|
|
||||||
|
### 1.3 Production Monitoring
|
||||||
|
|
||||||
|
- **Dashboards** – Add the counters above plus `feedser.range.primitives` (filtered by `scheme=semver` or `scheme=vendor`) to the Feedser overview board. Alert when:
|
||||||
|
- `rate(cve.fetch.failures[5m]) > 0`
|
||||||
|
- `rate(cve.map.success[15m]) == 0` while fetch attempts continue
|
||||||
|
- `sum_over_time(cve.parse.quarantine[1h]) > 0`
|
||||||
|
- **Logs** – Watch for `CveConnector` warnings such as `Failed fetching CVE record` or schema validation errors (`Malformed CVE JSON`). These are emitted with the CVE ID and document identifier for triage.
|
||||||
|
- **Backfill window** – operators can tighten or widen the `initialBackfill` / `maxPagesPerFetch` values after validating baseline throughput. Update the config and restart the worker to apply changes.
|
||||||
|
|
||||||
|
## 2. CISA KEV Connector (`source:kev:*`)
|
||||||
|
|
||||||
|
### 2.1 Prerequisites
|
||||||
|
|
||||||
|
- Network egress (or mirrored content) for `https://www.cisa.gov/sites/default/files/feeds/known_exploited_vulnerabilities.json`.
|
||||||
|
- No credentials are required, but the HTTP allow-list must include `www.cisa.gov`.
|
||||||
|
- Confirm the following snippet in `feedser.yaml` (defaults shown; tune as needed):
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
feedser:
|
||||||
|
sources:
|
||||||
|
kev:
|
||||||
|
feedUri: "https://www.cisa.gov/sites/default/files/feeds/known_exploited_vulnerabilities.json"
|
||||||
|
requestTimeout: "00:01:00"
|
||||||
|
failureBackoff: "00:05:00"
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2.2 Schema validation & anomaly handling
|
||||||
|
|
||||||
|
From this sprint the connector validates the KEV JSON payload against `Schemas/kev-catalog.schema.json`. Malformed documents are quarantined, and entries missing a CVE ID are dropped with a warning (`reason=missingCveId`). Operators should treat repeated schema failures as an upstream regression and coordinate with CISA or mirror maintainers.
|
||||||
|
|
||||||
|
### 2.3 Smoke Test (staging)
|
||||||
|
|
||||||
|
1. Deploy the configuration and restart Feedser.
|
||||||
|
2. Trigger a pipeline run:
|
||||||
|
- CLI: `stella db jobs run source:kev:fetch --and-then source:kev:parse --and-then source:kev:map`
|
||||||
|
- REST: `POST /jobs/run { "kind": "source:kev:fetch", "chain": ["source:kev:parse", "source:kev:map"] }`
|
||||||
|
3. Verify the metrics exposed by meter `StellaOps.Feedser.Source.Kev`:
|
||||||
|
- `kev.fetch.attempts`, `kev.fetch.success`, `kev.fetch.unchanged`, `kev.fetch.failures`
|
||||||
|
- `kev.parse.entries` (tag `catalogVersion`), `kev.parse.failures`, `kev.parse.anomalies` (tag `reason`)
|
||||||
|
- `kev.map.advisories` (tag `catalogVersion`)
|
||||||
|
4. Confirm MongoDB documents exist for the catalog JSON (`raw_documents` & `dtos`) and that advisories with prefix `kev/` are written.
|
||||||
|
|
||||||
|
### 2.4 Production Monitoring
|
||||||
|
|
||||||
|
- Alert when `kev.fetch.success` goes to zero for longer than the expected daily cadence (default: trigger if `rate(kev.fetch.success[8h]) == 0` during business hours).
|
||||||
|
- Track anomaly spikes via `kev.parse.anomalies{reason="missingCveId"}`. A sustained non-zero rate means the upstream catalog contains unexpected records.
|
||||||
|
- The connector logs each validated catalog: `Parsed KEV catalog document … entries=X`. Absence of that log alongside consecutive `kev.fetch.success` counts suggests schema validation failures—correlate with warning-level events in the `StellaOps.Feedser.Source.Kev` logger.
|
||||||
|
|
||||||
|
### 2.5 Known good dashboard tiles
|
||||||
|
|
||||||
|
Add the following panels to the Feedser observability board:
|
||||||
|
|
||||||
|
| Metric | Recommended visualisation |
|
||||||
|
|--------|---------------------------|
|
||||||
|
| `kev.fetch.success` | Single-stat (last 24 h) with threshold alert |
|
||||||
|
| `rate(kev.parse.entries[1h])` by `catalogVersion` | Stacked area – highlights daily release size |
|
||||||
|
| `sum_over_time(kev.parse.anomalies[1d])` by `reason` | Table – anomaly breakdown |
|
||||||
|
|
||||||
|
## 3. Runbook updates
|
||||||
|
|
||||||
|
- Record staging/production smoke test results (date, catalog version, advisory counts) in your team’s change log.
|
||||||
|
- Add the CVE/KEV job kinds to the standard maintenance checklist so operators can manually trigger them after planned downtime.
|
||||||
|
- Keep this document in sync with future connector changes (for example, new anomaly reasons or additional metrics).
|
||||||
50
docs/ops/migrations/SEMVER_STYLE.md
Normal file
50
docs/ops/migrations/SEMVER_STYLE.md
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
# SemVer Style Backfill Runbook
|
||||||
|
|
||||||
|
_Last updated: 2025-10-11_
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The SemVer style migration populates the new `normalizedVersions` field on advisory documents and ensures
|
||||||
|
provenance `decisionReason` values are preserved during future reads. The migration is idempotent and only
|
||||||
|
runs when the feature flag `feedser:storage:enableSemVerStyle` is enabled.
|
||||||
|
|
||||||
|
## Preconditions
|
||||||
|
|
||||||
|
1. **Review configuration** – set `feedser.storage.enableSemVerStyle` to `true` on all Feedser services.
|
||||||
|
2. **Confirm batch size** – adjust `feedser.storage.backfillBatchSize` if you need smaller batches for older
|
||||||
|
deployments (default: `250`).
|
||||||
|
3. **Back up** – capture a fresh snapshot of the `advisory` collection or a full MongoDB backup.
|
||||||
|
4. **Staging dry-run** – enable the flag in a staging environment and observe the migration output before
|
||||||
|
rolling to production.
|
||||||
|
|
||||||
|
## Execution
|
||||||
|
|
||||||
|
No manual command is required. After deploying the configuration change, restart the Feedser WebService or
|
||||||
|
any component that hosts the Mongo migration runner. During startup you will see log entries similar to:
|
||||||
|
|
||||||
|
```
|
||||||
|
Applying Mongo migration 20251011-semver-style-backfill: Populate advisory.normalizedVersions for existing documents when SemVer style storage is enabled.
|
||||||
|
Mongo migration 20251011-semver-style-backfill applied
|
||||||
|
```
|
||||||
|
|
||||||
|
The migration reads advisories in batches (`feedser.storage.backfillBatchSize`) and writes flattened
|
||||||
|
`normalizedVersions` arrays. Existing documents without SemVer ranges remain untouched.
|
||||||
|
|
||||||
|
## Post-checks
|
||||||
|
|
||||||
|
1. Verify the new indexes exist:
|
||||||
|
```
|
||||||
|
db.advisory.getIndexes()
|
||||||
|
```
|
||||||
|
You should see `advisory_normalizedVersions_pkg_scheme_type` and `advisory_normalizedVersions_value`.
|
||||||
|
2. Spot check a few advisories to confirm the top-level `normalizedVersions` array exists and matches
|
||||||
|
the embedded package data.
|
||||||
|
3. Run `dotnet test` for `StellaOps.Feedser.Storage.Mongo.Tests` (optional but recommended) in CI to confirm
|
||||||
|
the storage suite passes with the feature flag enabled.
|
||||||
|
|
||||||
|
## Rollback
|
||||||
|
|
||||||
|
Set `feedser.storage.enableSemVerStyle` back to `false` and redeploy. The migration will be skipped on
|
||||||
|
subsequent startups. You can leave the populated `normalizedVersions` arrays in place; they are ignored when
|
||||||
|
the feature flag is off. If you must remove them entirely, restore from the backup captured during
|
||||||
|
preparation.
|
||||||
136
docs/rfcs/authority-plugin-ldap.md
Normal file
136
docs/rfcs/authority-plugin-ldap.md
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
# RFC: StellaOps.Authority.Plugin.Ldap
|
||||||
|
|
||||||
|
**Status:** Draft – for review by Auth Guild, Security Guild, DevEx (2025-10-10)
|
||||||
|
**Authors:** Plugin Team 4 (Auth Libraries & Identity Providers)
|
||||||
|
**Related initiatives:** PLG7 backlog, CORE5 event handlers, DOC4 developer guide
|
||||||
|
|
||||||
|
## 1. Problem Statement
|
||||||
|
Many on-prem StellaOps deployments rely on existing LDAP/Active Directory domains for workforce identity. The current Standard Mongo-backed plugin requires duplicating users and secrets, which increases operational overhead and violates corporate policy in some regulated environments. We need a sovereign, offline-friendly LDAP plugin that:
|
||||||
|
|
||||||
|
- Supports password grant and bootstrap provisioning flows without storing credentials in Mongo.
|
||||||
|
- Enforces StellaOps security policies (lockout, password policy hints, audit logging) while delegating credential validation to LDAP.
|
||||||
|
- Operates deterministically in offline or partially connected environments by caching directory metadata when necessary.
|
||||||
|
|
||||||
|
## 2. Goals
|
||||||
|
- Provide a first-party `StellaOps.Authority.Plugin.Ldap` plugin advertising `password` and optional `clientProvisioning` capabilities at launch.
|
||||||
|
- Support username/password authentication against LDAP bind operations with configurable DN templates.
|
||||||
|
- Allow optional bootstrap seeding of service accounts by writing into LDAP (guarded behind explicit configuration) or by mapping to pre-existing entries.
|
||||||
|
- Surface directory-derived claims (groups, attributes) for downstream authorization via `IClaimsEnricher`.
|
||||||
|
- Integrate with Authority lockout telemetry and structured logging without persisting secrets locally.
|
||||||
|
|
||||||
|
## 3. Non-Goals
|
||||||
|
- Implement multi-factor authentication out of the box (future enhancement once TOTP/WebAuthn strategy is finalised).
|
||||||
|
- Provide write-heavy directory management (e.g., user creation workflows) beyond optional bootstrap service account seeding.
|
||||||
|
- Replace the Standard plugin; both must remain supported and selectable per environment.
|
||||||
|
|
||||||
|
## 4. Key Constraints & Assumptions
|
||||||
|
- Offline-first posture: deployments may operate without outbound internet and with intermittent directory connectivity (e.g., read-only replicas). The plugin must tolerate transient LDAP connectivity failures and degrade gracefully.
|
||||||
|
- Deterministic behaviour: identical configuration and directory state must yield identical token issuance results. Cached metadata (e.g., group lookups) must have defined expiration.
|
||||||
|
- Security: No plaintext credential storage; TLS must be enforced for LDAP connections unless explicitly overridden for air-gapped lab environments.
|
||||||
|
|
||||||
|
## 5. High-Level Architecture
|
||||||
|
1. **Configuration binding** (`ldap.yaml`): defines server endpoints, bind strategy, claim mapping, and optional bootstrap overrides.
|
||||||
|
2. **Connection factory**: pooled LDAP connections using a resilient client (preferred dependency: `Novell.Directory.Ldap.NETStandard`).
|
||||||
|
3. **Credential validator** (`IUserCredentialStore`): performs bind-as-user flow with optional fallback bind using service account when directories disallow anonymous search.
|
||||||
|
4. **Claims enricher** (`IClaimsEnricher`): queries group membership/attributes and projects them into canonical roles/claims.
|
||||||
|
5. **Optional client provisioning** (`IClientProvisioningStore`): maintains machine/service principals either in Mongo (metadata) or via LDAP `serviceConnectionPoint` entries based on configuration.
|
||||||
|
6. **Health checks**: periodic LDAP `whoami` or `search` probes surfaced through `AuthorityPluginHealthResult`.
|
||||||
|
|
||||||
|
```
|
||||||
|
Authority Host
|
||||||
|
├── Plugin Manifest (ldap)
|
||||||
|
├── Registrar → registers ConnectionFactory, LdapCredentialStore, LdapClaimsEnricher
|
||||||
|
├── Password Grant Handler → CredentialStore.VerifyPasswordAsync → LDAP Bind
|
||||||
|
└── Claims Pipeline → ClaimsEnricher.EnrichAsync → LDAP group lookup
|
||||||
|
```
|
||||||
|
|
||||||
|
## 6. Configuration Schema (Draft)
|
||||||
|
```yaml
|
||||||
|
connection:
|
||||||
|
host: "ldaps://ldap.example.internal"
|
||||||
|
port: 636
|
||||||
|
useStartTls: false
|
||||||
|
validateCertificates: true
|
||||||
|
bindDn: "cn=stellaops-bind,ou=service,dc=example,dc=internal"
|
||||||
|
bindPasswordSecret: "file:/etc/stellaops/secrets/ldap-bind.txt"
|
||||||
|
searchBase: "dc=example,dc=internal"
|
||||||
|
usernameAttribute: "uid"
|
||||||
|
userDnFormat: "uid={username},ou=people,dc=example,dc=internal" # optional template
|
||||||
|
security:
|
||||||
|
requireTls: true
|
||||||
|
allowedCipherSuites: [] # optional allow-list
|
||||||
|
referralChasing: false
|
||||||
|
lockout:
|
||||||
|
useAuthorityPolicies: true # reuse Authority lockout counters
|
||||||
|
directoryLockoutAttribute: "pwdAccountLockedTime"
|
||||||
|
claims:
|
||||||
|
groupAttribute: "memberOf"
|
||||||
|
groupToRoleMap:
|
||||||
|
"cn=stellaops-admins,ou=groups,dc=example,dc=internal": "operators"
|
||||||
|
"cn=stellaops-read,ou=groups,dc=example,dc=internal": "auditors"
|
||||||
|
extraAttributes:
|
||||||
|
displayName: "displayName"
|
||||||
|
email: "mail"
|
||||||
|
clientProvisioning:
|
||||||
|
enabled: false
|
||||||
|
containerDn: "ou=service,dc=example,dc=internal"
|
||||||
|
secretAttribute: "userPassword"
|
||||||
|
health:
|
||||||
|
probeIntervalSeconds: 60
|
||||||
|
timeoutSeconds: 5
|
||||||
|
```
|
||||||
|
|
||||||
|
## 7. Capability Mapping
|
||||||
|
| Capability | Implementation Notes |
|
||||||
|
|------------|---------------------|
|
||||||
|
| `password` | Bind-as-user validation with Authority lockout integration. Mandatory. |
|
||||||
|
| `clientProvisioning` | Optional; when enabled, creates/updates LDAP entries for machine clients or stores metadata in Mongo if directory writes are disabled. |
|
||||||
|
| `bootstrap` | Exposed only when bootstrap manifest provides service account credentials AND directory write permissions are confirmed during startup. |
|
||||||
|
| `mfa` | Not supported in MVP. Future iteration may integrate TOTP attributes or external MFA providers. |
|
||||||
|
|
||||||
|
## 8. Operational Considerations
|
||||||
|
- **Offline cache:** provide optional Mongo cache for group membership to keep `/ready` responsive if LDAP is temporarily unreachable. Cache entries must include TTL and invalidation hooks.
|
||||||
|
- **Secrets management:** accept `file:` and environment variable references; integrate with existing `StellaOps.Configuration` secret providers.
|
||||||
|
- **Observability:** emit structured logs with event IDs (`LDAP_BIND_START`, `LDAP_BIND_FAILURE`, `LDAP_GROUP_LOOKUP`), counters for success/failure, and latency histograms.
|
||||||
|
- **Throttling:** reuse Authority rate-limiting middleware; add per-connection throttles to avoid saturating directory servers during brute-force attacks.
|
||||||
|
|
||||||
|
## 9. Security & Compliance
|
||||||
|
- Enforce TLS (`ldaps://` or STARTTLS) by default. Provide explicit `allowInsecure` flag gated behind environment variable for lab/testing only.
|
||||||
|
- Support password hash migration by detecting directory lockout attributes and surfacing `RequiresPasswordReset` when policies demand changes.
|
||||||
|
- Log distinguished names only at `Debug` level to avoid leaking sensitive structure in default logs.
|
||||||
|
- Coordinate with Security Guild for penetration testing before GA; incorporate audit log entries for bind attempts and provisioning changes.
|
||||||
|
|
||||||
|
## 10. Testing Strategy
|
||||||
|
- **Unit tests:** mock LDAP connections to validate DN formatting, error mapping, and capability negotiation.
|
||||||
|
- **Integration tests:** run against an ephemeral OpenLDAP container (seeded via LDIF fixtures) within CI. Include offline cache regression (disconnect LDAP mid-test).
|
||||||
|
- **Determinism tests:** feed identical LDIF snapshots and configuration to ensure output tokens/claims remain stable across runs.
|
||||||
|
- **Smoke tests:** `dotnet test` harness plus manual `dotnet run` scenario verifying `/token` password grants and `/internal/users` bootstrap with LDAP-backed store.
|
||||||
|
|
||||||
|
## 11. Implementation Plan
|
||||||
|
1. Scaffold `StellaOps.Authority.Plugin.Ldap` project + tests (net10.0, `<IsAuthorityPlugin>` true).
|
||||||
|
2. Implement configuration options + validation (mirroring Standard plugin guardrails).
|
||||||
|
3. Build connection factory + credential store with bind logic.
|
||||||
|
4. Implement claims enricher and optional cache layer.
|
||||||
|
5. Add client provisioning store (optional) with toggles for read-only deployments.
|
||||||
|
6. Wire bootstrapper to validate connectivity/permissions and record findings in startup logs.
|
||||||
|
7. Extend developer guide with LDAP specifics (post-RFC acceptance).
|
||||||
|
8. Update Docs and TODO trackers; produce release notes entry once merged.
|
||||||
|
|
||||||
|
## 12. Open Questions
|
||||||
|
- Should client provisioning default to storing metadata in Mongo even when LDAP writes succeed (to preserve audit history)?
|
||||||
|
- Do we require LDAPS mutual TLS support (client certificates) for regulated environments? If yes, need to extend configuration schema.
|
||||||
|
- How will we map LDAP groups to Authority scopes/roles when names differ significantly? Consider supporting regex or mapping scripts.
|
||||||
|
|
||||||
|
## 13. Timeline (Tentative)
|
||||||
|
- **Week 1:** RFC review & sign-off.
|
||||||
|
- **Week 2-3:** Implementation & unit tests.
|
||||||
|
- **Week 4:** Integration tests + documentation updates.
|
||||||
|
- **Week 5:** Security review, release candidate packaging.
|
||||||
|
|
||||||
|
## 14. Approval
|
||||||
|
- **Auth Guild Lead:** _TBD_
|
||||||
|
- **Security Guild Representative:** _TBD_
|
||||||
|
- **DevEx Docs:** _TBD_
|
||||||
|
|
||||||
|
---
|
||||||
|
Please add comments inline or via PR review. Once approved, track execution under PLG7.
|
||||||
17
etc/authority.plugins/ldap.yaml
Normal file
17
etc/authority.plugins/ldap.yaml
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# Placeholder configuration for the LDAP identity provider plug-in.
|
||||||
|
# Replace values with your directory settings before enabling the plug-in.
|
||||||
|
connection:
|
||||||
|
host: "ldap.example.com"
|
||||||
|
port: 636
|
||||||
|
useTls: true
|
||||||
|
bindDn: "cn=service,dc=example,dc=com"
|
||||||
|
bindPassword: "CHANGE_ME"
|
||||||
|
|
||||||
|
queries:
|
||||||
|
userFilter: "(uid={username})"
|
||||||
|
groupFilter: "(member={distinguishedName})"
|
||||||
|
groupAttribute: "cn"
|
||||||
|
|
||||||
|
capabilities:
|
||||||
|
supportsPassword: true
|
||||||
|
supportsMfa: false
|
||||||
22
etc/authority.plugins/standard.yaml
Normal file
22
etc/authority.plugins/standard.yaml
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# Standard plugin configuration (Mongo-backed identity store).
|
||||||
|
bootstrapUser:
|
||||||
|
username: "admin"
|
||||||
|
password: "changeme"
|
||||||
|
|
||||||
|
passwordPolicy:
|
||||||
|
minimumLength: 12
|
||||||
|
requireUppercase: true
|
||||||
|
requireLowercase: true
|
||||||
|
requireDigit: true
|
||||||
|
requireSymbol: true
|
||||||
|
|
||||||
|
lockout:
|
||||||
|
enabled: true
|
||||||
|
maxAttempts: 5
|
||||||
|
windowMinutes: 15
|
||||||
|
|
||||||
|
tokenSigning:
|
||||||
|
# Path to the directory containing signing keys (relative paths resolve
|
||||||
|
# against the location of this manifest, environment variables are expanded,
|
||||||
|
# and the final value is normalised to an absolute path during startup.
|
||||||
|
keyDirectory: "../keys"
|
||||||
71
etc/authority.yaml.sample
Normal file
71
etc/authority.yaml.sample
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# StellaOps Authority configuration template.
|
||||||
|
# Copy to ../etc/authority.yaml (relative to the Authority content root)
|
||||||
|
# and adjust values to fit your environment. Environment variables
|
||||||
|
# prefixed with STELLAOPS_AUTHORITY_ override these values at runtime.
|
||||||
|
# Example: STELLAOPS_AUTHORITY__ISSUER=https://authority.example.com
|
||||||
|
|
||||||
|
schemaVersion: 1
|
||||||
|
|
||||||
|
# Absolute issuer URI advertised to clients. Use HTTPS for anything
|
||||||
|
# beyond loopback development.
|
||||||
|
issuer: "https://authority.stella-ops.local"
|
||||||
|
|
||||||
|
# Token lifetimes expressed as HH:MM:SS or DD.HH:MM:SS.
|
||||||
|
accessTokenLifetime: "00:15:00"
|
||||||
|
refreshTokenLifetime: "30.00:00:00"
|
||||||
|
identityTokenLifetime: "00:05:00"
|
||||||
|
authorizationCodeLifetime: "00:05:00"
|
||||||
|
deviceCodeLifetime: "00:15:00"
|
||||||
|
|
||||||
|
# MongoDB storage connection details.
|
||||||
|
storage:
|
||||||
|
connectionString: "mongodb://localhost:27017/stellaops-authority"
|
||||||
|
# databaseName: "stellaops_authority"
|
||||||
|
commandTimeout: "00:00:30"
|
||||||
|
|
||||||
|
# Bootstrap administrative endpoints (initial provisioning).
|
||||||
|
bootstrap:
|
||||||
|
enabled: false
|
||||||
|
apiKey: "change-me"
|
||||||
|
defaultIdentityProvider: "standard"
|
||||||
|
|
||||||
|
# Directories scanned for Authority plug-ins. Relative paths resolve
|
||||||
|
# against the application content root, enabling air-gapped deployments
|
||||||
|
# that package plug-ins alongside binaries.
|
||||||
|
pluginDirectories:
|
||||||
|
- "../PluginBinaries/Authority"
|
||||||
|
# "/var/lib/stellaops/authority/plugins"
|
||||||
|
|
||||||
|
# Plug-in manifests live in descriptors below; per-plugin settings are stored
|
||||||
|
# in the configurationDirectory (YAML files). Authority will load any enabled
|
||||||
|
# plugins and surface their metadata/capabilities to the host.
|
||||||
|
plugins:
|
||||||
|
configurationDirectory: "../etc/authority.plugins"
|
||||||
|
descriptors:
|
||||||
|
standard:
|
||||||
|
type: "standard"
|
||||||
|
assemblyName: "StellaOps.Authority.Plugin.Standard"
|
||||||
|
enabled: true
|
||||||
|
configFile: "standard.yaml"
|
||||||
|
capabilities:
|
||||||
|
- password
|
||||||
|
- bootstrap
|
||||||
|
- clientProvisioning
|
||||||
|
metadata:
|
||||||
|
defaultRole: "operators"
|
||||||
|
# Example for an external identity provider plugin. Leave disabled unless
|
||||||
|
# the plug-in package exists under PluginBinaries/Authority.
|
||||||
|
ldap:
|
||||||
|
type: "ldap"
|
||||||
|
assemblyName: "StellaOps.Authority.Plugin.Ldap"
|
||||||
|
enabled: false
|
||||||
|
configFile: "ldap.yaml"
|
||||||
|
capabilities:
|
||||||
|
- password
|
||||||
|
- mfa
|
||||||
|
|
||||||
|
# CIDR ranges that bypass network-sensitive policies (e.g. on-host cron jobs).
|
||||||
|
# Keep the list tight: localhost is sufficient for most air-gapped installs.
|
||||||
|
bypassNetworks:
|
||||||
|
- "127.0.0.1/32"
|
||||||
|
- "::1/128"
|
||||||
65
etc/feedser.yaml.sample
Normal file
65
etc/feedser.yaml.sample
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
# Feedser configuration template for StellaOps deployments.
|
||||||
|
# Copy to ../etc/feedser.yaml (relative to the web service content root)
|
||||||
|
# and adjust the values to match your environment. Environment variables
|
||||||
|
# (prefixed with FEEDSER_) override these settings at runtime.
|
||||||
|
|
||||||
|
storage:
|
||||||
|
driver: mongo
|
||||||
|
# Mongo connection string. Use SRV URI or standard connection string.
|
||||||
|
dsn: "mongodb://feedser:feedser@mongo:27017/feedser?authSource=admin"
|
||||||
|
# Optional database name; defaults to the name embedded in the DSN or 'feedser'.
|
||||||
|
database: "feedser"
|
||||||
|
# Mongo command timeout in seconds.
|
||||||
|
commandTimeoutSeconds: 30
|
||||||
|
|
||||||
|
plugins:
|
||||||
|
# Feedser resolves plug-ins relative to the content root; override as needed.
|
||||||
|
baseDirectory: ".."
|
||||||
|
directory: "PluginBinaries"
|
||||||
|
searchPatterns:
|
||||||
|
- "StellaOps.Feedser.Plugin.*.dll"
|
||||||
|
|
||||||
|
telemetry:
|
||||||
|
enabled: true
|
||||||
|
enableTracing: false
|
||||||
|
enableMetrics: false
|
||||||
|
enableLogging: true
|
||||||
|
minimumLogLevel: "Information"
|
||||||
|
serviceName: "stellaops-feedser"
|
||||||
|
# Configure OTLP endpoint when shipping traces/metrics/logs out-of-band.
|
||||||
|
otlpEndpoint: ""
|
||||||
|
# Optional headers for OTLP exporters, for example authentication tokens.
|
||||||
|
otlpHeaders: {}
|
||||||
|
# Attach additional resource attributes to telemetry exports.
|
||||||
|
resourceAttributes:
|
||||||
|
deployment.environment: "local"
|
||||||
|
# Emit console exporters for local debugging.
|
||||||
|
exportConsole: true
|
||||||
|
|
||||||
|
authority:
|
||||||
|
enabled: false
|
||||||
|
# Temporary rollout flag. When true, Feedser logs anonymous access but does not fail requests
|
||||||
|
# without tokens. Set to false before 2025-12-31 UTC to enforce authentication fully.
|
||||||
|
allowAnonymousFallback: true
|
||||||
|
# Issuer advertised by StellaOps Authority (e.g. https://authority.stella-ops.local).
|
||||||
|
issuer: "https://authority.stella-ops.local"
|
||||||
|
# Optional explicit metadata address; defaults to {issuer}/.well-known/openid-configuration.
|
||||||
|
metadataAddress: ""
|
||||||
|
requireHttpsMetadata: true
|
||||||
|
backchannelTimeoutSeconds: 30
|
||||||
|
tokenClockSkewSeconds: 60
|
||||||
|
audiences:
|
||||||
|
- "api://feedser"
|
||||||
|
requiredScopes:
|
||||||
|
- "feedser.jobs.trigger"
|
||||||
|
# Outbound credentials Feedser can use to call Authority (client credentials flow).
|
||||||
|
clientId: "feedser-jobs"
|
||||||
|
# Prefer storing the secret outside of the config file. Provide either clientSecret or clientSecretFile.
|
||||||
|
clientSecret: ""
|
||||||
|
clientSecretFile: ""
|
||||||
|
clientScopes:
|
||||||
|
- "feedser.jobs.trigger"
|
||||||
|
# Networks allowed to bypass authentication (loopback by default for on-host cron jobs).
|
||||||
|
bypassNetworks:
|
||||||
|
- "127.0.0.1/32"
|
||||||
|
- "::1/128"
|
||||||
16
ops/authority/AGENTS.md
Normal file
16
ops/authority/AGENTS.md
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# Authority DevOps Crew
|
||||||
|
|
||||||
|
## Mission
|
||||||
|
Operate and harden the StellaOps Authority platform in production and air-gapped environments: container images, deployment assets, observability defaults, backup/restore, and runtime key management.
|
||||||
|
|
||||||
|
## Focus Areas
|
||||||
|
- **Build & Packaging** – Dockerfiles, OCI bundles, offline artefact refresh.
|
||||||
|
- **Deployment Tooling** – Compose/Kubernetes manifests, secrets bootstrap, upgrade paths.
|
||||||
|
- **Observability** – Logging defaults, metrics/trace exporters, dashboards, alert policies.
|
||||||
|
- **Continuity & Security** – Backup/restore guides, key rotation playbooks, revocation propagation.
|
||||||
|
|
||||||
|
## Working Agreements
|
||||||
|
- Track work in `ops/authority/TASKS.md` (TODO → DOING → DONE/BLOCKED); keep entries dated.
|
||||||
|
- Validate container changes with the CI pipeline (`ops/authority` Gitea workflow) before marking DONE.
|
||||||
|
- Update operator documentation in `docs/` together with any behavioural change.
|
||||||
|
- Coordinate with Authority Core and Security Guild before altering sensitive defaults (rate limits, crypto providers, revocation jobs).
|
||||||
38
ops/authority/Dockerfile
Normal file
38
ops/authority/Dockerfile
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
# syntax=docker/dockerfile:1.7-labs
|
||||||
|
|
||||||
|
#
|
||||||
|
# StellaOps Authority – distroless container build
|
||||||
|
# Produces a minimal image containing the Authority host and its plugins.
|
||||||
|
#
|
||||||
|
|
||||||
|
ARG SDK_IMAGE=mcr.microsoft.com/dotnet/nightly/sdk:10.0
|
||||||
|
ARG RUNTIME_IMAGE=gcr.io/distroless/dotnet/aspnet:latest
|
||||||
|
|
||||||
|
FROM ${SDK_IMAGE} AS build
|
||||||
|
|
||||||
|
WORKDIR /src
|
||||||
|
|
||||||
|
# Restore & publish
|
||||||
|
COPY . .
|
||||||
|
RUN dotnet restore src/StellaOps.sln
|
||||||
|
RUN dotnet publish src/StellaOps.Authority/StellaOps.Authority/StellaOps.Authority.csproj \
|
||||||
|
-c Release \
|
||||||
|
-o /app/publish \
|
||||||
|
/p:UseAppHost=false
|
||||||
|
|
||||||
|
FROM ${RUNTIME_IMAGE} AS runtime
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
ENV ASPNETCORE_URLS=http://0.0.0.0:8080
|
||||||
|
ENV STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0=/app/plugins
|
||||||
|
ENV STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY=/app/etc/authority.plugins
|
||||||
|
|
||||||
|
COPY --from=build /app/publish ./
|
||||||
|
|
||||||
|
# Provide writable mount points for configs/keys/plugins
|
||||||
|
VOLUME ["/app/etc", "/app/plugins", "/app/keys"]
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
ENTRYPOINT ["dotnet", "StellaOps.Authority.dll"]
|
||||||
39
ops/authority/README.md
Normal file
39
ops/authority/README.md
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
# StellaOps Authority Container Scaffold
|
||||||
|
|
||||||
|
This directory provides a distroless Dockerfile and `docker-compose` sample for bootstrapping the Authority service alongside MongoDB (required) and Redis (optional).
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Docker Engine 25+ and Compose V2
|
||||||
|
- .NET 10 preview SDK (only required when building locally outside of Compose)
|
||||||
|
- Populated Authority configuration at `etc/authority.yaml` and plugin manifests under `etc/authority.plugins/`
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Ensure configuration files exist (copied from etc/authority.yaml.sample, etc/authority.plugins/*.yaml)
|
||||||
|
# 2. Build and start the stack
|
||||||
|
docker compose -f ops/authority/docker-compose.authority.yaml up --build
|
||||||
|
```
|
||||||
|
|
||||||
|
`authority.yaml` is mounted read-only at `/etc/authority.yaml` inside the container. Plugin manifests are mounted to `/app/etc/authority.plugins`. Update the issuer URL plus any Mongo credentials in the compose file or via an `.env` file.
|
||||||
|
|
||||||
|
To run with pre-built images, replace the `build:` block in the compose file with an `image:` reference.
|
||||||
|
|
||||||
|
## Volumes
|
||||||
|
|
||||||
|
- `mongo-data` – persists MongoDB state.
|
||||||
|
- `redis-data` – optional Redis persistence (enable the service before use).
|
||||||
|
- `authority-keys` – writable volume for Authority signing keys.
|
||||||
|
|
||||||
|
## Environment overrides
|
||||||
|
|
||||||
|
Key environment variables (mirroring `StellaOpsAuthorityOptions`):
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| `STELLAOPS_AUTHORITY__ISSUER` | Public issuer URL advertised by Authority |
|
||||||
|
| `STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0` | Primary plugin binaries directory inside the container |
|
||||||
|
| `STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY` | Path to plugin manifest directory |
|
||||||
|
|
||||||
|
For additional options, see `etc/authority.yaml.sample`.
|
||||||
6
ops/authority/TASKS.md
Normal file
6
ops/authority/TASKS.md
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
# Authority DevOps Task Board (UTC 2025-10-10)
|
||||||
|
|
||||||
|
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||||
|
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||||
|
|----|--------|----------|------------|-------------|---------------|
|
||||||
|
| OPS3.KEY-ROTATION | BLOCKED | DevOps Crew, Authority Core | CORE10.JWKS | Implement key rotation tooling + pipeline hook once rotating JWKS lands. Document SOP and secret handling. | ✅ CLI/script rotates keys + updates JWKS; ✅ Pipeline job documented; ✅ docs/ops runbook updated. |
|
||||||
58
ops/authority/docker-compose.authority.yaml
Normal file
58
ops/authority/docker-compose.authority.yaml
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
version: "3.9"
|
||||||
|
|
||||||
|
services:
|
||||||
|
authority:
|
||||||
|
build:
|
||||||
|
context: ../..
|
||||||
|
dockerfile: ops/authority/Dockerfile
|
||||||
|
image: stellaops-authority:dev
|
||||||
|
container_name: stellaops-authority
|
||||||
|
depends_on:
|
||||||
|
mongo:
|
||||||
|
condition: service_started
|
||||||
|
environment:
|
||||||
|
# Override issuer to match your deployment URL.
|
||||||
|
STELLAOPS_AUTHORITY__ISSUER: "https://authority.localtest.me"
|
||||||
|
      # Plugin binary and manifest locations inside the container (Mongo connection details come from the mounted authority.yaml).
|
||||||
|
STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins"
|
||||||
|
STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins"
|
||||||
|
volumes:
|
||||||
|
# Mount Authority configuration + plugins (edit etc/authority.yaml before running).
|
||||||
|
- ../../etc/authority.yaml:/etc/authority.yaml:ro
|
||||||
|
- ../../etc/authority.plugins:/app/etc/authority.plugins:ro
|
||||||
|
# Optional: persist plugin binaries or key material outside the container.
|
||||||
|
- authority-keys:/app/keys
|
||||||
|
ports:
|
||||||
|
- "8080:8080"
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
mongo:
|
||||||
|
image: mongo:7
|
||||||
|
container_name: stellaops-authority-mongo
|
||||||
|
command: ["mongod", "--bind_ip_all"]
|
||||||
|
environment:
|
||||||
|
MONGO_INITDB_ROOT_USERNAME: stellaops
|
||||||
|
MONGO_INITDB_ROOT_PASSWORD: stellaops
|
||||||
|
volumes:
|
||||||
|
- mongo-data:/data/db
|
||||||
|
ports:
|
||||||
|
- "27017:27017"
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
redis:
|
||||||
|
image: redis:7-alpine
|
||||||
|
container_name: stellaops-authority-redis
|
||||||
|
command: ["redis-server", "--save", "60", "1"]
|
||||||
|
volumes:
|
||||||
|
- redis-data:/data
|
||||||
|
ports:
|
||||||
|
- "6379:6379"
|
||||||
|
restart: unless-stopped
|
||||||
|
# Uncomment to enable if/when Authority consumes Redis.
|
||||||
|
# deploy:
|
||||||
|
# replicas: 0
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
mongo-data:
|
||||||
|
redis-data:
|
||||||
|
authority-keys:
|
||||||
254
scripts/render_docs.py
Normal file
254
scripts/render_docs.py
Normal file
@@ -0,0 +1,254 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""Render Markdown documentation under docs/ into a static HTML bundle.
|
||||||
|
|
||||||
|
The script converts every Markdown file into a standalone HTML document,
|
||||||
|
mirroring the original folder structure under the output directory. A
|
||||||
|
`manifest.json` file is also produced to list the generated documents and
|
||||||
|
surface basic metadata (title, source path, output path).
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python scripts/render_docs.py --source docs --output build/docs-site
|
||||||
|
|
||||||
|
Dependencies:
|
||||||
|
pip install markdown pygments
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Iterable, List
|
||||||
|
|
||||||
|
import markdown
|
||||||
|
|
||||||
|
# Enable fenced code blocks, tables, and definition lists. These cover the
|
||||||
|
# Markdown constructs heavily used across the documentation set.
|
||||||
|
MD_EXTENSIONS = [
|
||||||
|
"fenced_code",
|
||||||
|
"codehilite",
|
||||||
|
"tables",
|
||||||
|
"toc",
|
||||||
|
"def_list",
|
||||||
|
"admonition",
|
||||||
|
]
|
||||||
|
|
||||||
|
HTML_TEMPLATE = """<!DOCTYPE html>
|
||||||
|
<html lang=\"en\">
|
||||||
|
<head>
|
||||||
|
<meta charset=\"utf-8\" />
|
||||||
|
<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />
|
||||||
|
<title>{title}</title>
|
||||||
|
<style>
|
||||||
|
:root {{
|
||||||
|
color-scheme: light dark;
|
||||||
|
font-family: system-ui, -apple-system, Segoe UI, sans-serif;
|
||||||
|
line-height: 1.6;
|
||||||
|
}}
|
||||||
|
body {{
|
||||||
|
margin: 2.5rem auto;
|
||||||
|
padding: 0 1.5rem;
|
||||||
|
max-width: 70ch;
|
||||||
|
background: var(--background, #1118270d);
|
||||||
|
}}
|
||||||
|
pre {{
|
||||||
|
overflow: auto;
|
||||||
|
padding: 1rem;
|
||||||
|
background: #11182714;
|
||||||
|
border-radius: 0.5rem;
|
||||||
|
}}
|
||||||
|
code {{
|
||||||
|
font-family: SFMono-Regular, Consolas, 'Liberation Mono', monospace;
|
||||||
|
font-size: 0.95em;
|
||||||
|
}}
|
||||||
|
table {{
|
||||||
|
width: 100%;
|
||||||
|
border-collapse: collapse;
|
||||||
|
margin: 1rem 0;
|
||||||
|
}}
|
||||||
|
th, td {{
|
||||||
|
border: 1px solid #4b5563;
|
||||||
|
padding: 0.5rem;
|
||||||
|
text-align: left;
|
||||||
|
}}
|
||||||
|
a {{
|
||||||
|
color: #2563eb;
|
||||||
|
}}
|
||||||
|
footer {{
|
||||||
|
margin-top: 3rem;
|
||||||
|
font-size: 0.85rem;
|
||||||
|
color: #6b7280;
|
||||||
|
}}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<main>
|
||||||
|
{body}
|
||||||
|
</main>
|
||||||
|
<footer>
|
||||||
|
<p>Generated on {generated_at} UTC · Source: {source}</p>
|
||||||
|
</footer>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class DocEntry:
|
||||||
|
source: Path
|
||||||
|
output: Path
|
||||||
|
title: str
|
||||||
|
|
||||||
|
def to_manifest(self) -> dict[str, str]:
|
||||||
|
return {
|
||||||
|
"source": self.source.as_posix(),
|
||||||
|
"output": self.output.as_posix(),
|
||||||
|
"title": self.title,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def discover_markdown_files(source_root: Path) -> Iterable[Path]:
|
||||||
|
for path in source_root.rglob("*.md"):
|
||||||
|
if path.is_file():
|
||||||
|
yield path
|
||||||
|
|
||||||
|
|
||||||
|
def read_title(markdown_text: str, fallback: str) -> str:
|
||||||
|
for raw_line in markdown_text.splitlines():
|
||||||
|
line = raw_line.strip()
|
||||||
|
if line.startswith("#"):
|
||||||
|
return line.lstrip("#").strip() or fallback
|
||||||
|
return fallback
|
||||||
|
|
||||||
|
|
||||||
|
def convert_markdown(path: Path, source_root: Path, output_root: Path) -> DocEntry:
|
||||||
|
relative = path.relative_to(source_root)
|
||||||
|
output_path = output_root / relative.with_suffix(".html")
|
||||||
|
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
text = path.read_text(encoding="utf-8")
|
||||||
|
html_body = markdown.markdown(text, extensions=MD_EXTENSIONS)
|
||||||
|
|
||||||
|
title = read_title(text, fallback=relative.stem.replace("_", " "))
|
||||||
|
generated_at = datetime.now(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
|
||||||
|
|
||||||
|
output_path.write_text(
|
||||||
|
HTML_TEMPLATE.format(
|
||||||
|
title=title,
|
||||||
|
body=html_body,
|
||||||
|
generated_at=generated_at,
|
||||||
|
source=relative.as_posix(),
|
||||||
|
),
|
||||||
|
encoding="utf-8",
|
||||||
|
)
|
||||||
|
|
||||||
|
return DocEntry(source=relative, output=output_path.relative_to(output_root), title=title)
|
||||||
|
|
||||||
|
|
||||||
|
def copy_static_assets(source_root: Path, output_root: Path) -> None:
|
||||||
|
for path in source_root.rglob("*"):
|
||||||
|
if path.is_dir() or path.suffix.lower() == ".md":
|
||||||
|
# Skip Markdown (already rendered separately).
|
||||||
|
continue
|
||||||
|
relative = path.relative_to(source_root)
|
||||||
|
destination = output_root / relative
|
||||||
|
destination.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
destination.write_bytes(path.read_bytes())
|
||||||
|
logging.info("Copied asset %s", relative)
|
||||||
|
|
||||||
|
|
||||||
|
def write_manifest(entries: Iterable[DocEntry], output_root: Path) -> None:
|
||||||
|
manifest_path = output_root / "manifest.json"
|
||||||
|
manifest = [entry.to_manifest() for entry in entries]
|
||||||
|
manifest_path.write_text(json.dumps(manifest, indent=2), encoding="utf-8")
|
||||||
|
logging.info("Wrote manifest with %d entries", len(manifest))
|
||||||
|
|
||||||
|
|
||||||
|
def write_index(entries: List[DocEntry], output_root: Path) -> None:
|
||||||
|
index_path = output_root / "index.html"
|
||||||
|
generated_at = datetime.now(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
|
||||||
|
|
||||||
|
items = "\n".join(
|
||||||
|
f" <li><a href='{entry.output.as_posix()}'>{entry.title}</a>" f" · <code>{entry.source.as_posix()}</code></li>"
|
||||||
|
for entry in sorted(entries, key=lambda e: e.title.lower())
|
||||||
|
)
|
||||||
|
|
||||||
|
html = f"""<!DOCTYPE html>
|
||||||
|
<html lang=\"en\">
|
||||||
|
<head>
|
||||||
|
<meta charset=\"utf-8\" />
|
||||||
|
<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />
|
||||||
|
<title>Stella Ops Documentation Index</title>
|
||||||
|
<style>
|
||||||
|
body {{
|
||||||
|
margin: 2.5rem auto;
|
||||||
|
padding: 0 1.5rem;
|
||||||
|
max-width: 70ch;
|
||||||
|
font-family: system-ui, -apple-system, 'Segoe UI', sans-serif;
|
||||||
|
line-height: 1.6;
|
||||||
|
}}
|
||||||
|
h1 {{ font-size: 2.25rem; margin-bottom: 1rem; }}
|
||||||
|
ul {{ list-style: none; padding: 0; }}
|
||||||
|
li {{ margin-bottom: 0.75rem; }}
|
||||||
|
code {{ background: #11182714; padding: 0.2rem 0.35rem; border-radius: 0.35rem; }}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h1>Stella Ops Documentation</h1>
|
||||||
|
<p>Generated on {generated_at} UTC</p>
|
||||||
|
<ul>
|
||||||
|
{items}
|
||||||
|
</ul>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
"""
|
||||||
|
index_path.write_text(html, encoding="utf-8")
|
||||||
|
logging.info("Wrote HTML index with %d entries", len(entries))
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args() -> argparse.Namespace:
|
||||||
|
parser = argparse.ArgumentParser(description="Render documentation bundle")
|
||||||
|
parser.add_argument("--source", default="docs", type=Path, help="Directory containing Markdown sources")
|
||||||
|
parser.add_argument("--output", default=Path("build/docs-site"), type=Path, help="Directory for rendered output")
|
||||||
|
parser.add_argument("--clean", action="store_true", help="Remove the output directory before rendering")
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
|
||||||
|
logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")
|
||||||
|
args = parse_args()
|
||||||
|
|
||||||
|
source_root: Path = args.source.resolve()
|
||||||
|
output_root: Path = args.output.resolve()
|
||||||
|
|
||||||
|
if not source_root.exists():
|
||||||
|
logging.error("Source directory %s does not exist", source_root)
|
||||||
|
return os.EX_NOINPUT
|
||||||
|
|
||||||
|
if args.clean and output_root.exists():
|
||||||
|
logging.info("Cleaning existing output directory %s", output_root)
|
||||||
|
shutil.rmtree(output_root)
|
||||||
|
|
||||||
|
output_root.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
entries: List[DocEntry] = []
|
||||||
|
for md_file in discover_markdown_files(source_root):
|
||||||
|
entry = convert_markdown(md_file, source_root, output_root)
|
||||||
|
entries.append(entry)
|
||||||
|
logging.info("Rendered %s -> %s", entry.source, entry.output)
|
||||||
|
|
||||||
|
write_manifest(entries, output_root)
|
||||||
|
write_index(entries, output_root)
|
||||||
|
copy_static_assets(source_root, output_root)
|
||||||
|
|
||||||
|
logging.info("Documentation bundle available at %s", output_root)
|
||||||
|
return os.EX_OK
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
raise SystemExit(main())
|
||||||
9
scripts/update-model-goldens.ps1
Normal file
9
scripts/update-model-goldens.ps1
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
Param(
|
||||||
|
[Parameter(ValueFromRemainingArguments = $true)]
|
||||||
|
[string[]] $RestArgs
|
||||||
|
)
|
||||||
|
|
||||||
|
$Root = Split-Path -Parent $PSScriptRoot
|
||||||
|
$env:UPDATE_GOLDENS = "1"
|
||||||
|
|
||||||
|
dotnet test (Join-Path $Root "src/StellaOps.Feedser.Models.Tests/StellaOps.Feedser.Models.Tests.csproj") @RestArgs
|
||||||
8
scripts/update-model-goldens.sh
Normal file
8
scripts/update-model-goldens.sh
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
|
||||||
|
export UPDATE_GOLDENS=1
|
||||||
|
|
||||||
|
dotnet test "$ROOT_DIR/src/StellaOps.Feedser.Models.Tests/StellaOps.Feedser.Models.Tests.csproj" "$@"
|
||||||
143
src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md
Normal file
143
src/DEDUP_CONFLICTS_RESOLUTION_ALGO.md
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
````markdown
|
||||||
|
# Feedser Vulnerability Conflict Resolution Rules
|
||||||
|
|
||||||
|
This document defines the canonical, deterministic conflict resolution strategy for merging vulnerability data from **NVD**, **GHSA**, and **OSV** in Feedser.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🧭 Source Precedence
|
||||||
|
|
||||||
|
1. **Primary order:**
|
||||||
|
`GHSA > NVD > OSV`
|
||||||
|
|
||||||
|
**Rationale:**
|
||||||
|
GHSA advisories are human-curated and fast to correct; NVD has the broadest CVE coverage; OSV excels in ecosystem-specific precision.
|
||||||
|
|
||||||
|
2. **Freshness override (≥48 h):**
|
||||||
|
If a **lower-priority** source is **newer by at least 48 hours** for a freshness-sensitive field, its value overrides the higher-priority one.
|
||||||
|
Always store the decision in a provenance record.
|
||||||
|
|
||||||
|
3. **Merge scope:**
|
||||||
|
Only merge data referring to the **same CVE ID** or the same GHSA/OSV advisory explicitly mapped to that CVE.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🧩 Field-Level Precedence
|
||||||
|
|
||||||
|
| Field | Priority | Freshness-Sensitive | Notes |
|
||||||
|
|-------|-----------|--------------------|-------|
|
||||||
|
| Title / Summary | GHSA → NVD → OSV | ✅ | Prefer concise structured titles |
|
||||||
|
| Description | GHSA → NVD → OSV | ✅ | |
|
||||||
|
| Severity (CVSS) | NVD → GHSA → OSV | ❌ | Keep all under `metrics[]`, mark `canonicalMetric` by order |
|
||||||
|
| Ecosystem Severity Label | GHSA → OSV | ❌ | Supplemental tag only |
|
||||||
|
| Affected Packages / Ranges | OSV → GHSA → NVD | ✅ | OSV strongest for SemVer normalization |
|
||||||
|
| CWE(s) | NVD → GHSA → OSV | ❌ | NVD taxonomy most stable |
|
||||||
|
| References / Links | Union of all | ✅ | Deduplicate by normalized URL |
|
||||||
|
| Credits / Acknowledgements | Union of all | ✅ | Sort by role, displayName |
|
||||||
|
| Published / Modified timestamps | Earliest published / Latest modified | ✅ | |
|
||||||
|
| EPSS / KEV / Exploit status | Specialized feed only | ❌ | Do not override manually |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ⚖️ Deterministic Tie-Breakers
|
||||||
|
|
||||||
|
If precedence and freshness both tie:
|
||||||
|
|
||||||
|
1. **Source order:** GHSA > NVD > OSV
|
||||||
|
2. **Lexicographic stability:** Prefer shorter normalized text; if equal, ASCIIbetical
|
||||||
|
3. **Stable hash of payload:** Lowest hash wins
|
||||||
|
|
||||||
|
Each chosen value must store the merge rationale:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"provenance": {
|
||||||
|
"source": "GHSA",
|
||||||
|
"kind": "merge",
|
||||||
|
"value": "description",
|
||||||
|
"decisionReason": "precedence"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
````
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🧮 Merge Algorithm (Pseudocode)
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
inputs: records = {ghsa?, nvd?, osv?}
|
||||||
|
out = new CanonicalVuln(CVE)
|
||||||
|
|
||||||
|
foreach field in CANONICAL_SCHEMA:
|
||||||
|
candidates = collect(values, source, lastModified)
|
||||||
|
if freshnessSensitive(field) and newerBy48h(lowerPriority):
|
||||||
|
pick newest
|
||||||
|
else:
|
||||||
|
pick by precedence(field)
|
||||||
|
if tie:
|
||||||
|
applyTieBreakers()
|
||||||
|
out.field = normalize(field, value)
|
||||||
|
out.provenance[field] = decisionTrail
|
||||||
|
|
||||||
|
out.references = dedupe(union(all.references))
|
||||||
|
out.affected = normalizeAndUnion(OSV, GHSA, NVD)
|
||||||
|
out.metrics = rankAndSetCanonical(NVDv3 → GHSA → OSV → v2)
|
||||||
|
return out
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🔧 Normalization Rules
|
||||||
|
|
||||||
|
* **SemVer:**
|
||||||
|
Parse with tolerant builder; normalize `v` prefixes; map comparators (`<=`, `<`, `>=`, `>`); expand OSV events into continuous ranges.
|
||||||
|
|
||||||
|
* **Packages:**
|
||||||
|
Canonical key = `(ecosystem, packageName, language?)`; maintain aliases (purl, npm, Maven GAV, etc.).
|
||||||
|
|
||||||
|
* **CWE:**
|
||||||
|
Store both ID and name; validate against current CWE catalog.
|
||||||
|
|
||||||
|
* **CVSS:**
|
||||||
|
Preserve provided vector and base score; recompute only for validation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ✅ Output Guarantees
|
||||||
|
|
||||||
|
| Property | Description |
|
||||||
|
| ---------------- | ------------------------------------------------------------------------------- |
|
||||||
|
| **Reproducible** | Same input → same canonical output |
|
||||||
|
| **Auditable** | Provenance stored per field |
|
||||||
|
| **Complete** | Unions with de-duplication |
|
||||||
|
| **Composable** | Future layers (KEV, EPSS, vendor advisories) can safely extend precedence rules |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🧠 Example
|
||||||
|
|
||||||
|
* GHSA summary updated on *2025-10-09*
|
||||||
|
* NVD last modified *2025-10-05*
|
||||||
|
* OSV updated *2025-10-10*
|
||||||
|
|
||||||
|
→ **Summary:** OSV wins (freshness override)
|
||||||
|
→ **CVSS:** NVD v3.1 remains canonical
|
||||||
|
→ **Affected:** OSV ranges canonical; GHSA aliases merged
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🧰 Optional C# Helper Class
|
||||||
|
|
||||||
|
`StellaOps.Feedser.Core/CanonicalMerger.cs`
|
||||||
|
|
||||||
|
Implements:
|
||||||
|
|
||||||
|
* `FieldPrecedenceMap`
|
||||||
|
* `FreshnessSensitiveFields`
|
||||||
|
* `ApplyTieBreakers()`
|
||||||
|
* `NormalizeAndUnion()`
|
||||||
|
|
||||||
|
Deterministically builds `CanonicalVuln` with full provenance tracking.
|
||||||
|
|
||||||
|
```
|
||||||
|
```
|
||||||
33
src/Directory.Build.props
Normal file
33
src/Directory.Build.props
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
<Project>
|
||||||
|
<PropertyGroup>
|
||||||
|
<FeedserPluginOutputRoot Condition="'$(FeedserPluginOutputRoot)' == ''">$(SolutionDir)PluginBinaries</FeedserPluginOutputRoot>
|
||||||
|
<FeedserPluginOutputRoot Condition="'$(FeedserPluginOutputRoot)' == '' and '$(SolutionDir)' == ''">$(MSBuildThisFileDirectory)PluginBinaries</FeedserPluginOutputRoot>
|
||||||
|
<AuthorityPluginOutputRoot Condition="'$(AuthorityPluginOutputRoot)' == ''">$(SolutionDir)PluginBinaries\Authority</AuthorityPluginOutputRoot>
|
||||||
|
<AuthorityPluginOutputRoot Condition="'$(AuthorityPluginOutputRoot)' == '' and '$(SolutionDir)' == ''">$(MSBuildThisFileDirectory)PluginBinaries\Authority</AuthorityPluginOutputRoot>
|
||||||
|
<IsFeedserPlugin Condition="'$(IsFeedserPlugin)' == '' and $([System.String]::Copy('$(MSBuildProjectName)').StartsWith('StellaOps.Feedser.Source.'))">true</IsFeedserPlugin>
|
||||||
|
<IsFeedserPlugin Condition="'$(IsFeedserPlugin)' == '' and $([System.String]::Copy('$(MSBuildProjectName)').StartsWith('StellaOps.Feedser.Exporter.'))">true</IsFeedserPlugin>
|
||||||
|
<IsAuthorityPlugin Condition="'$(IsAuthorityPlugin)' == '' and $([System.String]::Copy('$(MSBuildProjectName)').StartsWith('StellaOps.Authority.Plugin.'))">true</IsAuthorityPlugin>
|
||||||
|
</PropertyGroup>
|
||||||
|
|
||||||
|
<ItemGroup>
|
||||||
|
<ProjectReference Update="../StellaOps.Plugin/StellaOps.Plugin.csproj">
|
||||||
|
<Private>false</Private>
|
||||||
|
<ExcludeAssets>runtime</ExcludeAssets>
|
||||||
|
</ProjectReference>
|
||||||
|
</ItemGroup>
|
||||||
|
|
||||||
|
<ItemGroup Condition="$([System.String]::Copy('$(MSBuildProjectName)').EndsWith('.Tests'))">
|
||||||
|
<PackageReference Include="coverlet.collector" Version="6.0.4" />
|
||||||
|
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
|
||||||
|
<PackageReference Include="Microsoft.AspNetCore.Mvc.Testing" Version="8.0.8" />
|
||||||
|
<PackageReference Include="Mongo2Go" Version="3.1.3" />
|
||||||
|
<PackageReference Include="xunit" Version="2.9.2" />
|
||||||
|
<PackageReference Include="xunit.runner.visualstudio" Version="2.8.2" />
|
||||||
|
<PackageReference Include="Microsoft.Extensions.TimeProvider.Testing" Version="8.4.0" />
|
||||||
|
<Compile Include="$(MSBuildThisFileDirectory)StellaOps.Feedser.Tests.Shared\AssemblyInfo.cs" Link="Shared\AssemblyInfo.cs" />
|
||||||
|
<Compile Include="$(MSBuildThisFileDirectory)StellaOps.Feedser.Tests.Shared\MongoFixtureCollection.cs" Link="Shared\MongoFixtureCollection.cs" />
|
||||||
|
<ProjectReference Include="$(MSBuildThisFileDirectory)StellaOps.Feedser.Testing\StellaOps.Feedser.Testing.csproj" />
|
||||||
|
<Using Include="StellaOps.Feedser.Testing" />
|
||||||
|
<Using Include="Xunit" />
|
||||||
|
</ItemGroup>
|
||||||
|
</Project>
|
||||||
33
src/Directory.Build.targets
Normal file
33
src/Directory.Build.targets
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
<Project>
|
||||||
|
<Target Name="FeedserCopyPluginArtifacts" AfterTargets="Build" Condition="'$(IsFeedserPlugin)' == 'true'">
|
||||||
|
<PropertyGroup>
|
||||||
|
<FeedserPluginOutputDirectory>$(FeedserPluginOutputRoot)\$(MSBuildProjectName)</FeedserPluginOutputDirectory>
|
||||||
|
</PropertyGroup>
|
||||||
|
|
||||||
|
<MakeDir Directories="$(FeedserPluginOutputDirectory)" />
|
||||||
|
|
||||||
|
<ItemGroup>
|
||||||
|
<FeedserPluginArtifacts Include="$(TargetPath)" />
|
||||||
|
<FeedserPluginArtifacts Include="$(TargetPath).deps.json" Condition="Exists('$(TargetPath).deps.json')" />
|
||||||
|
<FeedserPluginArtifacts Include="$(TargetDir)$(TargetName).pdb" Condition="Exists('$(TargetDir)$(TargetName).pdb')" />
|
||||||
|
</ItemGroup>
|
||||||
|
|
||||||
|
<Copy SourceFiles="@(FeedserPluginArtifacts)" DestinationFolder="$(FeedserPluginOutputDirectory)" SkipUnchangedFiles="true" />
|
||||||
|
</Target>
|
||||||
|
|
||||||
|
<Target Name="AuthorityCopyPluginArtifacts" AfterTargets="Build" Condition="'$(IsAuthorityPlugin)' == 'true'">
|
||||||
|
<PropertyGroup>
|
||||||
|
<AuthorityPluginOutputDirectory>$(AuthorityPluginOutputRoot)\$(MSBuildProjectName)</AuthorityPluginOutputDirectory>
|
||||||
|
</PropertyGroup>
|
||||||
|
|
||||||
|
<MakeDir Directories="$(AuthorityPluginOutputDirectory)" />
|
||||||
|
|
||||||
|
<ItemGroup>
|
||||||
|
<AuthorityPluginArtifacts Include="$(TargetPath)" />
|
||||||
|
<AuthorityPluginArtifacts Include="$(TargetPath).deps.json" Condition="Exists('$(TargetPath).deps.json')" />
|
||||||
|
<AuthorityPluginArtifacts Include="$(TargetDir)$(TargetName).pdb" Condition="Exists('$(TargetDir)$(TargetName).pdb')" />
|
||||||
|
</ItemGroup>
|
||||||
|
|
||||||
|
<Copy SourceFiles="@(AuthorityPluginArtifacts)" DestinationFolder="$(AuthorityPluginOutputDirectory)" SkipUnchangedFiles="true" />
|
||||||
|
</Target>
|
||||||
|
</Project>
|
||||||
139
src/FASTER_MODELING_AND_NORMALIZATION.md
Normal file
139
src/FASTER_MODELING_AND_NORMALIZATION.md
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
Here’s a quick, practical idea to make your version-range modeling cleaner and faster to query.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
# Rethinking `SemVerRangeBuilder` + MongoDB
|
||||||
|
|
||||||
|
**Problem (today):** Version normalization rules live as a nested object (and often as a bespoke structure per source). This can force awkward `$objectToArray`, `$map`, and conditional logic in pipelines when you need to:
|
||||||
|
|
||||||
|
* match “is version X affected?”
|
||||||
|
* flatten ranges for analytics
|
||||||
|
* de-duplicate across sources
|
||||||
|
|
||||||
|
**Proposal:** Store *normalized version rules as an embedded collection (array of small docs)* instead of a single nested object.
|
||||||
|
|
||||||
|
## Minimal background
|
||||||
|
|
||||||
|
* **SemVer normalization**: converting all source-specific version notations into a single, strict representation (e.g., `>=1.2.3 <2.0.0`, exact pins, wildcards).
|
||||||
|
* **Embedded collection**: an array of consistently shaped items inside the parent doc—great for `$unwind`-centric analytics and direct matches.
|
||||||
|
|
||||||
|
## Suggested shape
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"_id": "VULN-123",
|
||||||
|
"packageId": "pkg:npm/lodash",
|
||||||
|
"source": "NVD",
|
||||||
|
"normalizedVersions": [
|
||||||
|
{
|
||||||
|
"scheme": "semver",
|
||||||
|
"type": "range", // "range" | "exact" | "lt" | "lte" | "gt" | "gte"
|
||||||
|
"min": "1.2.3", // optional
|
||||||
|
"minInclusive": true, // optional
|
||||||
|
"max": "2.0.0", // optional
|
||||||
|
"maxInclusive": false, // optional
|
||||||
|
"notes": "from GHSA GHSA-xxxx" // traceability
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"scheme": "semver",
|
||||||
|
"type": "exact",
|
||||||
|
"value": "1.5.0"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": { "ingestedAt": "2025-10-10T12:00:00Z" }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Why this helps
|
||||||
|
|
||||||
|
* **Simpler queries**
|
||||||
|
|
||||||
|
* *Is v affected?*
|
||||||
|
|
||||||
|
```js
|
||||||
|
db.vulns.aggregate([
|
||||||
|
{ $match: { packageId: "pkg:npm/lodash" } },
|
||||||
|
{ $unwind: "$normalizedVersions" },
|
||||||
|
{ $match: {
|
||||||
|
$or: [
|
||||||
|
{ "normalizedVersions.type": "exact", "normalizedVersions.value": "1.5.0" },
|
||||||
|
{ "normalizedVersions.type": "range",
|
||||||
|
"normalizedVersions.min": { $lte: "1.5.0" },
|
||||||
|
"normalizedVersions.max": { $gt: "1.5.0" } }
|
||||||
|
]
|
||||||
|
}},
|
||||||
|
{ $project: { _id: 1 } }
|
||||||
|
])
|
||||||
|
```
|
||||||
|
* No `$objectToArray`, fewer `$cond`s.
|
||||||
|
|
||||||
|
* **Cheaper storage**
|
||||||
|
|
||||||
|
* Arrays of tiny docs compress well and avoid wide nested structures with many nulls/keys.
|
||||||
|
|
||||||
|
* **Easier dedup/merge**
|
||||||
|
|
||||||
|
* `$unwind` → normalize → `$group` by `{scheme,type,min,max,value}` to collapse equivalent rules across sources.
|
||||||
|
|
||||||
|
## Builder changes (`SemVerRangeBuilder`)
|
||||||
|
|
||||||
|
* **Emit items, not a monolith**: have the builder return `IEnumerable<NormalizedVersionRule>`.
|
||||||
|
* **Normalize early**: resolve “aliases” (`1.2.x`, `^1.2.3`, distro styles) into canonical `(type,min,max,…)` before persistence.
|
||||||
|
* **Traceability**: include `notes`/`sourceRef` on each rule so you can re-materialize provenance during audits.
|
||||||
|
|
||||||
|
### C# sketch
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
public record NormalizedVersionRule(
|
||||||
|
string Scheme, // "semver"
|
||||||
|
string Type, // "range" | "exact" | ...
|
||||||
|
string? Min = null,
|
||||||
|
bool? MinInclusive = null,
|
||||||
|
string? Max = null,
|
||||||
|
bool? MaxInclusive = null,
|
||||||
|
string? Value = null,
|
||||||
|
string? Notes = null
|
||||||
|
);
|
||||||
|
|
||||||
|
public static class SemVerRangeBuilder
|
||||||
|
{
|
||||||
|
public static IEnumerable<NormalizedVersionRule> Build(string raw)
|
||||||
|
{
|
||||||
|
// parse raw (^1.2.3, 1.2.x, <=2.0.0, etc.)
|
||||||
|
// yield canonical rules:
|
||||||
|
yield return new NormalizedVersionRule(
|
||||||
|
Scheme: "semver",
|
||||||
|
Type: "range",
|
||||||
|
Min: "1.2.3",
|
||||||
|
MinInclusive: true,
|
||||||
|
Max: "2.0.0",
|
||||||
|
MaxInclusive: false,
|
||||||
|
Notes: "nvd:ABC-123"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Aggregation patterns you unlock
|
||||||
|
|
||||||
|
* **Fast “affected version” lookups** via `$unwind + $match` (can complement with a computed sort key).
|
||||||
|
* **Rollups**: count of vulns per `(major,minor)` by mapping each rule into bucketed segments.
|
||||||
|
* **Cross-source reconciliation**: group identical rules to de-duplicate.
|
||||||
|
|
||||||
|
## Indexing tips
|
||||||
|
|
||||||
|
* Compound index on `{ packageId: 1, "normalizedVersions.scheme": 1, "normalizedVersions.type": 1 }`.
|
||||||
|
* If lookups by exact value are common: add a sparse index on `"normalizedVersions.value"`.
|
||||||
|
|
||||||
|
## Migration path (safe + incremental)
|
||||||
|
|
||||||
|
1. **Dual-write**: keep old nested object while writing the new `normalizedVersions` array.
|
||||||
|
2. **Backfill** existing docs with a one-time script using your current builder.
|
||||||
|
3. **Cutover** queries/aggregations to the new path (behind a feature flag).
|
||||||
|
4. **Clean up** old field after soak.
|
||||||
|
|
||||||
|
If you want, I can draft:
|
||||||
|
|
||||||
|
* a one-time Mongo backfill script,
|
||||||
|
* the new EF/Mongo C# POCOs, and
|
||||||
|
* a test matrix (edge cases: prerelease tags, build metadata, `0.*` semantics, distro-style ranges).
|
||||||
20
src/StellaOps.Authority/AGENTS.md
Normal file
20
src/StellaOps.Authority/AGENTS.md
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
# Authority Host Crew
|
||||||
|
|
||||||
|
## Mission
|
||||||
|
Own the StellaOps Authority host service: ASP.NET minimal API, OpenIddict flows, plugin loading, storage orchestration, and cross-cutting security controls (rate limiting, audit logging, revocation exports).
|
||||||
|
|
||||||
|
## Teams On Call
|
||||||
|
- Team 2 (Authority Core)
|
||||||
|
- Team 8 (Security Guild) — collaborates on security-sensitive endpoints
|
||||||
|
|
||||||
|
## Operating Principles
|
||||||
|
- Deterministic responses, structured logging, cancellation-ready handlers.
|
||||||
|
- Use `StellaOps.Cryptography` abstractions for any crypto operations.
|
||||||
|
- Every change updates `TASKS.md` and related docs/tests.
|
||||||
|
- Coordinate with plugin teams before altering plugin-facing contracts.
|
||||||
|
|
||||||
|
## Key Directories
|
||||||
|
- `src/StellaOps.Authority/` — host app
|
||||||
|
- `src/StellaOps.Authority.Tests/` — integration/unit tests
|
||||||
|
- `src/StellaOps.Authority.Storage.Mongo/` — data access helpers
|
||||||
|
- `src/StellaOps.Authority.Plugin.Standard/` — default identity provider plugin
|
||||||
@@ -0,0 +1,75 @@
|
|||||||
|
using System;
|
||||||
|
using System.Net;
|
||||||
|
using StellaOps.Auth.Abstractions;
|
||||||
|
using Xunit;
|
||||||
|
|
||||||
|
namespace StellaOps.Auth.Abstractions.Tests;
|
||||||
|
|
||||||
|
public class NetworkMaskMatcherTests
|
||||||
|
{
|
||||||
|
[Fact]
|
||||||
|
public void Parse_SingleAddress_YieldsHostMask()
|
||||||
|
{
|
||||||
|
var mask = NetworkMask.Parse("192.168.1.42");
|
||||||
|
|
||||||
|
Assert.Equal(32, mask.PrefixLength);
|
||||||
|
Assert.True(mask.Contains(IPAddress.Parse("192.168.1.42")));
|
||||||
|
Assert.False(mask.Contains(IPAddress.Parse("192.168.1.43")));
|
||||||
|
}
|
||||||
|
|
||||||
|
[Fact]
|
||||||
|
public void Parse_Cidr_NormalisesHostBits()
|
||||||
|
{
|
||||||
|
var mask = NetworkMask.Parse("10.0.15.9/20");
|
||||||
|
|
||||||
|
Assert.Equal("10.0.0.0/20", mask.ToString());
|
||||||
|
Assert.True(mask.Contains(IPAddress.Parse("10.0.8.1")));
|
||||||
|
Assert.False(mask.Contains(IPAddress.Parse("10.0.32.1")));
|
||||||
|
}
|
||||||
|
|
||||||
|
[Fact]
|
||||||
|
public void Contains_ReturnsFalse_ForMismatchedAddressFamily()
|
||||||
|
{
|
||||||
|
var mask = NetworkMask.Parse("192.168.0.0/16");
|
||||||
|
|
||||||
|
Assert.False(mask.Contains(IPAddress.IPv6Loopback));
|
||||||
|
}
|
||||||
|
|
||||||
|
[Fact]
|
||||||
|
public void Matcher_AllowsAll_WhenStarProvided()
|
||||||
|
{
|
||||||
|
var matcher = new NetworkMaskMatcher(new[] { "*" });
|
||||||
|
|
||||||
|
Assert.False(matcher.IsEmpty);
|
||||||
|
Assert.True(matcher.IsAllowed(IPAddress.Parse("203.0.113.10")));
|
||||||
|
Assert.True(matcher.IsAllowed(IPAddress.IPv6Loopback));
|
||||||
|
}
|
||||||
|
|
||||||
|
[Fact]
|
||||||
|
public void Matcher_ReturnsFalse_WhenNoMasksConfigured()
|
||||||
|
{
|
||||||
|
var matcher = new NetworkMaskMatcher(Array.Empty<string>());
|
||||||
|
|
||||||
|
Assert.True(matcher.IsEmpty);
|
||||||
|
Assert.False(matcher.IsAllowed(IPAddress.Parse("127.0.0.1")));
|
||||||
|
Assert.False(matcher.IsAllowed(null));
|
||||||
|
}
|
||||||
|
|
||||||
|
[Fact]
|
||||||
|
public void Matcher_SupportsIpv4AndIpv6Masks()
|
||||||
|
{
|
||||||
|
var matcher = new NetworkMaskMatcher(new[] { "192.168.0.0/24", "::1/128" });
|
||||||
|
|
||||||
|
Assert.True(matcher.IsAllowed(IPAddress.Parse("192.168.0.42")));
|
||||||
|
Assert.False(matcher.IsAllowed(IPAddress.Parse("10.0.0.1")));
|
||||||
|
Assert.True(matcher.IsAllowed(IPAddress.IPv6Loopback));
|
||||||
|
Assert.False(matcher.IsAllowed(IPAddress.IPv6Any));
|
||||||
|
}
|
||||||
|
|
||||||
|
[Fact]
|
||||||
|
public void Matcher_Throws_ForInvalidEntries()
|
||||||
|
{
|
||||||
|
var exception = Assert.Throws<FormatException>(() => new NetworkMaskMatcher(new[] { "invalid-mask" }));
|
||||||
|
Assert.Contains("invalid-mask", exception.Message, StringComparison.OrdinalIgnoreCase);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,10 @@
|
|||||||
|
<Project Sdk="Microsoft.NET.Sdk">
|
||||||
|
<PropertyGroup>
|
||||||
|
<TargetFramework>net10.0</TargetFramework>
|
||||||
|
<ImplicitUsings>enable</ImplicitUsings>
|
||||||
|
<Nullable>enable</Nullable>
|
||||||
|
</PropertyGroup>
|
||||||
|
<ItemGroup>
|
||||||
|
<ProjectReference Include="..\StellaOps.Auth.Abstractions\StellaOps.Auth.Abstractions.csproj" />
|
||||||
|
</ItemGroup>
|
||||||
|
</Project>
|
||||||
@@ -0,0 +1,74 @@
|
|||||||
|
using System;
|
||||||
|
using System.Linq;
|
||||||
|
using System.Security.Claims;
|
||||||
|
using StellaOps.Auth.Abstractions;
|
||||||
|
using Xunit;
|
||||||
|
|
||||||
|
namespace StellaOps.Auth.Abstractions.Tests;
|
||||||
|
|
||||||
|
public class StellaOpsPrincipalBuilderTests
|
||||||
|
{
|
||||||
|
[Fact]
|
||||||
|
public void NormalizedScopes_AreSortedDeduplicatedLowerCased()
|
||||||
|
{
|
||||||
|
var builder = new StellaOpsPrincipalBuilder()
|
||||||
|
.WithScopes(new[] { "Feedser.Jobs.Trigger", " feedser.jobs.trigger ", "AUTHORITY.USERS.MANAGE" })
|
||||||
|
.WithAudiences(new[] { " api://feedser ", "api://cli", "api://feedser" });
|
||||||
|
|
||||||
|
Assert.Equal(
|
||||||
|
new[] { "authority.users.manage", "feedser.jobs.trigger" },
|
||||||
|
builder.NormalizedScopes);
|
||||||
|
|
||||||
|
Assert.Equal(
|
||||||
|
new[] { "api://cli", "api://feedser" },
|
||||||
|
builder.Audiences);
|
||||||
|
}
|
||||||
|
|
||||||
|
[Fact]
|
||||||
|
public void Build_ConstructsClaimsPrincipalWithNormalisedValues()
|
||||||
|
{
|
||||||
|
var now = DateTimeOffset.UtcNow;
|
||||||
|
var builder = new StellaOpsPrincipalBuilder()
|
||||||
|
.WithSubject(" user-1 ")
|
||||||
|
.WithClientId(" cli-01 ")
|
||||||
|
.WithTenant(" default ")
|
||||||
|
.WithName(" Jane Doe ")
|
||||||
|
.WithIdentityProvider(" internal ")
|
||||||
|
.WithSessionId(" session-123 ")
|
||||||
|
.WithTokenId(Guid.NewGuid().ToString("N"))
|
||||||
|
.WithAuthenticationMethod("password")
|
||||||
|
.WithAuthenticationType(" custom ")
|
||||||
|
.WithScopes(new[] { "Feedser.Jobs.Trigger", "AUTHORITY.USERS.MANAGE" })
|
||||||
|
.WithAudience(" api://feedser ")
|
||||||
|
.WithIssuedAt(now)
|
||||||
|
.WithExpires(now.AddMinutes(5))
|
||||||
|
.AddClaim(" custom ", " value ");
|
||||||
|
|
||||||
|
var principal = builder.Build();
|
||||||
|
var identity = Assert.IsType<ClaimsIdentity>(principal.Identity);
|
||||||
|
|
||||||
|
Assert.Equal("custom", identity.AuthenticationType);
|
||||||
|
Assert.Equal("Jane Doe", identity.Name);
|
||||||
|
Assert.Equal("user-1", principal.FindFirstValue(StellaOpsClaimTypes.Subject));
|
||||||
|
Assert.Equal("cli-01", principal.FindFirstValue(StellaOpsClaimTypes.ClientId));
|
||||||
|
Assert.Equal("default", principal.FindFirstValue(StellaOpsClaimTypes.Tenant));
|
||||||
|
Assert.Equal("internal", principal.FindFirstValue(StellaOpsClaimTypes.IdentityProvider));
|
||||||
|
Assert.Equal("session-123", principal.FindFirstValue(StellaOpsClaimTypes.SessionId));
|
||||||
|
Assert.Equal("value", principal.FindFirstValue("custom"));
|
||||||
|
|
||||||
|
var scopeClaims = principal.Claims.Where(claim => claim.Type == StellaOpsClaimTypes.ScopeItem).Select(claim => claim.Value).ToArray();
|
||||||
|
Assert.Equal(new[] { "authority.users.manage", "feedser.jobs.trigger" }, scopeClaims);
|
||||||
|
|
||||||
|
var scopeList = principal.FindFirstValue(StellaOpsClaimTypes.Scope);
|
||||||
|
Assert.Equal("authority.users.manage feedser.jobs.trigger", scopeList);
|
||||||
|
|
||||||
|
var audienceClaims = principal.Claims.Where(claim => claim.Type == StellaOpsClaimTypes.Audience).Select(claim => claim.Value).ToArray();
|
||||||
|
Assert.Equal(new[] { "api://feedser" }, audienceClaims);
|
||||||
|
|
||||||
|
var issuedAt = principal.FindFirstValue("iat");
|
||||||
|
Assert.Equal(now.ToUnixTimeSeconds().ToString(), issuedAt);
|
||||||
|
|
||||||
|
var expires = principal.FindFirstValue("exp");
|
||||||
|
Assert.Equal(now.AddMinutes(5).ToUnixTimeSeconds().ToString(), expires);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.HttpResults;
using Microsoft.AspNetCore.Mvc;
using StellaOps.Auth.Abstractions;
using Xunit;

namespace StellaOps.Auth.Abstractions.Tests;

/// <summary>
/// Verifies that <see cref="StellaOpsProblemResultFactory"/> emits the canonical RFC 7807 payloads.
/// </summary>
public class StellaOpsProblemResultFactoryTests
{
    [Fact]
    public void AuthenticationRequired_ReturnsCanonicalProblem()
    {
        var problemResult = StellaOpsProblemResultFactory.AuthenticationRequired(instance: "/jobs");

        Assert.Equal(StatusCodes.Status401Unauthorized, problemResult.StatusCode);

        var problem = Assert.IsType<ProblemDetails>(problemResult.ProblemDetails);
        Assert.Equal("https://docs.stella-ops.org/problems/authentication-required", problem.Type);
        Assert.Equal("Authentication required", problem.Title);
        Assert.Equal("/jobs", problem.Instance);
        Assert.Equal("unauthorized", problem.Extensions["error"]);
        // error_description mirrors the human-readable detail text.
        Assert.Equal(problem.Detail, problem.Extensions["error_description"]);
    }

    [Fact]
    public void InvalidToken_UsesProvidedDetail()
    {
        var problemResult = StellaOpsProblemResultFactory.InvalidToken("expired refresh token");

        var problem = Assert.IsType<ProblemDetails>(problemResult.ProblemDetails);
        Assert.Equal(StatusCodes.Status401Unauthorized, problemResult.StatusCode);
        Assert.Equal("expired refresh token", problem.Detail);
        Assert.Equal("invalid_token", problem.Extensions["error"]);
    }

    [Fact]
    public void InsufficientScope_AddsScopeExtensions()
    {
        var problemResult = StellaOpsProblemResultFactory.InsufficientScope(
            new[] { StellaOpsScopes.FeedserJobsTrigger },
            new[] { StellaOpsScopes.AuthorityUsersManage },
            instance: "/jobs/trigger");

        Assert.Equal(StatusCodes.Status403Forbidden, problemResult.StatusCode);

        var problem = Assert.IsType<ProblemDetails>(problemResult.ProblemDetails);
        Assert.Equal("https://docs.stella-ops.org/problems/insufficient-scope", problem.Type);
        Assert.Equal("insufficient_scope", problem.Extensions["error"]);
        // Required/granted scopes are surfaced as plain string arrays for clients to diff.
        Assert.Equal(new[] { StellaOpsScopes.FeedserJobsTrigger }, Assert.IsType<string[]>(problem.Extensions["required_scopes"]));
        Assert.Equal(new[] { StellaOpsScopes.AuthorityUsersManage }, Assert.IsType<string[]>(problem.Extensions["granted_scopes"]));
        Assert.Equal("/jobs/trigger", problem.Instance);
    }
}
|
||||||
@@ -0,0 +1,56 @@
|
|||||||
|
using System;
using System.Collections.Generic;
using System.Reflection;

namespace StellaOps.Auth;

/// <summary>
/// Canonical telemetry metadata for the StellaOps Authority stack.
/// </summary>
public static class AuthorityTelemetry
{
    /// <summary><c>service.name</c> resource attribute recorded by Authority components.</summary>
    public const string ServiceName = "stellaops-authority";

    /// <summary><c>service.namespace</c> resource attribute aligning Authority with other StellaOps services.</summary>
    public const string ServiceNamespace = "stellaops";

    /// <summary>Activity source identifier used by Authority instrumentation.</summary>
    public const string ActivitySourceName = "StellaOps.Authority";

    /// <summary>Meter name used by Authority instrumentation.</summary>
    public const string MeterName = "StellaOps.Authority";

    /// <summary>
    /// Builds the default set of resource attributes (service name/namespace/version).
    /// Keys are matched case-insensitively.
    /// </summary>
    /// <param name="assembly">Optional assembly used to resolve the service version.</param>
    public static IReadOnlyDictionary<string, object> BuildDefaultResourceAttributes(Assembly? assembly = null)
    {
        var attributes = new Dictionary<string, object>(StringComparer.OrdinalIgnoreCase);
        attributes["service.name"] = ServiceName;
        attributes["service.namespace"] = ServiceNamespace;
        attributes["service.version"] = ResolveServiceVersion(assembly);
        return attributes;
    }

    /// <summary>
    /// Resolves the service version string from the provided assembly
    /// (defaults to the Authority telemetry assembly; falls back to "0.0.0" when no version is stamped).
    /// </summary>
    public static string ResolveServiceVersion(Assembly? assembly = null)
        => (assembly ?? typeof(AuthorityTelemetry).Assembly).GetName().Version?.ToString() ?? "0.0.0";
}
|
||||||
@@ -0,0 +1,181 @@
|
|||||||
|
using System;
using System.Globalization;
using System.Net;
using System.Net.Sockets;

namespace StellaOps.Auth.Abstractions;

/// <summary>
/// Represents an IP network expressed in CIDR notation.
/// </summary>
public readonly record struct NetworkMask
{
    // Network address with host bits already zeroed (see Normalize).
    private readonly IPAddress canonicalNetwork;

    /// <summary>
    /// Initialises a new <see cref="NetworkMask"/>.
    /// </summary>
    /// <param name="network">Canonical network address with host bits zeroed.</param>
    /// <param name="prefixLength">Prefix length (0-32 for IPv4, 0-128 for IPv6).</param>
    public NetworkMask(IPAddress network, int prefixLength)
    {
        ArgumentNullException.ThrowIfNull(network);

        var maxPrefix = GetMaxPrefix(network);
        if (prefixLength < 0 || prefixLength > maxPrefix)
        {
            throw new ArgumentOutOfRangeException(nameof(prefixLength), $"Prefix length must be between 0 and {maxPrefix} for {network.AddressFamily}.");
        }

        canonicalNetwork = Normalize(network, prefixLength);
        PrefixLength = prefixLength;
    }

    /// <summary>
    /// Canonical network address with host bits zeroed.
    /// </summary>
    public IPAddress Network => canonicalNetwork;

    /// <summary>
    /// Prefix length.
    /// </summary>
    public int PrefixLength { get; }

    /// <summary>
    /// Parses the supplied value as CIDR notation or a single IP address.
    /// </summary>
    /// <exception cref="FormatException">Thrown when the input is not recognised.</exception>
    public static NetworkMask Parse(string value)
        => TryParse(value, out var mask)
            ? mask
            : throw new FormatException($"'{value}' is not a valid CIDR or IP address.");

    /// <summary>
    /// Attempts to parse the supplied value as CIDR notation or a single IP address.
    /// A bare address is treated as a host mask (/32 for IPv4, /128 for IPv6).
    /// </summary>
    public static bool TryParse(string? value, out NetworkMask mask)
    {
        mask = default;

        if (string.IsNullOrWhiteSpace(value))
        {
            return false;
        }

        var candidate = value.Trim();
        var separator = candidate.IndexOf('/', StringComparison.Ordinal);

        if (separator < 0)
        {
            // No prefix given: accept a single host address.
            if (!IPAddress.TryParse(candidate, out var host))
            {
                return false;
            }

            mask = new NetworkMask(host, host.AddressFamily == AddressFamily.InterNetwork ? 32 : 128);
            return true;
        }

        if (!IPAddress.TryParse(candidate[..separator], out var networkAddress)
            || !int.TryParse(candidate[(separator + 1)..], NumberStyles.Integer, CultureInfo.InvariantCulture, out var prefixLength))
        {
            return false;
        }

        try
        {
            mask = new NetworkMask(networkAddress, prefixLength);
            return true;
        }
        catch (ArgumentOutOfRangeException)
        {
            // Prefix out of range for the address family: treat as unparsable.
            return false;
        }
    }

    /// <summary>
    /// Determines whether the provided address belongs to this network.
    /// Addresses of a different family never match.
    /// </summary>
    public bool Contains(IPAddress address)
    {
        ArgumentNullException.ThrowIfNull(address);

        if (address.AddressFamily != canonicalNetwork.AddressFamily)
        {
            return false;
        }

        if (PrefixLength == 0)
        {
            return true;
        }

        var candidateBytes = address.GetAddressBytes();
        var referenceBytes = canonicalNetwork.GetAddressBytes();

        // Compare the whole bytes covered by the prefix first.
        var wholeBytes = PrefixLength / 8;
        for (var i = 0; i < wholeBytes; i++)
        {
            if (candidateBytes[i] != referenceBytes[i])
            {
                return false;
            }
        }

        // Then compare the remaining high-order bits of the partial byte, if any.
        var trailingBits = PrefixLength % 8;
        if (trailingBits == 0)
        {
            return true;
        }

        var partialMask = (byte)(0xFF << (8 - trailingBits));
        return (candidateBytes[wholeBytes] & partialMask) == referenceBytes[wholeBytes];
    }

    private static int GetMaxPrefix(IPAddress address) => address.AddressFamily switch
    {
        AddressFamily.InterNetwork => 32,
        AddressFamily.InterNetworkV6 => 128,
        _ => throw new ArgumentOutOfRangeException(nameof(address), $"Unsupported address family {address.AddressFamily}.")
    };

    // Zeroes every host bit so equal networks compare equal regardless of the input address.
    private static IPAddress Normalize(IPAddress address, int prefixLength)
    {
        var bytes = address.GetAddressBytes();
        var wholeBytes = prefixLength / 8;
        var trailingBits = prefixLength % 8;

        if (wholeBytes < bytes.Length)
        {
            if (trailingBits > 0)
            {
                bytes[wholeBytes] &= (byte)(0xFF << (8 - trailingBits));
                wholeBytes++;
            }

            for (var i = wholeBytes; i < bytes.Length; i++)
            {
                bytes[i] = 0;
            }
        }

        return new IPAddress(bytes);
    }

    /// <inheritdoc />
    public override string ToString() => $"{Network}/{PrefixLength}";
}
|
||||||
@@ -0,0 +1,139 @@
|
|||||||
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;

namespace StellaOps.Auth.Abstractions;

/// <summary>
/// Evaluates remote addresses against configured network masks.
/// </summary>
public sealed class NetworkMaskMatcher
{
    private readonly NetworkMask[] configuredMasks;
    // True when the configuration means "allow everything" ("*", "0.0.0.0/0", "::/0", or a single /0 mask).
    private readonly bool allowEverything;

    /// <summary>
    /// Creates a matcher from raw CIDR strings.
    /// </summary>
    /// <param name="values">Sequence of CIDR entries or IP addresses.</param>
    /// <exception cref="FormatException">Thrown when a value cannot be parsed.</exception>
    public NetworkMaskMatcher(IEnumerable<string>? values)
        : this(Parse(values))
    {
    }

    /// <summary>
    /// Creates a matcher from already parsed masks (duplicates are ignored).
    /// </summary>
    /// <param name="masks">Sequence of network masks.</param>
    public NetworkMaskMatcher(IEnumerable<NetworkMask> masks)
    {
        ArgumentNullException.ThrowIfNull(masks);

        var distinct = new HashSet<NetworkMask>();
        foreach (var mask in masks)
        {
            distinct.Add(mask);
        }

        configuredMasks = distinct.ToArray();
        allowEverything = configuredMasks.Length == 1 && configuredMasks[0].PrefixLength == 0;
    }

    private NetworkMaskMatcher((bool MatchAll, NetworkMask[] Masks) parsed)
    {
        allowEverything = parsed.MatchAll;
        configuredMasks = parsed.Masks;
    }

    /// <summary>
    /// Gets a matcher that allows every address.
    /// </summary>
    public static NetworkMaskMatcher AllowAll { get; } = new((true, Array.Empty<NetworkMask>()));

    /// <summary>
    /// Gets a matcher that denies every address (no masks configured).
    /// </summary>
    public static NetworkMaskMatcher DenyAll { get; } = new((false, Array.Empty<NetworkMask>()));

    /// <summary>
    /// Indicates whether this matcher has no masks configured and does not allow all.
    /// </summary>
    public bool IsEmpty => !allowEverything && configuredMasks.Length == 0;

    /// <summary>
    /// Returns the configured masks.
    /// </summary>
    public IReadOnlyList<NetworkMask> Masks => configuredMasks;

    /// <summary>
    /// Checks whether the provided address matches any of the configured masks.
    /// </summary>
    /// <param name="address">Remote address to test; <c>null</c> is never allowed.</param>
    /// <returns><c>true</c> when the address is allowed.</returns>
    public bool IsAllowed(IPAddress? address)
    {
        if (address is null)
        {
            return false;
        }

        if (allowEverything)
        {
            return true;
        }

        return configuredMasks.Any(mask => mask.Contains(address));
    }

    private static (bool MatchAll, NetworkMask[] Masks) Parse(IEnumerable<string>? values)
    {
        if (values is null)
        {
            return (false, Array.Empty<NetworkMask>());
        }

        var distinct = new HashSet<NetworkMask>();

        foreach (var raw in values)
        {
            if (string.IsNullOrWhiteSpace(raw))
            {
                continue;
            }

            var candidate = raw.Trim();

            // Wildcard entries short-circuit into the allow-all configuration.
            if (candidate is "*" or "0.0.0.0/0" or "::/0")
            {
                return (true, Array.Empty<NetworkMask>());
            }

            if (!NetworkMask.TryParse(candidate, out var mask))
            {
                throw new FormatException($"'{candidate}' is not a valid network mask or IP address.");
            }

            distinct.Add(mask);
        }

        return (false, distinct.ToArray());
    }
}
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
# StellaOps.Auth.Abstractions
|
||||||
|
|
||||||
|
Shared authentication primitives for StellaOps services:
|
||||||
|
|
||||||
|
- Scope and claim constants aligned with StellaOps Authority.
|
||||||
|
- Deterministic `StellaOpsPrincipalBuilder` and `StellaOpsProblemResultFactory` helpers.
|
||||||
|
- Utility types used by resource servers, plug-ins, and client libraries.
|
||||||
|
|
||||||
|
These abstractions are referenced by `StellaOps.Auth.ServerIntegration` and `StellaOps.Auth.Client`. Review `docs/dev/32_AUTH_CLIENT_GUIDE.md` for downstream integration patterns.
|
||||||
@@ -0,0 +1,37 @@
|
|||||||
|
<Project Sdk="Microsoft.NET.Sdk">
|
||||||
|
<PropertyGroup>
|
||||||
|
<TargetFramework>net10.0</TargetFramework>
|
||||||
|
<LangVersion>preview</LangVersion>
|
||||||
|
<ImplicitUsings>enable</ImplicitUsings>
|
||||||
|
<Nullable>enable</Nullable>
|
||||||
|
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
|
||||||
|
</PropertyGroup>
|
||||||
|
<PropertyGroup>
|
||||||
|
<PackageId>StellaOps.Auth.Abstractions</PackageId>
|
||||||
|
<Description>Core authority authentication abstractions, scopes, and helpers for StellaOps services.</Description>
|
||||||
|
<Authors>StellaOps</Authors>
|
||||||
|
<Company>StellaOps</Company>
|
||||||
|
<PackageLicenseExpression>AGPL-3.0-or-later</PackageLicenseExpression>
|
||||||
|
<PackageProjectUrl>https://stella-ops.org</PackageProjectUrl>
|
||||||
|
<RepositoryUrl>https://git.stella-ops.org/stella-ops.org/git.stella-ops.org</RepositoryUrl>
|
||||||
|
<RepositoryType>git</RepositoryType>
|
||||||
|
<PublishRepositoryUrl>true</PublishRepositoryUrl>
|
||||||
|
<EmbedUntrackedSources>true</EmbedUntrackedSources>
|
||||||
|
<IncludeSymbols>true</IncludeSymbols>
|
||||||
|
<SymbolPackageFormat>snupkg</SymbolPackageFormat>
|
||||||
|
<PackageTags>stellaops;authentication;authority;oauth2</PackageTags>
|
||||||
|
<GenerateDocumentationFile>true</GenerateDocumentationFile>
|
||||||
|
<NoWarn>$(NoWarn);1591</NoWarn>
|
||||||
|
<PackageReadmeFile>README.NuGet.md</PackageReadmeFile>
|
||||||
|
<VersionPrefix>1.0.0-preview.1</VersionPrefix>
|
||||||
|
</PropertyGroup>
|
||||||
|
<ItemGroup>
|
||||||
|
<FrameworkReference Include="Microsoft.AspNetCore.App" />
|
||||||
|
</ItemGroup>
|
||||||
|
<ItemGroup>
|
||||||
|
<PackageReference Include="Microsoft.SourceLink.GitLab" Version="8.0.0" PrivateAssets="All" />
|
||||||
|
</ItemGroup>
|
||||||
|
<ItemGroup>
|
||||||
|
<None Include="README.NuGet.md" Pack="true" PackagePath="" />
|
||||||
|
</ItemGroup>
|
||||||
|
</Project>
|
||||||
@@ -0,0 +1,22 @@
|
|||||||
|
namespace StellaOps.Auth.Abstractions;

/// <summary>
/// Default authentication constants shared by StellaOps resource servers and clients.
/// </summary>
public static class StellaOpsAuthenticationDefaults
{
    /// <summary>Default authentication scheme name for StellaOps bearer tokens.</summary>
    public const string AuthenticationScheme = "StellaOpsBearer";

    /// <summary>Logical authentication type stamped on <see cref="System.Security.Claims.ClaimsIdentity"/> instances.</summary>
    public const string AuthenticationType = "StellaOps";

    /// <summary>Prefix applied to named authorization policies.</summary>
    public const string PolicyPrefix = "StellaOps.Policy.";
}
|
||||||
@@ -0,0 +1,57 @@
|
|||||||
|
namespace StellaOps.Auth.Abstractions;

/// <summary>
/// Canonical claim type identifiers used across StellaOps services.
/// </summary>
public static class StellaOpsClaimTypes
{
    /// <summary>Subject identifier claim (JWT <c>sub</c>).</summary>
    public const string Subject = "sub";

    /// <summary>StellaOps tenant identifier claim for multi-tenant deployments.</summary>
    public const string Tenant = "stellaops:tenant";

    /// <summary>OAuth2/OIDC client identifier claim (<c>client_id</c>).</summary>
    public const string ClientId = "client_id";

    /// <summary>Unique token identifier claim (JWT <c>jti</c>).</summary>
    public const string TokenId = "jti";

    /// <summary>Authentication method reference claim (<c>amr</c>).</summary>
    public const string AuthenticationMethod = "amr";

    /// <summary>Space-separated scope list claim (<c>scope</c>).</summary>
    public const string Scope = "scope";

    /// <summary>Individual scope item claim (<c>scp</c>).</summary>
    public const string ScopeItem = "scp";

    /// <summary>OAuth2 resource audience claim (<c>aud</c>).</summary>
    public const string Audience = "aud";

    /// <summary>Identity provider hint for downstream services.</summary>
    public const string IdentityProvider = "stellaops:idp";

    /// <summary>Session identifier claim (<c>sid</c>).</summary>
    public const string SessionId = "sid";
}
|
||||||
@@ -0,0 +1,287 @@
|
|||||||
|
using System;
|
||||||
|
using System.Collections.Generic;
|
||||||
|
using System.Globalization;
|
||||||
|
using System.Linq;
|
||||||
|
using System.Security.Claims;
|
||||||
|
|
||||||
|
namespace StellaOps.Auth.Abstractions;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Fluent helper used to construct <see cref="ClaimsPrincipal"/> instances that follow StellaOps conventions.
|
||||||
|
/// </summary>
|
||||||
|
public sealed class StellaOpsPrincipalBuilder
|
||||||
|
{
|
||||||
|
private readonly Dictionary<string, Claim> singleClaims = new(StringComparer.Ordinal);
|
||||||
|
private readonly List<Claim> additionalClaims = new();
|
||||||
|
private readonly HashSet<string> scopes = new(StringComparer.OrdinalIgnoreCase);
|
||||||
|
private readonly HashSet<string> audiences = new(StringComparer.OrdinalIgnoreCase);
|
||||||
|
|
||||||
|
private string authenticationType = StellaOpsAuthenticationDefaults.AuthenticationType;
|
||||||
|
private string nameClaimType = ClaimTypes.Name;
|
||||||
|
private string roleClaimType = ClaimTypes.Role;
|
||||||
|
|
||||||
|
private string[]? cachedScopes;
|
||||||
|
private string[]? cachedAudiences;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds or replaces the canonical subject identifier.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithSubject(string subject)
|
||||||
|
=> SetSingleClaim(StellaOpsClaimTypes.Subject, subject);
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds or replaces the canonical client identifier.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithClientId(string clientId)
|
||||||
|
=> SetSingleClaim(StellaOpsClaimTypes.ClientId, clientId);
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds or replaces the tenant identifier claim.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithTenant(string tenant)
|
||||||
|
=> SetSingleClaim(StellaOpsClaimTypes.Tenant, tenant);
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds or replaces the user display name claim.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithName(string name)
|
||||||
|
{
|
||||||
|
ArgumentException.ThrowIfNullOrWhiteSpace(name);
|
||||||
|
singleClaims[nameClaimType] = new Claim(nameClaimType, name.Trim(), ClaimValueTypes.String);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds or replaces the identity provider claim.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithIdentityProvider(string identityProvider)
|
||||||
|
=> SetSingleClaim(StellaOpsClaimTypes.IdentityProvider, identityProvider);
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds or replaces the session identifier claim.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithSessionId(string sessionId)
|
||||||
|
=> SetSingleClaim(StellaOpsClaimTypes.SessionId, sessionId);
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds or replaces the token identifier claim.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithTokenId(string tokenId)
|
||||||
|
=> SetSingleClaim(StellaOpsClaimTypes.TokenId, tokenId);
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds or replaces the authentication method reference claim.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithAuthenticationMethod(string method)
|
||||||
|
=> SetSingleClaim(StellaOpsClaimTypes.AuthenticationMethod, method);
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Sets the name claim type appended when building the <see cref="ClaimsIdentity"/>.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithNameClaimType(string claimType)
|
||||||
|
{
|
||||||
|
ArgumentException.ThrowIfNullOrWhiteSpace(claimType);
|
||||||
|
nameClaimType = claimType.Trim();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Sets the role claim type appended when building the <see cref="ClaimsIdentity"/>.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithRoleClaimType(string claimType)
|
||||||
|
{
|
||||||
|
ArgumentException.ThrowIfNullOrWhiteSpace(claimType);
|
||||||
|
roleClaimType = claimType.Trim();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Sets the authentication type stamped on the <see cref="ClaimsIdentity"/>.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithAuthenticationType(string authenticationType)
|
||||||
|
{
|
||||||
|
ArgumentException.ThrowIfNullOrWhiteSpace(authenticationType);
|
||||||
|
this.authenticationType = authenticationType.Trim();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Registers the supplied scopes (normalised to lower-case, deduplicated, sorted).
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithScopes(IEnumerable<string> scopes)
|
||||||
|
{
|
||||||
|
ArgumentNullException.ThrowIfNull(scopes);
|
||||||
|
|
||||||
|
foreach (var scope in scopes)
|
||||||
|
{
|
||||||
|
var normalized = StellaOpsScopes.Normalize(scope);
|
||||||
|
if (normalized is null)
|
||||||
|
{
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.scopes.Add(normalized))
|
||||||
|
{
|
||||||
|
cachedScopes = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Registers the supplied audiences (trimmed, deduplicated, sorted).
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithAudiences(IEnumerable<string> audiences)
|
||||||
|
{
|
||||||
|
ArgumentNullException.ThrowIfNull(audiences);
|
||||||
|
|
||||||
|
foreach (var audience in audiences)
|
||||||
|
{
|
||||||
|
if (string.IsNullOrWhiteSpace(audience))
|
||||||
|
{
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.audiences.Add(audience.Trim()))
|
||||||
|
{
|
||||||
|
cachedAudiences = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds a single audience.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithAudience(string audience)
|
||||||
|
=> WithAudiences(new[] { audience });
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds an arbitrary claim (no deduplication is performed).
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder AddClaim(string type, string value, string valueType = ClaimValueTypes.String)
|
||||||
|
{
|
||||||
|
ArgumentException.ThrowIfNullOrWhiteSpace(type);
|
||||||
|
ArgumentException.ThrowIfNullOrWhiteSpace(value);
|
||||||
|
|
||||||
|
var trimmedType = type.Trim();
|
||||||
|
var trimmedValue = value.Trim();
|
||||||
|
|
||||||
|
additionalClaims.Add(new Claim(trimmedType, trimmedValue, valueType));
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds multiple claims (incoming claims are cloned to enforce value trimming).
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder AddClaims(IEnumerable<Claim> claims)
|
||||||
|
{
|
||||||
|
ArgumentNullException.ThrowIfNull(claims);
|
||||||
|
|
||||||
|
foreach (var claim in claims)
|
||||||
|
{
|
||||||
|
ArgumentNullException.ThrowIfNull(claim);
|
||||||
|
AddClaim(claim.Type, claim.Value, claim.ValueType);
|
||||||
|
}
|
||||||
|
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds an <c>iat</c> (issued at) claim using Unix time seconds.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithIssuedAt(DateTimeOffset issuedAt)
|
||||||
|
=> SetSingleClaim("iat", ToUnixTime(issuedAt));
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds an <c>nbf</c> (not before) claim using Unix time seconds.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithNotBefore(DateTimeOffset notBefore)
|
||||||
|
=> SetSingleClaim("nbf", ToUnixTime(notBefore));
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Adds an <c>exp</c> (expires) claim using Unix time seconds.
|
||||||
|
/// </summary>
|
||||||
|
public StellaOpsPrincipalBuilder WithExpires(DateTimeOffset expires)
|
||||||
|
=> SetSingleClaim("exp", ToUnixTime(expires));
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Returns the normalised scope list (deduplicated + sorted).
|
||||||
|
/// </summary>
|
||||||
|
public IReadOnlyCollection<string> NormalizedScopes
|
||||||
|
{
|
||||||
|
get
|
||||||
|
{
|
||||||
|
cachedScopes ??= scopes.Count == 0
|
||||||
|
? Array.Empty<string>()
|
||||||
|
: scopes.OrderBy(static scope => scope, StringComparer.Ordinal).ToArray();
|
||||||
|
|
||||||
|
return cachedScopes;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Returns the normalised audience list (deduplicated + sorted).
|
||||||
|
/// </summary>
|
||||||
|
public IReadOnlyCollection<string> Audiences
|
||||||
|
{
|
||||||
|
get
|
||||||
|
{
|
||||||
|
cachedAudiences ??= audiences.Count == 0
|
||||||
|
? Array.Empty<string>()
|
||||||
|
: audiences.OrderBy(static audience => audience, StringComparer.Ordinal).ToArray();
|
||||||
|
|
||||||
|
return cachedAudiences;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Builds the immutable <see cref="ClaimsPrincipal"/> instance based on the registered data.
|
||||||
|
/// </summary>
|
||||||
|
public ClaimsPrincipal Build()
|
||||||
|
{
|
||||||
|
var claims = new List<Claim>(
|
||||||
|
singleClaims.Count +
|
||||||
|
additionalClaims.Count +
|
||||||
|
NormalizedScopes.Count * 2 +
|
||||||
|
Audiences.Count);
|
||||||
|
|
||||||
|
claims.AddRange(singleClaims.Values);
|
||||||
|
claims.AddRange(additionalClaims);
|
||||||
|
|
||||||
|
if (NormalizedScopes.Count > 0)
|
||||||
|
{
|
||||||
|
var joined = string.Join(' ', NormalizedScopes);
|
||||||
|
claims.Add(new Claim(StellaOpsClaimTypes.Scope, joined, ClaimValueTypes.String));
|
||||||
|
|
||||||
|
foreach (var scope in NormalizedScopes)
|
||||||
|
{
|
||||||
|
claims.Add(new Claim(StellaOpsClaimTypes.ScopeItem, scope, ClaimValueTypes.String));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Audiences.Count > 0)
|
||||||
|
{
|
||||||
|
foreach (var audience in Audiences)
|
||||||
|
{
|
||||||
|
claims.Add(new Claim(StellaOpsClaimTypes.Audience, audience, ClaimValueTypes.String));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var identity = new ClaimsIdentity(claims, authenticationType, nameClaimType, roleClaimType);
|
||||||
|
return new ClaimsPrincipal(identity);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stores (or replaces) a single-valued claim after trimming the value.
// Throws ArgumentException when the value is null, empty, or whitespace.
private StellaOpsPrincipalBuilder SetSingleClaim(string type, string value)
{
    ArgumentException.ThrowIfNullOrWhiteSpace(value);
    singleClaims[type] = new Claim(type, value.Trim(), ClaimValueTypes.String);
    return this;
}
|
||||||
|
|
||||||
|
// Formats a timestamp as whole Unix seconds using the invariant culture,
// so the output is stable regardless of the host's locale.
private static string ToUnixTime(DateTimeOffset value)
{
    var seconds = value.ToUnixTimeSeconds();
    return seconds.ToString(CultureInfo.InvariantCulture);
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,114 @@
|
|||||||
|
using System;
|
||||||
|
using System.Collections.Generic;
|
||||||
|
using System.Linq;
|
||||||
|
using Microsoft.AspNetCore.Http;
|
||||||
|
using Microsoft.AspNetCore.Http.HttpResults;
|
||||||
|
using Microsoft.AspNetCore.Mvc;
|
||||||
|
|
||||||
|
namespace StellaOps.Auth.Abstractions;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Factory helpers for returning RFC 7807 problem responses using StellaOps conventions.
|
||||||
|
/// </summary>
|
||||||
|
/// <summary>
/// Factory helpers for returning RFC 7807 problem responses using StellaOps conventions.
/// Every result also carries OAuth-style <c>error</c>/<c>error_description</c> extension
/// members so OAuth-aware clients can parse failures uniformly.
/// </summary>
public static class StellaOpsProblemResultFactory
{
    // Base URI for the RFC 7807 "type" member; each method appends its problem slug.
    private const string ProblemBase = "https://docs.stella-ops.org/problems";

    /// <summary>
    /// Produces a 401 problem response indicating authentication is required.
    /// </summary>
    /// <param name="detail">Optional human-readable detail; a default message is used when omitted.</param>
    /// <param name="instance">Optional URI identifying the specific occurrence.</param>
    public static ProblemHttpResult AuthenticationRequired(string? detail = null, string? instance = null)
        => Create(
            StatusCodes.Status401Unauthorized,
            $"{ProblemBase}/authentication-required",
            "Authentication required",
            detail ?? "Authentication is required to access this resource.",
            instance,
            "unauthorized");

    /// <summary>
    /// Produces a 401 problem response for invalid, expired, or revoked tokens.
    /// </summary>
    /// <param name="detail">Optional human-readable detail; a default message is used when omitted.</param>
    /// <param name="instance">Optional URI identifying the specific occurrence.</param>
    public static ProblemHttpResult InvalidToken(string? detail = null, string? instance = null)
        => Create(
            StatusCodes.Status401Unauthorized,
            $"{ProblemBase}/invalid-token",
            "Invalid token",
            detail ?? "The supplied access token is invalid, expired, or revoked.",
            instance,
            "invalid_token");

    /// <summary>
    /// Produces a 403 problem response when access is denied.
    /// </summary>
    /// <param name="detail">Optional human-readable detail; a default message is used when omitted.</param>
    /// <param name="instance">Optional URI identifying the specific occurrence.</param>
    public static ProblemHttpResult Forbidden(string? detail = null, string? instance = null)
        => Create(
            StatusCodes.Status403Forbidden,
            $"{ProblemBase}/forbidden",
            "Forbidden",
            detail ?? "The authenticated principal is not authorised to access this resource.",
            instance,
            "forbidden");

    /// <summary>
    /// Produces a 403 problem response for insufficient scopes, attaching the required
    /// (and, when known, granted) scopes as extension members.
    /// </summary>
    /// <param name="requiredScopes">Scopes the resource requires.</param>
    /// <param name="grantedScopes">Scopes actually held by the principal, when known.</param>
    /// <param name="instance">Optional URI identifying the specific occurrence.</param>
    /// <param name="detail">
    /// Optional human-readable detail; a default message is used when omitted.
    /// Added (trailing, defaulted) for parity with the other factory methods; existing callers are unaffected.
    /// </param>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="requiredScopes"/> is <c>null</c>.</exception>
    public static ProblemHttpResult InsufficientScope(
        IReadOnlyCollection<string> requiredScopes,
        IReadOnlyCollection<string>? grantedScopes = null,
        string? instance = null,
        string? detail = null)
    {
        ArgumentNullException.ThrowIfNull(requiredScopes);

        var extensions = new Dictionary<string, object?>(StringComparer.OrdinalIgnoreCase)
        {
            ["required_scopes"] = requiredScopes.ToArray()
        };

        if (grantedScopes is not null)
        {
            extensions["granted_scopes"] = grantedScopes.ToArray();
        }

        return Create(
            StatusCodes.Status403Forbidden,
            $"{ProblemBase}/insufficient-scope",
            "Insufficient scope",
            detail ?? "The authenticated principal does not hold the scopes required by this resource.",
            instance,
            "insufficient_scope",
            extensions);
    }

    /// <summary>
    /// Assembles the <see cref="ProblemDetails"/> payload and wraps it in a
    /// <see cref="ProblemHttpResult"/>. The detail text is mirrored into the
    /// <c>error_description</c> extension; caller-supplied extensions may
    /// overwrite these defaults because they are applied last.
    /// </summary>
    private static ProblemHttpResult Create(
        int status,
        string type,
        string title,
        string detail,
        string? instance,
        string error,
        IReadOnlyDictionary<string, object?>? extensions = null)
    {
        var problem = new ProblemDetails
        {
            Status = status,
            Type = type,
            Title = title,
            Detail = detail,
            Instance = instance
        };

        problem.Extensions["error"] = error;
        problem.Extensions["error_description"] = detail;

        if (extensions is not null)
        {
            foreach (var entry in extensions)
            {
                problem.Extensions[entry.Key] = entry.Value;
            }
        }

        return TypedResults.Problem(problem);
    }
}
|
||||||
@@ -0,0 +1,79 @@
|
|||||||
|
using System;
|
||||||
|
using System.Collections.Generic;
|
||||||
|
|
||||||
|
namespace StellaOps.Auth.Abstractions;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Canonical scope names supported by StellaOps services.
|
||||||
|
/// </summary>
|
||||||
|
/// <summary>
/// Canonical scope names supported by StellaOps services.
/// </summary>
public static class StellaOpsScopes
{
    /// <summary>
    /// Scope required to trigger Feedser jobs.
    /// </summary>
    public const string FeedserJobsTrigger = "feedser.jobs.trigger";

    /// <summary>
    /// Scope required to manage Feedser merge operations.
    /// </summary>
    public const string FeedserMerge = "feedser.merge";

    /// <summary>
    /// Scope granting administrative access to Authority user management.
    /// </summary>
    public const string AuthorityUsersManage = "authority.users.manage";

    /// <summary>
    /// Scope granting administrative access to Authority client registrations.
    /// </summary>
    public const string AuthorityClientsManage = "authority.clients.manage";

    /// <summary>
    /// Scope granting read-only access to Authority audit logs.
    /// </summary>
    public const string AuthorityAuditRead = "authority.audit.read";

    /// <summary>
    /// Synthetic scope representing trusted network bypass.
    /// </summary>
    public const string Bypass = "stellaops.bypass";

    // Immutable snapshot exposed via All. Previously All returned the HashSet itself,
    // which callers could cast back to HashSet<string> and mutate the scope registry.
    private static readonly string[] KnownScopeValues =
    {
        FeedserJobsTrigger,
        FeedserMerge,
        AuthorityUsersManage,
        AuthorityClientsManage,
        AuthorityAuditRead,
        Bypass
    };

    // Case-insensitive lookup set backing IsKnown.
    private static readonly HashSet<string> KnownScopes = new(KnownScopeValues, StringComparer.OrdinalIgnoreCase);

    /// <summary>
    /// Normalises a scope string (trim/convert to lower case).
    /// </summary>
    /// <param name="scope">Scope raw value.</param>
    /// <returns>Normalised scope or <c>null</c> when the input is blank.</returns>
    public static string? Normalize(string? scope)
    {
        if (string.IsNullOrWhiteSpace(scope))
        {
            return null;
        }

        return scope.Trim().ToLowerInvariant();
    }

    /// <summary>
    /// Checks whether the provided scope is registered as a built-in StellaOps scope.
    /// The comparison is case-insensitive.
    /// </summary>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="scope"/> is <c>null</c>.</exception>
    public static bool IsKnown(string scope)
    {
        ArgumentNullException.ThrowIfNull(scope);
        return KnownScopes.Contains(scope);
    }

    /// <summary>
    /// Returns the full set of built-in scopes (read-only snapshot).
    /// </summary>
    public static IReadOnlyCollection<string> All => KnownScopeValues;
}
|
||||||
@@ -0,0 +1,95 @@
|
|||||||
|
using System;
|
||||||
|
using System.Collections.Generic;
|
||||||
|
using System.Linq;
|
||||||
|
using System.Net;
|
||||||
|
using System.Net.Http;
|
||||||
|
using System.Net.Http.Headers;
|
||||||
|
using System.Threading;
|
||||||
|
using System.Threading.Tasks;
|
||||||
|
using Microsoft.Extensions.DependencyInjection;
|
||||||
|
using Microsoft.Extensions.Http;
|
||||||
|
using StellaOps.Auth.Client;
|
||||||
|
using Xunit;
|
||||||
|
|
||||||
|
namespace StellaOps.Auth.Client.Tests;
|
||||||
|
|
||||||
|
// Verifies that AddStellaOpsAuthClient wires a retry policy into the discovery
// cache's HttpClient pipeline. The behavior observed here (handler ordering,
// which additional handlers exist at builder time) depends on the exact
// IHttpClientFactory configuration order, so the arrangement below is intentionally precise.
public class ServiceCollectionExtensionsTests
{
    [Fact]
    public async Task AddStellaOpsAuthClient_ConfiguresRetryPolicy()
    {
        // Arrange: register the auth client with a single 1 ms retry delay so the
        // test stays fast while still exercising one retry.
        var services = new ServiceCollection();
        services.AddLogging();

        services.AddStellaOpsAuthClient(options =>
        {
            options.Authority = "https://authority.test";
            options.RetryDelays.Clear();
            options.RetryDelays.Add(TimeSpan.FromMilliseconds(1));
            options.DiscoveryCacheLifetime = TimeSpan.FromMinutes(1);
            options.JwksCacheLifetime = TimeSpan.FromMinutes(1);
            options.AllowOfflineCacheFallback = false;
        });

        // recordedHandlers is reassigned inside the builder callback below; the
        // outer variable keeps the snapshot alive for the assertions.
        var recordedHandlers = new List<DelegatingHandler>();
        var attemptCount = 0;

        // NOTE(review): ConfigureHttpMessageHandlerBuilder is marked obsolete in
        // newer .NET releases — confirm against the targeted SDK version.
        services.AddHttpClient<StellaOpsDiscoveryCache>()
            .ConfigureHttpMessageHandlerBuilder(builder =>
            {
                // Snapshot the delegating handlers registered so far (the retry
                // policy handler should be among them).
                recordedHandlers = new List<DelegatingHandler>(builder.AdditionalHandlers);

                // First request fails with 500, second succeeds — so exactly one
                // retry is required to obtain the discovery document.
                var responses = new Queue<Func<HttpResponseMessage>>(new[]
                {
                    () => CreateResponse(HttpStatusCode.InternalServerError, "{}"),
                    () => CreateResponse(HttpStatusCode.OK, "{\"token_endpoint\":\"https://authority.test/connect/token\",\"jwks_uri\":\"https://authority.test/jwks\"}")
                });

                builder.PrimaryHandler = new LambdaHttpMessageHandler((_, _) =>
                {
                    attemptCount++;

                    if (responses.Count == 0)
                    {
                        return Task.FromResult(CreateResponse(HttpStatusCode.OK, "{}"));
                    }

                    var factory = responses.Dequeue();
                    return Task.FromResult(factory());
                });
            });

        using var provider = services.BuildServiceProvider();

        // Act: resolve the cache and fetch the discovery configuration once.
        var cache = provider.GetRequiredService<StellaOpsDiscoveryCache>();
        var configuration = await cache.GetAsync(CancellationToken.None);

        // Assert: the second (retried) response supplied the token endpoint,
        // exactly two HTTP attempts were made, and a Polly policy handler was
        // present in the pipeline (matched by type name since the type is internal).
        Assert.Equal(new Uri("https://authority.test/connect/token"), configuration.TokenEndpoint);
        Assert.Equal(2, attemptCount);
        Assert.NotEmpty(recordedHandlers);
        Assert.Contains(recordedHandlers, handler => handler.GetType().Name.Contains("PolicyHttpMessageHandler", StringComparison.Ordinal));
    }

    // Builds an HTTP response with the given status and a JSON-typed string body.
    private static HttpResponseMessage CreateResponse(HttpStatusCode statusCode, string jsonContent)
    {
        return new HttpResponseMessage(statusCode)
        {
            Content = new StringContent(jsonContent)
            {
                Headers = { ContentType = new MediaTypeHeaderValue("application/json") }
            }
        };
    }

    // Minimal primary handler that delegates every request to the supplied lambda.
    private sealed class LambdaHttpMessageHandler : HttpMessageHandler
    {
        private readonly Func<HttpRequestMessage, CancellationToken, Task<HttpResponseMessage>> responder;

        public LambdaHttpMessageHandler(Func<HttpRequestMessage, CancellationToken, Task<HttpResponseMessage>> responder)
        {
            this.responder = responder;
        }

        protected override Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
            => responder(request, cancellationToken);
    }
}
|
||||||
@@ -0,0 +1,15 @@
|
|||||||
|
<Project Sdk="Microsoft.NET.Sdk">
  <!-- Test project for the StellaOps auth client libraries. -->
  <!-- NOTE(review): no xUnit / Microsoft.NET.Test.Sdk references appear here —
       presumably supplied via a shared Directory.Build.props; verify. -->
  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
  </PropertyGroup>
  <!-- Projects under test. -->
  <ItemGroup>
    <ProjectReference Include="..\StellaOps.Auth.Client\StellaOps.Auth.Client.csproj" />
    <ProjectReference Include="..\StellaOps.Auth.Abstractions\StellaOps.Auth.Abstractions.csproj" />
  </ItemGroup>
  <!-- NOTE(review): Microsoft.Extensions.Http is also a transitive dependency of
       Microsoft.Extensions.Http.Polly; the explicit 8.0.0 pin looks intentional — confirm. -->
  <ItemGroup>
    <PackageReference Include="Microsoft.Extensions.Http.Polly" Version="8.0.5" />
    <PackageReference Include="Microsoft.Extensions.Http" Version="8.0.0" />
  </ItemGroup>
</Project>
|
||||||
@@ -0,0 +1,84 @@
|
|||||||
|
using System;
|
||||||
|
using StellaOps.Auth.Client;
|
||||||
|
using Xunit;
|
||||||
|
|
||||||
|
namespace StellaOps.Auth.Client.Tests;
|
||||||
|
|
||||||
|
// Unit tests for StellaOpsAuthClientOptions.Validate(): scope normalisation,
// retry-delay normalisation, and argument validation failures.
public class StellaOpsAuthClientOptionsTests
{
    [Fact]
    public void Validate_NormalizesScopes()
    {
        var sut = new StellaOpsAuthClientOptions
        {
            Authority = "https://authority.test",
            ClientId = "cli",
            HttpTimeout = TimeSpan.FromSeconds(15)
        };

        // Mixed case, padding, and a duplicate — all should collapse after Validate().
        foreach (var rawScope in new[] { " Feedser.Jobs.Trigger ", "feedser.jobs.trigger", "AUTHORITY.USERS.MANAGE" })
        {
            sut.DefaultScopes.Add(rawScope);
        }

        sut.Validate();

        Assert.Equal(new[] { "authority.users.manage", "feedser.jobs.trigger" }, sut.NormalizedScopes);
        Assert.Equal(new Uri("https://authority.test"), sut.AuthorityUri);
        Assert.Equal<TimeSpan>(sut.RetryDelays, sut.NormalizedRetryDelays);
    }

    [Fact]
    public void Validate_Throws_When_AuthorityMissing()
    {
        var sut = new StellaOpsAuthClientOptions();

        var exception = Assert.Throws<InvalidOperationException>(() => sut.Validate());

        Assert.Contains("Authority", exception.Message, StringComparison.OrdinalIgnoreCase);
    }

    [Fact]
    public void Validate_NormalizesRetryDelays()
    {
        var sut = new StellaOpsAuthClientOptions
        {
            Authority = "https://authority.test"
        };

        // Zero and negative delays should be dropped; only the positive delay survives.
        sut.RetryDelays.Clear();
        foreach (var delay in new[] { TimeSpan.Zero, TimeSpan.FromSeconds(3), TimeSpan.FromMilliseconds(-1) })
        {
            sut.RetryDelays.Add(delay);
        }

        sut.Validate();

        Assert.Equal<TimeSpan>(new[] { TimeSpan.FromSeconds(3) }, sut.NormalizedRetryDelays);
        Assert.Equal<TimeSpan>(sut.NormalizedRetryDelays, sut.RetryDelays);
    }

    [Fact]
    public void Validate_DisabledRetries_ProducesEmptyDelays()
    {
        var sut = new StellaOpsAuthClientOptions
        {
            Authority = "https://authority.test",
            EnableRetries = false
        };

        sut.Validate();

        Assert.Empty(sut.NormalizedRetryDelays);
    }

    [Fact]
    public void Validate_Throws_When_OfflineToleranceNegative()
    {
        var sut = new StellaOpsAuthClientOptions
        {
            Authority = "https://authority.test",
            OfflineCacheTolerance = TimeSpan.FromSeconds(-1)
        };

        var exception = Assert.Throws<InvalidOperationException>(() => sut.Validate());

        Assert.Contains("Offline cache tolerance", exception.Message, StringComparison.OrdinalIgnoreCase);
    }
}
|
||||||
@@ -0,0 +1,134 @@
|
|||||||
|
using System;
|
||||||
|
using System.Net;
|
||||||
|
using System.Net.Http;
|
||||||
|
using System.Net.Http.Headers;
|
||||||
|
using System.Threading;
|
||||||
|
using System.Threading.Tasks;
|
||||||
|
using Microsoft.Extensions.Logging.Abstractions;
|
||||||
|
using Microsoft.Extensions.Options;
|
||||||
|
using Microsoft.Extensions.Time.Testing;
|
||||||
|
using StellaOps.Auth.Client;
|
||||||
|
using Xunit;
|
||||||
|
|
||||||
|
namespace StellaOps.Auth.Client.Tests;
|
||||||
|
|
||||||
|
// Tests the discovery cache's offline fallback: after the fresh-cache lifetime
// expires, a failed refresh should fall back to the cached document while the
// offline tolerance window is open, then surface the failure once it closes.
// The FakeTimeProvider advances are order-sensitive; do not reorder steps.
public class StellaOpsDiscoveryCacheTests
{
    [Fact]
    public async Task GetAsync_UsesOfflineFallbackWithinTolerance()
    {
        var timeProvider = new FakeTimeProvider(DateTimeOffset.Parse("2025-01-01T00:00:00Z"));
        var callCount = 0;
        // Only the very first HTTP call succeeds; every later refresh attempt
        // throws, simulating the authority going offline.
        var handler = new StubHttpMessageHandler((request, _) =>
        {
            callCount++;

            if (callCount == 1)
            {
                return Task.FromResult(CreateJsonResponse("{\"token_endpoint\":\"https://authority.test/connect/token\",\"jwks_uri\":\"https://authority.test/jwks\"}"));
            }

            throw new HttpRequestException("offline");
        });

        var httpClient = new HttpClient(handler);

        // Fresh cache lasts 1 minute; stale entries remain usable for a further
        // 5 minutes when offline fallback is allowed.
        var options = new StellaOpsAuthClientOptions
        {
            Authority = "https://authority.test",
            DiscoveryCacheLifetime = TimeSpan.FromMinutes(1),
            OfflineCacheTolerance = TimeSpan.FromMinutes(5),
            AllowOfflineCacheFallback = true
        };
        options.Validate();

        var monitor = new TestOptionsMonitor<StellaOpsAuthClientOptions>(options);
        var cache = new StellaOpsDiscoveryCache(httpClient, monitor, timeProvider, NullLogger<StellaOpsDiscoveryCache>.Instance);

        // First fetch populates the cache (HTTP call #1).
        var configuration = await cache.GetAsync(CancellationToken.None);
        Assert.Equal(new Uri("https://authority.test/connect/token"), configuration.TokenEndpoint);

        // Move past the cache lifetime so the next call must attempt a refresh.
        timeProvider.Advance(TimeSpan.FromMinutes(1) + TimeSpan.FromSeconds(5));

        // Refresh fails (HTTP call #2 throws) but the stale document is served
        // because we are still inside the offline tolerance window.
        configuration = await cache.GetAsync(CancellationToken.None);
        Assert.Equal(new Uri("https://authority.test/connect/token"), configuration.TokenEndpoint);
        Assert.Equal(2, callCount);

        var offlineExpiry = GetOfflineExpiry(cache);
        Assert.True(offlineExpiry > timeProvider.GetUtcNow());

        // Advance past the tolerance window; note offlineExpiry was captured
        // before this advance, so the comparison below is against the old value.
        timeProvider.Advance(options.OfflineCacheTolerance + TimeSpan.FromSeconds(1));

        Assert.True(offlineExpiry < timeProvider.GetUtcNow());

        // With the fallback window closed, the refresh failure (HTTP call #3)
        // must propagate to the caller.
        HttpRequestException? exception = null;
        try
        {
            await cache.GetAsync(CancellationToken.None);
        }
        catch (HttpRequestException ex)
        {
            exception = ex;
        }

        Assert.NotNull(exception);
        Assert.Equal(3, callCount);
    }

    // Builds a 200 OK response with a JSON-typed string body.
    private static HttpResponseMessage CreateJsonResponse(string json)
    {
        return new HttpResponseMessage(HttpStatusCode.OK)
        {
            Content = new StringContent(json)
            {
                Headers = { ContentType = new MediaTypeHeaderValue("application/json") }
            }
        };
    }

    // Minimal handler delegating every request to the supplied lambda.
    private sealed class StubHttpMessageHandler : HttpMessageHandler
    {
        private readonly Func<HttpRequestMessage, CancellationToken, Task<HttpResponseMessage>> responder;

        public StubHttpMessageHandler(Func<HttpRequestMessage, CancellationToken, Task<HttpResponseMessage>> responder)
        {
            this.responder = responder;
        }

        protected override Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
            => responder(request, cancellationToken);
    }

    // Fixed-value IOptionsMonitor stub; OnChange never fires.
    private sealed class TestOptionsMonitor<T> : IOptionsMonitor<T>
        where T : class
    {
        private readonly T value;

        public TestOptionsMonitor(T value)
        {
            this.value = value;
        }

        public T CurrentValue => value;

        public T Get(string? name) => value;

        public IDisposable OnChange(Action<T, string> listener) => NullDisposable.Instance;

        private sealed class NullDisposable : IDisposable
        {
            public static NullDisposable Instance { get; } = new();

            public void Dispose()
            {
            }
        }
    }

    // Reads the cache's private "offlineExpiresAt" field via reflection.
    // NOTE(review): brittle — renaming that field breaks this test silently at runtime.
    private static DateTimeOffset GetOfflineExpiry(StellaOpsDiscoveryCache cache)
    {
        var field = typeof(StellaOpsDiscoveryCache).GetField("offlineExpiresAt", System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance);
        Assert.NotNull(field);
        return (DateTimeOffset)field!.GetValue(cache)!;
    }
}
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user