Compare commits

...

2 Commits

Author | SHA1 | Message | Date
root | df5984d07e | up | 2025-10-10 06:53:40 +00:00
  Some checks failed:
  - Build Test Deploy / build-test (push): cancelled
  - Build Test Deploy / authority-container (push): cancelled
  - Build Test Deploy / docs (push): cancelled
  - Build Test Deploy / deploy (push): cancelled
  - Docs CI / lint-and-preview (push): cancelled
root | 3aed135fb5 | Add StellaOps Authority planning artifacts and config templates | 2025-10-10 06:52:41 +00:00
1095 changed files with 98593 additions and 61390 deletions


@@ -1,29 +1,29 @@
name: Feedser CI
on:
push:
branches: ["main", "develop"]
pull_request:
branches: ["main", "develop"]
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- name: Check out repository
uses: actions/checkout@v4
- name: Setup .NET 10 preview
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100-rc.1.25451.107
include-prerelease: true
- name: Restore dependencies
run: dotnet restore src/StellaOps.Feedser/StellaOps.Feedser.sln
- name: Build
run: dotnet build src/StellaOps.Feedser/StellaOps.Feedser.sln --configuration Release --no-restore -warnaserror
- name: Test
run: dotnet test src/StellaOps.Feedser/StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj --configuration Release --no-restore --logger "trx;LogFileName=feedser-tests.trx"

.gitea/workflows/feedser-tests.yml

@@ -1,87 +1,87 @@
name: Feedser Tests CI
on:
push:
paths:
- 'StellaOps.Feedser/**'
- '.gitea/workflows/feedser-tests.yml'
pull_request:
paths:
- 'StellaOps.Feedser/**'
- '.gitea/workflows/feedser-tests.yml'
jobs:
advisory-store-performance:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100-rc.1
- name: Restore dependencies
working-directory: StellaOps.Feedser
run: dotnet restore StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj
- name: Run advisory store performance test
working-directory: StellaOps.Feedser
run: |
set -euo pipefail
dotnet test \
StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj \
--filter "FullyQualifiedName~AdvisoryStorePerformanceTests" \
--logger:"console;verbosity=detailed" | tee performance.log
- name: Upload performance log
if: always()
uses: actions/upload-artifact@v4
with:
name: advisory-store-performance-log
path: StellaOps.Feedser/performance.log
full-test-suite:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100-rc.1
- name: Restore dependencies
working-directory: StellaOps.Feedser
run: dotnet restore StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj
- name: Run full test suite with baseline guard
working-directory: StellaOps.Feedser
env:
BASELINE_SECONDS: "19.8"
TOLERANCE_PERCENT: "25"
run: |
set -euo pipefail
start=$(date +%s)
dotnet test StellaOps.Feedser.Tests/StellaOps.Feedser.Tests.csproj --no-build | tee full-tests.log
end=$(date +%s)
duration=$((end-start))
echo "Full test duration: ${duration}s"
export DURATION_SECONDS="$duration"
python - <<'PY'
import os, sys
duration = float(os.environ["DURATION_SECONDS"])
baseline = float(os.environ["BASELINE_SECONDS"])
tolerance = float(os.environ["TOLERANCE_PERCENT"])
threshold = baseline * (1 + tolerance / 100)
print(f"Baseline {baseline:.1f}s, threshold {threshold:.1f}s, observed {duration:.1f}s")
if duration > threshold:
sys.exit(f"Full test duration {duration:.1f}s exceeded threshold {threshold:.1f}s")
PY
- name: Upload full test log
if: always()
uses: actions/upload-artifact@v4
with:
name: full-test-suite-log
path: StellaOps.Feedser/full-tests.log


@@ -1,74 +1,75 @@
# .gitea/workflows/build-test-deploy.yml
# Unified CI/CD workflow for git.stella-ops.org (Feedser monorepo)
name: Build Test Deploy
on:
push:
branches: [ main ]
paths:
- 'src/**'
- 'docs/**'
- 'scripts/**'
- 'Directory.Build.props'
- 'Directory.Build.targets'
- 'global.json'
- '.gitea/workflows/**'
pull_request:
branches: [ main, develop ]
paths:
- 'src/**'
- 'docs/**'
- 'scripts/**'
- '.gitea/workflows/**'
workflow_dispatch:
inputs:
force_deploy:
description: 'Ignore branch checks and run the deploy stage'
required: false
default: 'false'
type: boolean
env:
DOTNET_VERSION: '10.0.100-rc.1.25451.107'
BUILD_CONFIGURATION: Release
CI_CACHE_ROOT: /data/.cache/stella-ops/feedser
RUNNER_TOOL_CACHE: /toolcache
jobs:
build-test:
runs-on: ubuntu-22.04
environment: ${{ github.event_name == 'pull_request' && 'preview' || 'staging' }}
env:
PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/webservice
AUTHORITY_PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/authority
TEST_RESULTS_DIR: ${{ github.workspace }}/artifacts/test-results
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET ${{ env.DOTNET_VERSION }}
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore dependencies
run: dotnet restore src/StellaOps.Feedser.sln
- name: Build solution (warnings as errors)
run: dotnet build src/StellaOps.Feedser.sln --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
- name: Run unit and integration tests
run: |
mkdir -p "$TEST_RESULTS_DIR"
dotnet test src/StellaOps.Feedser.sln \
--configuration $BUILD_CONFIGURATION \
--no-build \
--logger "trx;LogFileName=stellaops-feedser-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Publish Feedser web service
run: |
mkdir -p "$PUBLISH_DIR"
@@ -85,6 +86,36 @@ jobs:
if-no-files-found: error
retention-days: 7
- name: Restore Authority solution
run: dotnet restore src/StellaOps.Authority/StellaOps.Authority.sln
- name: Build Authority solution
run: dotnet build src/StellaOps.Authority/StellaOps.Authority.sln --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
- name: Run Authority tests
run: |
dotnet test src/StellaOps.Configuration.Tests/StellaOps.Configuration.Tests.csproj \
--configuration $BUILD_CONFIGURATION \
--no-build \
--logger "trx;LogFileName=stellaops-authority-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Publish Authority web service
run: |
mkdir -p "$AUTHORITY_PUBLISH_DIR"
dotnet publish src/StellaOps.Authority/StellaOps.Authority/StellaOps.Authority.csproj \
--configuration $BUILD_CONFIGURATION \
--no-build \
--output "$AUTHORITY_PUBLISH_DIR"
- name: Upload Authority artifacts
uses: actions/upload-artifact@v4
with:
name: authority-publish
path: ${{ env.AUTHORITY_PUBLISH_DIR }}
if-no-files-found: error
retention-days: 7
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
@@ -94,204 +125,217 @@ jobs:
if-no-files-found: ignore
retention-days: 7
authority-container:
runs-on: ubuntu-22.04
needs: build-test
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Validate Authority compose file
run: docker compose -f ops/authority/docker-compose.authority.yaml config
- name: Build Authority container image
run: docker build -f ops/authority/Dockerfile -t stellaops-authority:ci .
docs:
runs-on: ubuntu-22.04
env:
DOCS_OUTPUT_DIR: ${{ github.workspace }}/artifacts/docs-site
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install documentation dependencies
run: |
python -m pip install --upgrade pip
python -m pip install markdown pygments
- name: Render documentation bundle
run: |
python scripts/render_docs.py --source docs --output "$DOCS_OUTPUT_DIR" --clean
- name: Upload documentation artifact
uses: actions/upload-artifact@v4
with:
name: feedser-docs-site
path: ${{ env.DOCS_OUTPUT_DIR }}
if-no-files-found: error
retention-days: 7
deploy:
runs-on: ubuntu-22.04
needs: [build-test, docs]
if: >-
needs.build-test.result == 'success' &&
needs.docs.result == 'success' &&
(
(github.event_name == 'push' && github.ref == 'refs/heads/main') ||
github.event_name == 'workflow_dispatch'
)
environment: staging
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
sparse-checkout: |
scripts
.gitea/workflows
sparse-checkout-cone-mode: true
- name: Check if deployment should proceed
id: check-deploy
run: |
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
if [ "${{ github.event.inputs.force_deploy }}" = "true" ]; then
echo "should-deploy=true" >> $GITHUB_OUTPUT
echo "✅ Manual deployment requested"
else
echo "should-deploy=false" >> $GITHUB_OUTPUT
echo " Manual dispatch without force_deploy=true — skipping"
fi
elif [ "${{ github.ref }}" = "refs/heads/main" ]; then
echo "should-deploy=true" >> $GITHUB_OUTPUT
echo "✅ Deploying latest main branch build"
else
echo "should-deploy=false" >> $GITHUB_OUTPUT
echo " Deployment restricted to main branch"
fi
- name: Resolve deployment credentials
id: params
if: steps.check-deploy.outputs.should-deploy == 'true'
run: |
missing=()
host="${{ secrets.STAGING_DEPLOYMENT_HOST }}"
if [ -z "$host" ]; then host="${{ vars.STAGING_DEPLOYMENT_HOST }}"; fi
if [ -z "$host" ]; then host="${{ secrets.DEPLOYMENT_HOST }}"; fi
if [ -z "$host" ]; then host="${{ vars.DEPLOYMENT_HOST }}"; fi
if [ -z "$host" ]; then missing+=("STAGING_DEPLOYMENT_HOST"); fi
user="${{ secrets.STAGING_DEPLOYMENT_USERNAME }}"
if [ -z "$user" ]; then user="${{ vars.STAGING_DEPLOYMENT_USERNAME }}"; fi
if [ -z "$user" ]; then user="${{ secrets.DEPLOYMENT_USERNAME }}"; fi
if [ -z "$user" ]; then user="${{ vars.DEPLOYMENT_USERNAME }}"; fi
if [ -z "$user" ]; then missing+=("STAGING_DEPLOYMENT_USERNAME"); fi
path="${{ secrets.STAGING_DEPLOYMENT_PATH }}"
if [ -z "$path" ]; then path="${{ vars.STAGING_DEPLOYMENT_PATH }}"; fi
docs_path="${{ secrets.STAGING_DOCS_PATH }}"
if [ -z "$docs_path" ]; then docs_path="${{ vars.STAGING_DOCS_PATH }}"; fi
key="${{ secrets.STAGING_DEPLOYMENT_KEY }}"
if [ -z "$key" ]; then key="${{ secrets.DEPLOYMENT_KEY }}"; fi
if [ -z "$key" ]; then key="${{ vars.STAGING_DEPLOYMENT_KEY }}"; fi
if [ -z "$key" ]; then key="${{ vars.DEPLOYMENT_KEY }}"; fi
if [ -z "$key" ]; then missing+=("STAGING_DEPLOYMENT_KEY"); fi
if [ ${#missing[@]} -gt 0 ]; then
echo "❌ Missing deployment configuration: ${missing[*]}"
exit 1
fi
key_file="$RUNNER_TEMP/staging_deploy_key"
printf '%s\n' "$key" > "$key_file"
chmod 600 "$key_file"
echo "host=$host" >> $GITHUB_OUTPUT
echo "user=$user" >> $GITHUB_OUTPUT
echo "path=$path" >> $GITHUB_OUTPUT
echo "docs-path=$docs_path" >> $GITHUB_OUTPUT
echo "key-file=$key_file" >> $GITHUB_OUTPUT
- name: Download service artifact
if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs.path != ''
uses: actions/download-artifact@v4
with:
name: feedser-publish
path: artifacts/service
- name: Download documentation artifact
if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs['docs-path'] != ''
uses: actions/download-artifact@v4
with:
name: feedser-docs-site
path: artifacts/docs
- name: Install rsync
if: steps.check-deploy.outputs.should-deploy == 'true'
run: |
if command -v rsync >/dev/null 2>&1; then
exit 0
fi
CACHE_DIR="${CI_CACHE_ROOT:-/tmp}/apt"
mkdir -p "$CACHE_DIR"
KEY="rsync-$(lsb_release -rs 2>/dev/null || echo unknown)"
DEB_DIR="$CACHE_DIR/$KEY"
mkdir -p "$DEB_DIR"
if ls "$DEB_DIR"/rsync*.deb >/dev/null 2>&1; then
apt-get update
apt-get install -y --no-install-recommends "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb
else
apt-get update
apt-get download rsync libpopt0
mv rsync*.deb libpopt0*.deb "$DEB_DIR"/
dpkg -i "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb || apt-get install -f -y
fi
- name: Deploy service bundle
if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs.path != ''
env:
HOST: ${{ steps.params.outputs.host }}
USER: ${{ steps.params.outputs.user }}
TARGET: ${{ steps.params.outputs.path }}
KEY_FILE: ${{ steps.params.outputs['key-file'] }}
run: |
SERVICE_DIR="artifacts/service/feedser-publish"
if [ ! -d "$SERVICE_DIR" ]; then
echo "❌ Service artifact directory missing ($SERVICE_DIR)"
exit 1
fi
echo "🚀 Deploying Feedser web service to $HOST:$TARGET"
rsync -az --delete \
-e "ssh -i $KEY_FILE -o StrictHostKeyChecking=no" \
"$SERVICE_DIR"/ \
"$USER@$HOST:$TARGET/"
- name: Deploy documentation bundle
if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs['docs-path'] != ''
env:
HOST: ${{ steps.params.outputs.host }}
USER: ${{ steps.params.outputs.user }}
DOCS_TARGET: ${{ steps.params.outputs['docs-path'] }}
KEY_FILE: ${{ steps.params.outputs['key-file'] }}
run: |
DOCS_DIR="artifacts/docs/feedser-docs-site"
if [ ! -d "$DOCS_DIR" ]; then
echo "❌ Documentation artifact directory missing ($DOCS_DIR)"
exit 1
fi
echo "📚 Deploying documentation bundle to $HOST:$DOCS_TARGET"
rsync -az --delete \
-e "ssh -i $KEY_FILE -o StrictHostKeyChecking=no" \
"$DOCS_DIR"/ \
"$USER@$HOST:$DOCS_TARGET/"
- name: Deployment summary
if: steps.check-deploy.outputs.should-deploy == 'true'
run: |
echo "✅ Deployment completed"
echo " Host: ${{ steps.params.outputs.host }}"
echo " Service path: ${{ steps.params.outputs.path || '(skipped)' }}"
echo " Docs path: ${{ steps.params.outputs['docs-path'] || '(skipped)' }}"
- name: Deployment skipped summary
if: steps.check-deploy.outputs.should-deploy != 'true'
run: |
echo " Deployment stage skipped"
echo " Event: ${{ github.event_name }}"
echo " Ref: ${{ github.ref }}"


@@ -1,70 +1,70 @@
# .gitea/workflows/docs.yml
# Documentation quality checks and preview artefacts
name: Docs CI
on:
push:
paths:
- 'docs/**'
- 'scripts/render_docs.py'
- '.gitea/workflows/docs.yml'
pull_request:
paths:
- 'docs/**'
- 'scripts/render_docs.py'
- '.gitea/workflows/docs.yml'
workflow_dispatch: {}
env:
NODE_VERSION: '20'
PYTHON_VERSION: '3.11'
jobs:
lint-and-preview:
runs-on: ubuntu-22.04
env:
DOCS_OUTPUT_DIR: ${{ github.workspace }}/artifacts/docs-preview
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
- name: Install markdown linters
run: |
npm install markdown-link-check remark-cli remark-preset-lint-recommended
- name: Link check
run: |
find docs -name '*.md' -print0 | \
xargs -0 -n1 -I{} npx markdown-link-check --quiet '{}'
- name: Remark lint
run: |
npx remark docs -qf
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install documentation dependencies
run: |
python -m pip install --upgrade pip
python -m pip install markdown pygments
- name: Render documentation preview bundle
run: |
python scripts/render_docs.py --source docs --output "$DOCS_OUTPUT_DIR" --clean
- name: Upload documentation preview
if: always()
uses: actions/upload-artifact@v4
with:
name: feedser-docs-preview
path: ${{ env.DOCS_OUTPUT_DIR }}
retention-days: 7


@@ -1,206 +1,206 @@
# .gitea/workflows/promote.yml
# Manual promotion workflow to copy staged artefacts to production
name: Promote Feedser (Manual)
on:
workflow_dispatch:
inputs:
include_docs:
description: 'Also promote the generated documentation bundle'
required: false
default: 'true'
type: boolean
tag:
description: 'Optional build identifier to record in the summary'
required: false
default: 'latest'
type: string
jobs:
promote:
runs-on: ubuntu-22.04
environment: production
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Resolve staging credentials
id: staging
run: |
missing=()
host="${{ secrets.STAGING_DEPLOYMENT_HOST }}"
if [ -z "$host" ]; then host="${{ vars.STAGING_DEPLOYMENT_HOST }}"; fi
if [ -z "$host" ]; then host="${{ secrets.DEPLOYMENT_HOST }}"; fi
if [ -z "$host" ]; then host="${{ vars.DEPLOYMENT_HOST }}"; fi
if [ -z "$host" ]; then missing+=("STAGING_DEPLOYMENT_HOST"); fi
user="${{ secrets.STAGING_DEPLOYMENT_USERNAME }}"
if [ -z "$user" ]; then user="${{ vars.STAGING_DEPLOYMENT_USERNAME }}"; fi
if [ -z "$user" ]; then user="${{ secrets.DEPLOYMENT_USERNAME }}"; fi
if [ -z "$user" ]; then user="${{ vars.DEPLOYMENT_USERNAME }}"; fi
if [ -z "$user" ]; then missing+=("STAGING_DEPLOYMENT_USERNAME"); fi
path="${{ secrets.STAGING_DEPLOYMENT_PATH }}"
if [ -z "$path" ]; then path="${{ vars.STAGING_DEPLOYMENT_PATH }}"; fi
if [ -z "$path" ]; then missing+=("STAGING_DEPLOYMENT_PATH")
fi
docs_path="${{ secrets.STAGING_DOCS_PATH }}"
if [ -z "$docs_path" ]; then docs_path="${{ vars.STAGING_DOCS_PATH }}"; fi
key="${{ secrets.STAGING_DEPLOYMENT_KEY }}"
if [ -z "$key" ]; then key="${{ secrets.DEPLOYMENT_KEY }}"; fi
if [ -z "$key" ]; then key="${{ vars.STAGING_DEPLOYMENT_KEY }}"; fi
if [ -z "$key" ]; then key="${{ vars.DEPLOYMENT_KEY }}"; fi
if [ -z "$key" ]; then missing+=("STAGING_DEPLOYMENT_KEY"); fi
if [ ${#missing[@]} -gt 0 ]; then
echo "❌ Missing staging configuration: ${missing[*]}"
exit 1
fi
key_file="$RUNNER_TEMP/staging_key"
printf '%s\n' "$key" > "$key_file"
chmod 600 "$key_file"
echo "host=$host" >> $GITHUB_OUTPUT
echo "user=$user" >> $GITHUB_OUTPUT
echo "path=$path" >> $GITHUB_OUTPUT
echo "docs-path=$docs_path" >> $GITHUB_OUTPUT
echo "key-file=$key_file" >> $GITHUB_OUTPUT
- name: Resolve production credentials
id: production
run: |
missing=()
host="${{ secrets.PRODUCTION_DEPLOYMENT_HOST }}"
if [ -z "$host" ]; then host="${{ vars.PRODUCTION_DEPLOYMENT_HOST }}"; fi
if [ -z "$host" ]; then host="${{ secrets.DEPLOYMENT_HOST }}"; fi
if [ -z "$host" ]; then host="${{ vars.DEPLOYMENT_HOST }}"; fi
if [ -z "$host" ]; then missing+=("PRODUCTION_DEPLOYMENT_HOST"); fi
user="${{ secrets.PRODUCTION_DEPLOYMENT_USERNAME }}"
if [ -z "$user" ]; then user="${{ vars.PRODUCTION_DEPLOYMENT_USERNAME }}"; fi
if [ -z "$user" ]; then user="${{ secrets.DEPLOYMENT_USERNAME }}"; fi
if [ -z "$user" ]; then user="${{ vars.DEPLOYMENT_USERNAME }}"; fi
if [ -z "$user" ]; then missing+=("PRODUCTION_DEPLOYMENT_USERNAME"); fi
path="${{ secrets.PRODUCTION_DEPLOYMENT_PATH }}"
if [ -z "$path" ]; then path="${{ vars.PRODUCTION_DEPLOYMENT_PATH }}"; fi
if [ -z "$path" ]; then missing+=("PRODUCTION_DEPLOYMENT_PATH")
fi
docs_path="${{ secrets.PRODUCTION_DOCS_PATH }}"
if [ -z "$docs_path" ]; then docs_path="${{ vars.PRODUCTION_DOCS_PATH }}"; fi
key="${{ secrets.PRODUCTION_DEPLOYMENT_KEY }}"
if [ -z "$key" ]; then key="${{ secrets.DEPLOYMENT_KEY }}"; fi
if [ -z "$key" ]; then key="${{ vars.PRODUCTION_DEPLOYMENT_KEY }}"; fi
if [ -z "$key" ]; then key="${{ vars.DEPLOYMENT_KEY }}"; fi
if [ -z "$key" ]; then missing+=("PRODUCTION_DEPLOYMENT_KEY"); fi
if [ ${#missing[@]} -gt 0 ]; then
echo "❌ Missing production configuration: ${missing[*]}"
exit 1
fi
key_file="$RUNNER_TEMP/production_key"
printf '%s\n' "$key" > "$key_file"
chmod 600 "$key_file"
echo "host=$host" >> $GITHUB_OUTPUT
echo "user=$user" >> $GITHUB_OUTPUT
echo "path=$path" >> $GITHUB_OUTPUT
echo "docs-path=$docs_path" >> $GITHUB_OUTPUT
echo "key-file=$key_file" >> $GITHUB_OUTPUT
- name: Install rsync
run: |
if command -v rsync >/dev/null 2>&1; then
exit 0
fi
CACHE_DIR="${CI_CACHE_ROOT:-/tmp}/apt"
mkdir -p "$CACHE_DIR"
KEY="rsync-$(lsb_release -rs 2>/dev/null || echo unknown)"
DEB_DIR="$CACHE_DIR/$KEY"
mkdir -p "$DEB_DIR"
if ls "$DEB_DIR"/rsync*.deb >/dev/null 2>&1; then
apt-get update
apt-get install -y --no-install-recommends "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb
else
apt-get update
apt-get download rsync libpopt0
mv rsync*.deb libpopt0*.deb "$DEB_DIR"/
dpkg -i "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb || apt-get install -f -y
fi
- name: Fetch staging artefacts
id: fetch
run: |
staging_root="${{ runner.temp }}/staging"
mkdir -p "$staging_root/service" "$staging_root/docs"
echo "📥 Copying service bundle from staging"
rsync -az --delete \
-e "ssh -i ${{ steps.staging.outputs['key-file'] }} -o StrictHostKeyChecking=no" \
"${{ steps.staging.outputs.user }}@${{ steps.staging.outputs.host }}:${{ steps.staging.outputs.path }}/" \
"$staging_root/service/"
if [ "${{ github.event.inputs.include_docs }}" = "true" ] && [ -n "${{ steps.staging.outputs['docs-path'] }}" ]; then
echo "📥 Copying documentation bundle from staging"
rsync -az --delete \
-e "ssh -i ${{ steps.staging.outputs['key-file'] }} -o StrictHostKeyChecking=no" \
"${{ steps.staging.outputs.user }}@${{ steps.staging.outputs.host }}:${{ steps.staging.outputs['docs-path'] }}/" \
"$staging_root/docs/"
else
echo " Documentation promotion skipped"
fi
echo "service-dir=$staging_root/service" >> $GITHUB_OUTPUT
echo "docs-dir=$staging_root/docs" >> $GITHUB_OUTPUT
- name: Backup production service content
run: |
ssh -o StrictHostKeyChecking=no -i "${{ steps.production.outputs['key-file'] }}" \
"${{ steps.production.outputs.user }}@${{ steps.production.outputs.host }}" \
"set -e; TARGET='${{ steps.production.outputs.path }}'; \
if [ -d \"$TARGET\" ]; then \
parent=\$(dirname \"$TARGET\"); \
base=\$(basename \"$TARGET\"); \
backup=\"\$parent/\${base}.backup.\$(date +%Y%m%d_%H%M%S)\"; \
mkdir -p \"\$backup\"; \
rsync -a --delete \"$TARGET/\" \"\$backup/\"; \
ls -dt \"\$parent/\${base}.backup.*\" 2>/dev/null | tail -n +6 | xargs rm -rf || true; \
echo 'Backup created at ' \"\$backup\"; \
else \
echo 'Production service path missing; skipping backup'; \
fi"
- name: Publish service to production
run: |
rsync -az --delete \
-e "ssh -i ${{ steps.production.outputs['key-file'] }} -o StrictHostKeyChecking=no" \
"${{ steps.fetch.outputs['service-dir'] }}/" \
"${{ steps.production.outputs.user }}@${{ steps.production.outputs.host }}:${{ steps.production.outputs.path }}/"
- name: Promote documentation bundle
if: github.event.inputs.include_docs == 'true' && steps.production.outputs['docs-path'] != ''
run: |
rsync -az --delete \
-e "ssh -i ${{ steps.production.outputs['key-file'] }} -o StrictHostKeyChecking=no" \
"${{ steps.fetch.outputs['docs-dir'] }}/" \
"${{ steps.production.outputs.user }}@${{ steps.production.outputs.host }}:${{ steps.production.outputs['docs-path'] }}/"
- name: Promotion summary
run: |
echo "✅ Promotion completed"
echo " Tag: ${{ github.event.inputs.tag }}"
echo " Service: ${{ steps.staging.outputs.host }} → ${{ steps.production.outputs.host }}"
if [ "${{ github.event.inputs.include_docs }}" = "true" ]; then
echo " Docs: included"
else
echo " Docs: skipped"
fi

.gitignore

@@ -1,21 +1,21 @@
# Build outputs
bin/
obj/
*.pdb
*.dll
# IDE state
.vs/
*.user
*.suo
*.userprefs
# Rider/VSCode
.idea/
.vscode/
# Packages and logs
*.log
TestResults/
.dotnet

AGENTS.md

@@ -1,125 +1,125 @@
# 1) What is StellaOps?
**StellaOps** is an open, sovereign, modular container-security toolkit built for high-speed, offline operation, released under AGPL-3.0-or-later.
It follows an SBOM-first model—analyzing each container layer or ingesting existing CycloneDX/SPDX SBOMs, then enriching them with vulnerability, licence, secret-leak, and misconfiguration data to produce cryptographically signed reports.
Vulnerability detection maps OS and language dependencies to sources such as NVD, GHSA, OSV, ENISA.
Secrets sweep flags exposed credentials or keys in files or environment variables.
Licence audit identifies potential conflicts, especially copyleft obligations.
Misconfiguration checks detect unsafe Dockerfile patterns (root user, latest tags, permissive modes).
Provenance features include in-toto/SLSA attestations signed with cosign for supply-chain trust.
| Guiding principle | What it means for Feedser |
|-------------------|---------------------------|
| **SBOM-first ingest** | Prefer signed SBOMs or reproducible layer diffs before falling back to raw scraping; connectors treat source docs as provenance, never as mutable truth. |
| **Deterministic outputs** | Same inputs yield identical canonical advisories and exported JSON/Trivy DB artefacts; merge hashes and export manifests are reproducible across machines. |
| **Restart-time plug-ins only** | Connector/exporter plug-ins load at service start, keeping runtime sandboxing simple and avoiding hot-patch attack surface. |
| **Sovereign/offline-first** | No mandatory outbound calls beyond allow-listed advisories; Offline Kit bundles Mongo snapshots and exporter artefacts for air-gapped installs. |
| **Operational transparency** | Every stage logs structured events (fetch, parse, merge, export) with correlation IDs so parallel agents can debug without shared state. |
Performance: warm scans < 5 s, cold scans < 30 s on a 4 vCPU runner.
Deployment: entirely SaaS-free, suitable for air-gapped or on-prem use through its Offline Kit.
Policy: anonymous users get 33 scans/day; verified users 333/day; nearing 90 % of the quota triggers throttling but never full blocks.
More documentation is available in the `./docs/*.md` files. Read `docs/README.md` for an overview of the available documentation, and consult specific documents as your work requires.
---
# 3) Practices
## 3.1) Naming
All modules are .NET projects based on .NET 10 (preview); the exception is the UI, which is based on Angular.
Every module consists of one or more projects. Each project goes in its own dedicated folder and is named StellaOps.<ModuleName>. Anything common to all StellaOps modules is a library or plugin and is named StellaOps.<LibraryOrPlugin>.
## 3.2) Key technologies & integrations
- **Runtime**: .NET 10 (`net10.0`) preview SDK; C# latest preview features.
- **Data**: MongoDB (canonical store and job/export state).
- **Observability**: structured logs, counters, and (optional) OpenTelemetry traces.
- **Ops posture**: offline-first, allow-list for remote hosts, strict schema validation, gated LLM fallback (only where explicitly configured).
# 4) Modules
StellaOps is composed of different modules, installable as Docker containers:
- Feedser. Responsible for aggregating and delivering the vulnerability database.
- Cli. Command-line tool that unlocks the full potential: request database operations, install the scanner, request scans, configure the backend.
- Backend. Configures and manages scans.
- UI. UI to access the backend (and scanners).
- Agent. Installable daemon that performs the scanning.
- Zastava. Real-time monitor for allowed (verified) installations.
## 4.1) Feedser
Feedser is a web-service module responsible for aggregating vulnerability information from various sources, parsing and normalizing it into a canonical shape, and merging and deduplicating the results in one place, with export capabilities to JSON and Trivy DB. It supports init and resume for all of the sources, parse/normalize and merge/deduplicate operations, plus export. Export supports delta exports, similar to full and incremental database backups.
### 4.1.1) Usage
Operations can be started from the command line:
# stella db [fetch|merge|export] [init|resume <point>]
or through the API available at https://db.stella-ops.org.
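For illustration, a few invocations following that grammar might look like the sketch below (the subcommand grammar comes from the line above; anything beyond it is an assumption):

```bash
# Hypothetical invocations of the Feedser CLI grammar shown above.
stella db fetch init             # start ingestion from scratch
stella db fetch resume <point>   # resume ingestion from a saved point
stella db merge                  # merge and deduplicate canonical advisories
stella db export                 # export JSON / Trivy DB artefacts
```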
### 4.1.2) Data flow (end-to-end)
1. **Fetch**: connectors request source windows with retries/backoff and persist raw documents with SHA256/ETag metadata (see the sketch after this list).
2. **Parse & Normalize**: validate to DTOs (schema-checked), quarantine failures, normalize to canonical advisories (aliases, affected ranges with NEVRA/EVR/SemVer, references, provenance).
3. **Merge & Deduplicate**: enforce precedence, build/maintain alias graphs, compute deterministic hashes, and eliminate duplicates before persisting to MongoDB.
4. **Export**: JSON tree and/or Trivy DB; package and (optionally) push; write export state.
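A rough sketch of the fetch-stage contract from step 1, assuming a generic HTTP source (the URL and file names are placeholders):

```bash
#!/usr/bin/env bash
# Sketch only: persist a raw advisory document together with its SHA256 and
# ETag so a later run can detect unchanged content and resume cheaply.
set -euo pipefail
URL="https://example.org/feed/advisories.json"   # hypothetical source
curl -sS --retry 3 --retry-delay 2 -D headers.txt -o raw.json "$URL"
sha256sum raw.json > raw.json.sha256
grep -i '^etag:' headers.txt | tr -d '\r' > raw.json.etag || true
```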
### 4.1.3) Architecture
For more information on the architecture, see `./docs/ARCHITECTURE_FEEDSER.md`.
---
### 4.1.4) Glossary (quick)
- **OVAL** — Vendor/distro security definition format; authoritative for OS packages.
- **NEVRA / EVR** — RPM and Debian version semantics for OS packages (illustrated after this list).
- **PURL / SemVer** — Coordinates and version semantics for OSS ecosystems.
- **KEV** — Known Exploited Vulnerabilities (flag only).
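A quick illustration of EVR ordering, assuming a host with `dpkg` available (the version strings are made up):

```bash
# Debian EVR is epoch:version-revision; a higher epoch outranks any upstream version.
dpkg --compare-versions "1:1.2-1" gt "1.4-1" && echo "epoch wins"
```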
---
# 5) Your role as StellaOps contributor
You act as an information-technology engineer who takes on different roles with the goal of bringing StellaOps to a production implementation.
To work, you must be supplied with a directory that contains `AGENTS.md` and `TASKS.md` files. There you will find more information about your role, the scope of your work, and the tasks you will have.
Boundaries:
- You operate only in the working directories I gave you, unless a dependency requires you to work in a shared directory; in that case, ask for confirmation.
Your main characteristics:
- Keep endpoints small, deterministic, and cancellation-aware.
- Improve logs/metrics as per tasks.
- Update `TASKS.md` when moving tasks forward.
- When you are done with all tasks, you state explicitly that you are done.
- Impersonate the role described in the working directory's `AGENTS.md`; if no role is available, take the role of the CTO of StellaOps in its early stages.
- You always strive for best practices.
- You always strive for re-usability.
- When in doubt about a design decision, ask, then act.
- You are autonomous, meaning that you will work alone for a long time and achieve the maximum without stopping for stupid questions.
- You operate in the same directory where other agents work. If you need to work on a directory that is a dependency of the provided `AGENTS.md`/`TASKS.md` files, ask for confirmation first.
## 5.1) Type of contributions
- **BE-Base (Platform & Pipeline)**
Owns DI, plugin host, job scheduler/coordinator, configuration binding, minimal API endpoints, and Mongo bootstrapping.
- **BE-Conn-X (Connectors)**
One agent per source family (NVD, Red Hat, Ubuntu, Debian, SUSE, GHSA, OSV, PSIRTs, CERTs, KEV, ICS). Implements fetch/parse/map with incremental watermarks.
- **BE-Merge (Canonical Merge & Dedupe)**
Identity graph, precedence policies, canonical JSON serializer, and deterministic hashing (`merge_event`).
- **BE-Export (JSON & Trivy DB)**
Deterministic export trees, Trivy DB packaging, optional ORAS push, and offline bundle.
- **QA (Validation & Observability)**
Schema tests, fixture goldens, determinism checks, metrics/logs/traces, e2e reproducibility runs.
- **DevEx/Docs**
Maintains this agent framework, templates, and per-directory guides; assists parallelization and reviews.
## 5.2) Work-in-parallel rules (important)
- **Directory ownership**: Each agent works **only inside its module directory**. Cross-module edits require a brief handshake in issues/PR description.
- **Scoping**: Use each module's `AGENTS.md` and `TASKS.md` to plan; autonomous agents must read `src/AGENTS.md` and the module docs before acting.
- **Determinism**: Sort keys, normalize timestamps to UTC ISO-8601, and avoid non-deterministic data in exports and tests (see the sketch after this list).
- **Status tracking**: Update your module's `TASKS.md` as you progress (TODO → DOING → DONE/BLOCKED).
- **Tests**: Add/extend fixtures and unit tests per change; never regress determinism or precedence.
- **Test layout**: Use module-specific projects in `StellaOps.Feedser.<Component>.Tests`; shared fixtures/harnesses live in `StellaOps.Feedser.Testing`.
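A minimal sketch of the determinism rule above, assuming `jq` is available (the file name is hypothetical):

```bash
# Sort object keys before hashing so identical content yields identical digests.
jq -S '.' advisory.json | sha256sum
# Normalize timestamps to UTC ISO-8601 when generating fixtures.
date -u +"%Y-%m-%dT%H:%M:%SZ"
```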
---
# 1) What is StellaOps?
**StellaOps** an open, sovereign, modular container-security toolkit built for high-speed, offline operation, released under AGPL-3.0-or-later.
It follows an SBOM-first model—analyzing each container layer or ingesting existing CycloneDX/SPDX SBOMs, then enriching them with vulnerability, licence, secret-leak, and misconfiguration data to produce cryptographically signed reports.
Vulnerability detection maps OS and language dependencies to sources such as NVD, GHSA, OSV, ENISA.
Secrets sweep flags exposed credentials or keys in files or environment variables.
Licence audit identifies potential conflicts, especially copyleft obligations.
Misconfiguration checks detect unsafe Dockerfile patterns (root user, latest tags, permissive modes).
Provenance features include in-toto/SLSA attestations signed with cosign for supply-chain trust.
| Guiding principle | What it means for Feedser |
|-------------------|---------------------------|
| **SBOM-first ingest** | Prefer signed SBOMs or reproducible layer diffs before falling back to raw scraping; connectors treat source docs as provenance, never as mutable truth. |
| **Deterministic outputs** | Same inputs yield identical canonical advisories and exported JSON/Trivy DB artefacts; merge hashes and export manifests are reproducible across machines. |
| **Restart-time plug-ins only** | Connector/exporter plug-ins load at service start, keeping runtime sandboxing simple and avoiding hot-patch attack surface. |
| **Sovereign/offline-first** | No mandatory outbound calls beyond allow-listed advisories; Offline Kit bundles Mongo snapshots and exporter artefacts for air-gapped installs. |
| **Operational transparency** | Every stage logs structured events (fetch, parse, merge, export) with correlation IDs so parallel agents can debug without shared state. |
Performance: warm scans < 5 s, cold scans < 30 s on a 4 vCPU runner.
Deployment: entirely SaaS-free, suitable for air-gapped or on-prem use through its Offline Kit.
Policy: anonymous users 33 scans/day; verified 333 /day; nearing 90 % quota triggers throttling but never full blocks.
More documention is available ./docs/*.md files. Read `docs/README.md` to gather information about the available documentation. You could inquiry specific documents as your work requires it
---
# 3) Practices
## 3.1) Naming
All modules are .NET projects based on .NET 10 (preview). Exclussion is the UI. It is based on Angular
All modules are contained by one or more projects. Each project goes in its dedicated folder. Each project starts with StellaOps.<ModuleName>. In case it is common for for all StellaOps modules it is library or plugin and it is named StellaOps.<LibraryOrPlugin>.
## 3.2) Key technologies & integrations
- **Runtime**: .NET 10 (`net10.0`) preview SDK; C# latest preview features.
- **Data**: MongoDB (canonical store and job/export state).
- **Observability**: structured logs, counters, and (optional) OpenTelemetry traces.
- **Ops posture**: offlinefirst, allowlist for remote hosts, strict schema validation, gated LLM fallback (only where explicitly configured).
# 4) Modules
StellaOps is contained by different modules installable via docker containers
- Feedser. Responsible for aggregation and delivery of vulnerability database
- Cli. Command line tool to unlock full potential - request database operations, install scanner, request scan, configure backend
- Backend. Configures and Manages scans
- UI. UI to access the backend (and scanners)
- Agent. Installable daemon that does the scanning
- Zastava. Realtime monitor for allowed (verified) installations.
## 4.1) Feedser
It is webservice based module that is responsible for aggregating vulnerabilities information from various sources, parsing and normalizing them into a canonical shape, merging and deduplicating the results in one place, with export capabilities to Json and TrivyDb. It supports init and resume for all of the sources, parse/normalize and merge/deduplication operations, plus export. Export supports delta exports—similarly to full and incremential database backups.
### 4.1.1) Usage
It supports operations to be started by cmd line:
# stella db [fetch|merge|export] [init|resume <point>]
or
api available on https://db.stella-ops.org
### 4.1.2) Data flow (endtoend)
1. **Fetch**: connectors request source windows with retries/backoff, persist raw documents with SHA256/ETag metadata.
2. **Parse & Normalize**: validate to DTOs (schema-checked), quarantine failures, normalize to canonical advisories (aliases, affected ranges with NEVRA/EVR/SemVer, references, provenance).
3. **Merge & Deduplicate**: enforce precedence, build/maintain alias graphs, compute deterministic hashes, and eliminate duplicates before persisting to MongoDB.
4. **Export**: JSON tree and/or Trivy DB; package and (optionally) push; write export state.
### 4.1.3) Architecture
For more information of the architecture see `./docs/ARCHITECTURE_FEEDSER.md`.
---
### 4.1.4) Glossary (quick)
- **OVAL** — Vendor/distro security definition format; authoritative for OS packages.
- **NEVRA / EVR** — RPM and Debian version semantics for OS packages.
- **PURL / SemVer** — Coordinates and version semantics for OSS ecosystems.
- **KEV** — Known Exploited Vulnerabilities (flag only).
---
# 5) Your role as StellaOps contributor
You acting as information technology engineer that will take different type of roles in goal achieving StellaOps production implementation
In order you to work - you have to be supplied with directory that contains `AGENTS.md`,`TASKS.md` files. There will you have more information about the role you have, the scope of your work and the tasks you will have.
Boundaries:
- You operate only in the working directories I gave you, unless there is dependencies that makes you to work on dependency in shared directory. Then you ask for confirmation.
You main characteristics:
- Keep endpoints small, deterministic, and cancellation-aware.
- Improve logs/metrics as per tasks.
- Update `TASKS.md` when moving tasks forward.
- When you are done with all task you state explicitly you are done.
- Impersonate the role described on working directory `AGENTS.md` you will read, if role is not available - take role of the CTO of the StellaOps in early stages.
- You always strive for best practices
- You always strive for re-usability
- When in doubt of design decision - you ask then act
- You are autonomus - meaning that you will work for long time alone and achieve maximum without stopping for stupid questions
- You operate on the same directory where other agents will work. In case you need to work on directory that is dependency on provided `AGENTS.md`,`TASKS.md` files you have to ask for confirmation first.
## 5.1) Type of contributions
- **BE-Base (Platform & Pipeline)**
Owns DI, plugin host, job scheduler/coordinator, configuration binding, minimal API endpoints, and Mongo bootstrapping.
- **BE-Conn-X (Connectors)**
One agent per source family (NVD, Red Hat, Ubuntu, Debian, SUSE, GHSA, OSV, PSIRTs, CERTs, KEV, ICS). Implements fetch/parse/map with incremental watermarks.
- **BE-Merge (Canonical Merge & Dedupe)**
Identity graph, precedence policies, canonical JSON serializer, and deterministic hashing (`merge_event`).
- **BE-Export (JSON & Trivy DB)**
Deterministic export trees, Trivy DB packaging, optional ORAS push, and offline bundle.
- **QA (Validation & Observability)**
Schema tests, fixture goldens, determinism checks, metrics/logs/traces, e2e reproducibility runs.
- **DevEx/Docs**
Maintains this agent framework, templates, and per-directory guides; assists parallelization and reviews.
## 5.2) Work-in-parallel rules (important)
- **Directory ownership**: Each agent works **only inside its module directory**. Cross-module edits require a brief handshake in the issue/PR description.
- **Scoping**: Use each module's `AGENTS.md` and `TASKS.md` to plan; autonomous agents must read `src/AGENTS.md` and the module docs before acting.
- **Determinism**: Sort keys, normalize timestamps to UTC ISO-8601, and avoid non-deterministic data in exports and tests (see the sketch after this list).
- **Status tracking**: Update your module's `TASKS.md` as you progress (TODO → DOING → DONE/BLOCKED).
- **Tests**: Add/extend fixtures and unit tests per change; never regress determinism or precedence.
- **Test layout**: Use module-specific projects in `StellaOps.Feedser.<Component>.Tests`; shared fixtures/harnesses live in `StellaOps.Feedser.Testing`.
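A minimal determinism sketch, assuming `System.Text.Json`; it illustrates the sorted-keys and UTC ISO-8601 rules, not the project's actual serializer:

```csharp
using System;
using System.Collections.Generic;
using System.Text.Json;

// Sketch only: canonical serialization helpers for deterministic exports and tests.
static class CanonicalJson
{
    // Normalize every timestamp to UTC ISO-8601 before it enters an exported document.
    public static string NormalizeTimestamp(DateTimeOffset value) =>
        value.ToUniversalTime().ToString("yyyy-MM-dd'T'HH:mm:ss'Z'");

    // Emit properties in sorted key order so exports stay byte-for-byte reproducible.
    public static string Serialize(IReadOnlyDictionary<string, string> fields) =>
        JsonSerializer.Serialize(new SortedDictionary<string, string>(fields, StringComparer.Ordinal));
}
```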
---
View File
@@ -1,2 +1,28 @@
# git.stella-ops.org
# StellaOps Feedser & CLI
This repository hosts the StellaOps Feedser service, its plug-in ecosystem, and the
first-party CLI (`stellaops-cli`). Feedser ingests vulnerability advisories from
authoritative sources, stores them in MongoDB, and exports deterministic JSON and
Trivy DB artefacts. The CLI drives scanner distribution, scan execution, and job
control against the Feedser API.
## Quickstart
1. Prepare a MongoDB instance and (optionally) install `trivy-db`/`oras`.
2. Copy `etc/feedser.yaml.sample` to `etc/feedser.yaml` and update the storage + telemetry
settings.
3. Copy `etc/authority.yaml.sample` to `etc/authority.yaml`, review the issuer, token
lifetimes, and plug-in descriptors, then edit the companion manifests under
`etc/authority.plugins/*.yaml` to match your deployment.
4. Start the web service with `dotnet run --project src/StellaOps.Feedser.WebService`.
5. Configure the CLI via environment variables (e.g. `STELLAOPS_BACKEND_URL`) and trigger
jobs with `dotnet run --project src/StellaOps.Cli -- db merge`.
Detailed operator guidance is available in `docs/10_FEEDSER_CLI_QUICKSTART.md`. API and
command reference material lives in `docs/09_API_CLI_REFERENCE.md`.
Pipeline note: deployment workflows should template `etc/feedser.yaml` during CI/CD,
injecting environment-specific Mongo credentials and telemetry endpoints. Upcoming
releases will add Microsoft OAuth (Entra ID) authentication support; track the quickstart
for integration steps once available.
View File
@@ -0,0 +1,46 @@
StellaOps Authority Project - Phased Execution Prompts
Teams:
- Team 1: DevEx / Platform (see AGENTS.md, StellaOps.Authority.TODOS.md, StellaOps.Authority.TODOS.DevEx.md)
- Team 2: Authority Core Service (see AGENTS.md, StellaOps.Authority.TODOS.md, StellaOps.Authority.TODOS.AuthorityCore.md)
- Team 3: Plugin Workstream (see AGENTS.md, StellaOps.Authority.TODOS.md, StellaOps.Authority.TODOS.Plugin.md)
- Team 4: Auth Libraries (see AGENTS.md, StellaOps.Authority.TODOS.md, StellaOps.Authority.TODOS.AuthLibraries.md)
- Team 5: Feedser Integration (see AGENTS.md, StellaOps.Authority.TODOS.md, StellaOps.Authority.TODOS.Feedser.md)
- Team 6: CLI (see AGENTS.md, StellaOps.Authority.TODOS.md, StellaOps.Authority.TODOS.CLI.md)
- Team 7: DevOps / Observability (see AGENTS.md, StellaOps.Authority.TODOS.md, StellaOps.Authority.TODOS.DevOps.md)
- Team 8: Security Guild (see AGENTS.md, StellaOps.Authority.TODOS.md, StellaOps.Authority.TODOS.Security.md)
- Team 9: Docs & Enablement (see AGENTS.md, StellaOps.Authority.TODOS.md, StellaOps.Authority.TODOS.Docs.md)
Phase 0 - Bootstrapping
- Prompt Team 1: "Read AGENTS.md, StellaOps.Authority.TODOS.md, and StellaOps.Authority.TODOS.DevEx.md. Complete FND1 → FND3 (solution scaffold, build props, AuthorityOptions binding). Report when the Authority solution builds clean."
- Wait until FND1-FND3 are DONE before continuing.
Phase 1 - Core Foundations
- Prompt Team 1: "Continue with StellaOps.Authority.TODOS.DevEx.md. Deliver FND4, FND5, and PLG5 (config samples, telemetry constants, plugin config loader)."
- Prompt Team 2: "Read AGENTS.md, StellaOps.Authority.TODOS.md, and StellaOps.Authority.TODOS.AuthorityCore.md. Implement CORE1 + CORE2 (minimal API host, OpenIddict endpoints). Verify /health and /ready before proceeding."
- Prompt Team 3: "Read AGENTS.md, StellaOps.Authority.TODOS.md, and StellaOps.Authority.TODOS.Plugin.md. Execute PLG1-PLG3 (abstractions, plugin loader integration, Mongo-based Standard plugin stub). Coordinate schema details with Team 1."
- Do not start Phase 2 until Team 2 finishes CORE1-CORE2 and Team 3 finishes PLG1-PLG3.
Phase 2 - Core Expansion & Libraries
- Prompt Team 2: "Continue with StellaOps.Authority.TODOS.AuthorityCore.md tasks CORE3-CORE6 (Mongo stores, plugin capability wiring, bootstrap admin APIs)."
- Prompt Team 3: "Advance PLG4-PLG6 (capability metadata, config validation, plugin developer guide draft)."
- Prompt Team 4: "Read AGENTS.md, StellaOps.Authority.TODOS.md, and StellaOps.Authority.TODOS.AuthLibraries.md. Deliver LIB1-LIB4 (abstractions, NetworkMaskMatcher, ServerIntegration DI, Auth.Client)."
- Move to Phase 3 only after CORE3-CORE6, PLG4-PLG6, and LIB1-LIB4 are DONE.
Phase 3 - Integration & Ops
- Prompt Team 2: "Finish CORE7-CORE10 (telemetry, rate limiting, revocation list, key rotation/JWKS)."
- Prompt Team 3: "Complete PLG6 handoff and draft the PLG7 RFC if bandwidth allows."
- Prompt Team 4: "Implement LIB5-LIB6 (Polly integration, packaging metadata)."
- Prompt Team 5: "Read AGENTS.md, StellaOps.Authority.TODOS.md, and StellaOps.Authority.TODOS.Feedser.md. Execute FSR1-FSR3 (config, auth wiring, bypass masks) then FSR4 docs updates."
- Prompt Team 6: "Read AGENTS.md, StellaOps.Authority.TODOS.md, and StellaOps.Authority.TODOS.CLI.md. Deliver CLI1-CLI4 (config, auth commands, bearer injection, docs)."
- Prompt Team 7: "Read AGENTS.md, StellaOps.Authority.TODOS.md, and StellaOps.Authority.TODOS.DevOps.md. Execute OPS1-OPS5 (Dockerfile/compose, CI pipeline, key rotation tooling, backup docs, monitoring)."
- Prompt Team 9: "Read AGENTS.md, StellaOps.Authority.TODOS.md, and StellaOps.Authority.TODOS.Docs.md. Draft DOC1-DOC4 in parallel; update DOC5 once Feedser/CLI changes land."
- Proceed to Phase 4 only when CORE10, PLG6, LIB6, FSR4, CLI4, OPS5, and DOC4 are complete.
Phase 4 - Security & Final Integration
- Prompt Team 8: "Read AGENTS.md, StellaOps.Authority.TODOS.md, and StellaOps.Authority.TODOS.Security.md. Execute SEC1-SEC5 (password hashing, audit log review, lockout/rate-limit validation, revocation signing, threat model). Review Feedser/CLI for security compliance."
- Prompt Team 5: "Run FSR5 (Authority ↔ Feedser integration tests) using the DevOps compose stack."
- Prompt Team 6: "Finalize CLI auth enhancements and ensure tests reflect Security feedback."
- Prompt Team 7: "Support integration testing, finalize runbooks, confirm monitoring dashboards."
- Prompt Team 9: "Incorporate Security findings, finalize the DOC3 migration guide, DOC5 README/quickstart updates, and release notes."
- Wrap up after SEC5 sign-off and successful FSR5 execution.
View File
@@ -0,0 +1,42 @@
# StellaOps Authority - Authentication Libraries Team
> **Read first:** `AGENTS.md`, `StellaOps.Authority.TODOS.md`, and this plan. Keep status synchronized across trackers.
## Mission
Deliver shared authentication components consumed by resource servers, clients, and tooling: abstractions, DI helpers, token clients, and supporting utilities.
## Task Breakdown
| Order | Task IDs | Description | Dependencies | Acceptance |
|-------|----------|-------------|--------------|------------|
| 1 | LIB1 | Stand up `StellaOps.Auth.Abstractions` (claims, scopes, principal builder, ProblemResultFactory). | DevEx FND1 | Unit tests covering claim normalization + problem responses. |
| 2 | LIB3 | Implement `NetworkMaskMatcher` with IPv4/IPv6 CIDR support; port tests from Serdica inspiration. | LIB1 | 100% branch coverage on mask utilities. |
| 3 | LIB2 | Build `StellaOps.Auth.ServerIntegration` (DI extension wiring JwtBearer, bypass masks, policy helpers). | LIB1, LIB3 | Add integration test with stub Authority JWKS. |
| 4 | LIB4 | Build `StellaOps.Auth.Client` (discovery, JWKS caching, password/client credential flows, token cache abstraction). | LIB1 | Provide `IStellaOpsTokenClient` interfaces. |
| 5 | LIB5 | Integrate Polly + HttpClientFactory patterns (configurable retries/backoff) in Auth.Client. | LIB4 | Config tested via options binding. |
| 6 | LIB6 | Prepare NuGet packaging metadata (license, tags) and update build pipeline to push once stabilized. | LIB1-LIB5 | Validate `dotnet pack` outputs signed packages. |
## Implementation Notes
- All option classes should bind via `StellaOps.Configuration` naming conventions.
- Token client must support a file-based cache (for CLI) and an in-memory cache (for services); see the sketch after these notes.
- Provide sample usage snippets for Feedser integration (to hand off).
- Consider adding `IClaimsTransformation` helper for ASP.NET resource servers.
- Ensure authentication failures map to standard problem responses (missing/expired token, insufficient scope).
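A hedged sketch of how the client pieces might fit together; `IStellaOpsTokenClient` is the interface named above, while the member and cache shapes are assumptions for illustration:

```csharp
using System;
using System.Threading;
using System.Threading.Tasks;

// Sketch only: member signatures are assumptions until LIB4's API is finalized.
public interface ITokenCache
{
    ValueTask<string?> GetAsync(string key, CancellationToken ct);
    ValueTask SetAsync(string key, string token, DateTimeOffset expiresAt, CancellationToken ct);
}

public interface IStellaOpsTokenClient
{
    // Client-credentials flow: consult the cache first, then hit /token on a miss.
    Task<string> GetClientCredentialsTokenAsync(string clientId, string clientSecret, CancellationToken ct);
}
```

A CLI host would register the file-based cache; long-running services would register the in-memory one.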
## Deliverables
- Three new projects: `StellaOps.Auth.Abstractions`, `.ServerIntegration`, `.Client`.
- Unit + integration tests, coverage reports.
- Example integration docs/snippets for Feedser and CLI teams.
- Packaging metadata ready for CI once green-lit.
## Coordination
- Weekly sync with Authority Core + Feedser Integration to align on scopes/policies.
- Share NuGet package versions with DevEx once published.
- Notify CLI team when the client API stabilizes (unlocks CLI1-CLI3).
- Coordinate with Security Guild on bypass mask semantics and default policies.
## Status (2025-10-10)
- LIB1 - DONE: Principal builder/problem factory complete with unit coverage.
- LIB3 - DONE: `NetworkMaskMatcher` replaces Serdica helpers with IPv4/6 tests.
- LIB2 - DONE: `AddStellaOpsResourceServerAuthentication` with scope/bypass policies implemented.
- LIB4 - DONE: Auth client, discovery/JWKS caches, in-memory/file token caches with happy-path tests delivered.
View File
@@ -0,0 +1,56 @@
# StellaOps Authority - Core Service Team
> **Read first:** `AGENTS.md`, `StellaOps.Authority.TODOS.md`, and this plan. Update status in both TODO trackers.
## Mission
Design and implement the Authority host (OpenIddict server, token lifecycles, administrative endpoints) on top of the DevEx scaffold, coordinating with Plugin, Library, and Security teams.
## Work Breakdown
| Order | Task IDs | Description | Dependencies | Acceptance |
|-------|----------|-------------|--------------|------------|
| 1 | CORE1 | Wire minimal API host with configuration, logging, plugin discovery, `/health` + `/ready`. | DevEx FND1-FND5 | Manual smoke: `dotnet run` returns healthy responses. |
| 2 | CORE2 | Configure OpenIddict server endpoints & flows (password, client credentials, refresh, jwks). | CORE1 | Supports HTTPS enforcement toggle via options. |
| 3 | CORE3 | Implement Mongo repositories for users/clients/scopes/tokens/login attempts. | CORE1 | Collections + indices documented; unit tests for CRUD. |
| 4 | CORE4 | Integrate plugin contracts (`IIdentityProviderPlugin`, etc.) into DI; load capabilities. | PLG1 | Plugins registered through host on startup. |
| 5 | CORE5 | Port/customize OpenIddict handlers (password/client creds validation) to use plugin contracts. | CORE4 | Unit tests for success/failure scenarios. |
| 5a | CORE5A | Add integration tests covering token persistence & revocation via `IAuthorityTokenStore`. | CORE5 | Ensure revoked tokens denied + fixtures for access/reference tokens. |
| 5b | CORE5B | Document token persistence & enrichment flows for resource servers/plugins. | CORE5 | Docs updated with claim expectations + revocation sync guidance. |
| 6 | CORE6 | Implement bootstrap admin endpoints (`/internal/users`, `/internal/clients`) secured via bootstrap API key. | CORE5 | Add rate limiting + audit logs. |
| 7 | CORE7 & CORE8 | Add structured logging, OTEL spans, and ASP.NET rate limiting for `/token`, `/authorize`. | CORE5 | Verify via integration tests, metrics exported. |
| 8 | CORE9 | Implement token revocation + signed offline revocation manifest generation hooks. | CORE5 | CLI call returns signed JSON; tests confirm revoked tokens denied. |
| 9 | CORE10 | Configure signing/encryption key rotation, JWKS publishing, certificate loader. | CORE5 | Document rotation steps; integration test covers key rollover. |
## Implementation Notes
- All Mongo repositories must align with offline-first design (no TTL for critical data unless configurable).
- Expose metrics counters (issued tokens, failed attempts) for DevOps consumption.
- Coordinate with Security Guild for password hashing options (Argon2 vs PBKDF2), lockout thresholds.
- Ensure plugin capability metadata is honored (e.g., if a plugin lacks password support, reject password grants gracefully; sketched after these notes).
- Provide integration hooks for future LDAP plugin (capability flag + TODO comment).
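A sketch of the capability check described above; `IIdentityProviderPlugin` is the contract named in this plan, but the member shapes here are assumptions:

```csharp
// Sketch only: member shapes are illustrative, not the real plugin contracts.
public interface IIdentityProviderPlugin
{
    string Name { get; }
    AuthorityPluginCapabilities Capabilities { get; }
}

public sealed record AuthorityPluginCapabilities(
    bool SupportsPassword, bool SupportsMfa, bool SupportsClientProvisioning);

public static class PasswordGrantGuard
{
    // Reject password grants gracefully when the active plugin lacks the capability.
    public static bool TryAuthorize(IIdentityProviderPlugin plugin, out string? error)
    {
        if (!plugin.Capabilities.SupportsPassword)
        {
            error = $"Plugin '{plugin.Name}' does not support password authentication.";
            return false;
        }

        error = null;
        return true;
    }
}
```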
## Status
- [x] CORE1 - Completed 2025-10-09. Minimal API host loads validated configuration, configures Serilog, registers plugins, and exposes `/health` + `/ready`.
- [x] CORE2 - Completed 2025-10-09. OpenIddict server configured with required endpoints, token lifetimes, sliding refresh tokens, and Development-only HTTPS relaxation.
- [x] CORE3 - Completed 2025-10-09. Mongo storage project created with indexed Authority collections, repositories, and bootstrap migration runner.
- [ ] CORE4 - Not started.
- [x] CORE5 - Completed 2025-10-10 with client-credentials validation, token validation handlers, and token persistence wired through plugin contracts.
- [ ] CORE5A - Pending integration tests for token persistence/revocation behaviour (QA + BE-Auth pairing).
- [ ] CORE5B - Pending documentation refresh covering claims enrichment + token store expectations.
- [x] CORE6 - Completed 2025-10-10. Bootstrap admin APIs behind an API key provision users and clients through plugin stores.
- [ ] CORE7 - Not started.
- [ ] CORE8 - Not started.
- [ ] CORE9 - Not started.
- [ ] CORE10 - Not started.
## Deliverables
- `StellaOps.Authority` project with tested endpoints and handlers.
- Repository docs summarizing API responses (shared with Docs team).
- Integration tests (Authority-only) verifying token issuance + revocation.
- Audit logging implemented (structured with trace IDs).
## Coordination
- Daily stand-up with Plugin + Libraries teams until CORE5 complete (met objective 2025-10-10).
- Notify DevOps when `/token` contract stabilizes (OPS pipeline).
- Work with Docs to capture endpoint behavior for `docs/11_AUTHORITY.md`.
- Review PRs from Plugin & Libraries teams affecting Authority host.
View File
@@ -0,0 +1,35 @@
# StellaOps Authority - CLI Team
> **Read first:** `AGENTS.md`, `StellaOps.Authority.TODOS.md`, and this plan. Keep status aligned in all trackers.
## Mission
Enable `stellaops-cli` to authenticate against StellaOps Authority, manage tokens, and surface auth-related UX for operators.
## Task Queue
| Order | Task IDs | Description | Dependencies | Acceptance |
|-------|----------|-------------|--------------|------------|
| 1 | CLI1 | Extend `StellaOpsCliOptions` and configuration bootstrap to include Authority settings (AuthorityUrl, ClientId/Secret, Username/Password). | LIB4 | **DONE (2025-10-10)** - Options bind authority fields, env fallbacks documented, and cache directory defaults to `~/.stellaops/tokens`. |
| 2 | CLI2 | Implement `auth` command group (`login`, `logout`, `status`) using `StellaOps.Auth.Client`. | CLI1, LIB4 | **DONE (2025-10-10)** - Commands support client-credentials/password flows, force re-auth, and surface status output. |
| 3 | CLI3 | Ensure all backend calls attach bearer tokens; handle 401/403 with clear messaging and retry guidance. | CLI2, LIB2 | **DONE (2025-10-10)** - Backend client now resolves cached tokens via shared helper and attaches Authorization headers on every call. |
| 4 | CLI4 | Update help text and docs (quickstart + API reference) to describe new auth workflow. | CLI1-CLI3 | Coordinate with Docs team for final copy. |
| 5 | OPTIONAL | Add `auth whoami` to display token scopes/expiry (post-MVP if time allows). | CLI2 | Non-blocking enhancement. |
## Implementation Notes
- Token cache path defaults to `~/.stellaops/tokens`; allow override via config.
- Handle offline mode gracefully (cached token reuse, helpful errors).
- Provide verbose logging around token acquisition (without dumping secrets).
- Support non-interactive mode (env vars) for CI pipelines.
- Align CLI exit codes with backend problem types (401 -> exit 10, etc.).
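The 401 → exit 10 pairing is the only mapping pinned down above; a sketch of the helper, with the remaining codes as hypothetical placeholders:

```csharp
// Sketch only: 401 -> 10 comes from this plan; 11/12 are placeholder assumptions.
public static class AuthExitCodes
{
    public static int FromStatusCode(int httpStatus) => httpStatus switch
    {
        401 => 10,                      // unauthorized: token missing or expired
        403 => 11,                      // forbidden: insufficient scope (assumed)
        _ when httpStatus >= 500 => 12, // backend failure (assumed)
        _ => 0,
    };
}
```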
## Deliverables
- Updated CLI project + tests.
- Docs/help updates referencing Authority integration.
- Sample command snippets for operators (login, job trigger with scope).
- Changelog entry describing auth changes.
## Coordination
- Collaborate with Auth Libraries team to stabilize client API.
- Sync with Feedser integration to ensure required scopes align.
- Provide feedback to Authority Core on error payloads for better CLI UX.
- Work with Docs team for documentation rollout.
View File
@@ -0,0 +1,35 @@
# StellaOps Authority - DevEx / Platform Workstream
> **Read first:** `AGENTS.md`, `StellaOps.Authority.TODOS.md`, and this file.
> Keep task status synced in both TODO trackers whenever items move (TODO → DOING → DONE/BLOCKED).
## Scope
- Repository scaffolding, shared configuration plumbing, sample configs, telemetry constants.
- Provide the baseline everyone else builds on; unblock quickly, announce breaking changes on the shared channel.
## Deliverables & Checklist
| Order | Task ID | Description | Dependencies | Notes |
|-------|---------|-------------|--------------|-------|
| 1 | FND1 | Create `src/StellaOps.Authority` solution layout (Authority host, Plugins.Abstractions, Plugin.Standard stub, Auth libraries). | none | **DONE** - Solution scaffolding live with net10.0 preview defaults + project references. |
| 2 | FND2 | Update repository build props/targets for new projects; ensure analyzers + nullable + treat warnings as errors. | FND1 | **DONE** - Directory.Build props/targets extended; root `StellaOps.sln` added (root build still surfaced existing Feedser compile failures). |
| 3 | FND3 | Extend `StellaOps.Configuration` with `StellaOpsAuthorityOptions`, binder, validation stubs. | FND1 | **DONE** - Options schema + bootstrap helper + unit tests validating binding/normalisation. |
| 4 | FND4 | Publish `etc/authority.yaml.sample` (with plugin toggles) + README mention. | FND3 | **DONE** - Sample config added with env var guidance; README + quickstart updated. |
| 5 | FND5 | Register OTEL resource constants (service.name = `stellaops-authority`, etc.). | FND3 | **DONE** - Authority telemetry constants helper published for shared use. |
| 6 | PLG5 | Define plugin config directory structure (`etc/authority.plugins/*.yaml`), loader helpers, sample files. | FND3 | **DONE** - Schema + loader shipped, standard/ldap samples published. |
| 7 | OPS1 (support) | Pair with DevOps on Dockerfile/compose scaffolding to ensure directories, config names match. | FND4 | **DONE** - Provided distroless Dockerfile/compose guidance in `ops/authority/` for DevOps handoff. |
### Exit Criteria
- `dotnet build` succeeds from repo root with new projects.
- Configuration sample + docs referenced in README/Authority TODO file.
- Telemetry/resource constants ready for Authority Core team.
- Plugin config loader available before Plugin Team begins feature work.
### Risks / Mitigations
- **Risk:** Build props drift. → Run `dotnet format --verify-no-changes` before handoff.
- **Risk:** Config breaking changes mid-implementation. → Version `StellaOpsAuthorityOptions` and communicate via Slack + TODO updates.
### Coordination
- Daily async update until FND3 complete.
- Hand off AuthorityOptions schema to all other teams once finalized (tag repository issue).
- Keep an eye on the PR queue; DevEx reviews are required for structure/config changes.
View File
@@ -0,0 +1,36 @@
# StellaOps Authority - DevOps & Observability Team
> **Read first:** `AGENTS.md`, `StellaOps.Authority.TODOS.md`, and this plan. Reflect status changes in both TODO trackers.
## Mission
Deliver deployable artefacts, CI/CD automation, runtime observability, and operational runbooks for StellaOps Authority.
## Task Matrix
| Order | Task IDs | Description | Dependencies | Acceptance |
|-------|----------|-------------|--------------|------------|
| 1 | OPS1 | Author distroless Dockerfile + docker-compose sample (Authority + Mongo + optional Redis). | FND4, CORE1 | **DONE (DevEx scaffold)** - see `ops/authority/` Dockerfile + compose; verify with production secrets before release. |
| 2 | OPS2 | Extend CI workflows (build/test/publish) for Authority + auth libraries (dotnet build/test, docker build, artefact publish). | OPS1 | **DONE** - Authority build/test/publish integrated into `.gitea/workflows/build-test-deploy.yml`. |
| 3 | OPS3 | Implement key rotation script/CLI and wire pipeline job (manual trigger) to rotate signing keys + update JWKS. | CORE10 | Document rotation process + store secrets securely. |
| 4 | OPS4 | Document backup/restore for Authority Mongo collections, plugin configs, key material. | CORE3 | Produce runbook in `/docs/ops`. |
| 5 | OPS5 | Define monitoring metrics/alerts (token issuance failure rate, lockout spikes, bypass usage). Provide dashboards (Prometheus/Otel). | CORE7 | Share Grafana JSON or equivalent. |
| 6 | SUPPORT | Assist other teams with docker-compose variations for integration tests (Feedser, CLI). | OPS1, FSR5 | Provide templates + guidance. |
## Implementation Notes
- Container image must remain offline-friendly (no package installs at runtime).
- Compose sample should include environment variable settings referencing `etc/authority.yaml`.
- Store key rotation artefacts in secure storage (vault/secrets).
- Align metrics naming with existing StellaOps conventions.
- Provide fallback instructions for air-gapped deployments (manual image load, offline key rotation).
## Deliverables
- Dockerfile(s), compose stack, and documentation.
- Updated CI pipeline definitions.
- Runbooks for rotation, backup, restore.
- Monitoring/alerting templates.
## Coordination
- Sync with DevEx on configuration paths + plugin directories.
- Coordinate with Authority Core regarding key management endpoints.
- Work with Feedser Integration + CLI teams on integration test environments.
- Engage Security Guild to review key rotation + secret storage approach.
View File
@@ -0,0 +1,36 @@
# StellaOps Authority - Docs & Enablement Plan
> **Read first:** `AGENTS.md`, `StellaOps.Authority.TODOS.md`, and this plan. Keep progress synchronized across trackers.
## Mission
Produce operator and developer documentation for the new Authority stack, including configuration guides, API references, plugin tutorials, migration playbooks, and release notes.
## Task Pipeline
| Order | Task IDs | Description | Dependencies | Acceptance |
|-------|----------|-------------|--------------|------------|
| 1 | DOC1 | Draft `docs/11_AUTHORITY.md` (architecture overview, configuration, plugin model, deployment scenarios). | FND4, CORE1 | Reviewed by DevEx + Authority Core. |
| 2 | DOC2 | Generate API reference snippets for `/token`, `/jwks`, `/introspect`, `/revoke` (OpenAPI fragment + human-readable table). | CORE2, LIB4 | Linked from docs + README. |
| 3 | DOC3 | Write migration guide for Feedser moving from anonymous to secured mode (staged rollout, config updates). | FSR1-FSR3 | Includes rollback plan + FAQ. |
| 4 | DOC4 | Create plugin developer how-to (leveraging Plugin Team notes) covering packaging, capability flags, logging. | PLG1-PLG6 | **READY FOR DOCS REVIEW (2025-10-10)** - `docs/dev/31_AUTHORITY_PLUGIN_DEVELOPER_GUIDE.md` aligned with PLG6 scope; pending Docs copy-edit, diagram export, and LDAP RFC cross-linking. |
| 5 | DOC5 | Update root README, quickstarts (`docs/10_FEEDSER_CLI_QUICKSTART.md`), CLI help text references. | CLI4, FSR4 | Make sure new links are validated. |
| 6 | Cross | Collaborate on inline XML docs for public APIs across libraries. | LIB1-LIB5 | Ensure DocFX/IntelliSense-friendly summaries. |
## Implementation Notes
- Maintain offline-friendly instructions (no implicit internet requirements).
- Highlight security-critical steps (bootstrap credentials, key rotation) in callouts.
- Include environment-variable tables for configuration.
- Provide diagrams where useful (architecture, plugin flow).
- Prepare release note entry summarizing Authority MVP deliverables and upgrade steps.
## Deliverables
- New documentation pages + updated existing guides.
- OpenAPI snippet (JSON/YAML) committed to repo.
- Migration checklist for operators.
- Plugin developer tutorial ready for community/internal teams.
## Coordination
- Attend cross-team syncs to capture latest API contracts.
- Request reviews from respective teams (Authority Core, Plugin, CLI, Security).
- Work with DevEx to ensure docs packaged in Offline Kit if applicable.
- Update docs as soon as breaking changes occur; subscribe to relevant PRs.
View File
@@ -0,0 +1,35 @@
# StellaOps Authority - Feedser Integration Team
> **Read first:** `AGENTS.md`, `StellaOps.Authority.TODOS.md`, and this plan. Update both TODO trackers as tasks progress.
## Mission
Adopt the new authority stack inside Feedser: configure authentication, enforce scopes, update configuration, and validate end-to-end flows.
## Task Timeline
| Order | Task IDs | Description | Dependencies | Acceptance |
|-------|----------|-------------|--------------|------------|
| 1 | FSR1 | Extend `etc/feedser.yaml` with Authority configuration block (issuer, client credentials, bypass masks, scopes). | DevEx FND4, LIB2 | Sample config + docs updated. |
| 2 | FSR2 | Update Feedser WebService startup to use `AddStellaOpsResourceServerAuthentication`; annotate endpoints with `[Authorize]` and scope policies. | LIB2 | **DONE (2025-10-10)** - Auth wiring is optional but enabled via config; `/jobs*` endpoints demand `feedser.jobs.trigger` and tests cover bypass mode. |
| 3 | FSR3 | Implement bypass mask handling for on-host cron jobs; log when mask used. | FSR2, LIB3 | Configurable via YAML; integration test ensures mask respected. |
| 4 | FSR4 | Refresh Feedser docs (quickstart, operator guide) to explain auth requirements + config knobs. | FSR1-FSR3 | Coordinate with Docs team for final wording. |
| 5 | FSR5 | Build integration test harness (Authority + Feedser docker-compose) verifying token issuance and job triggering. | CORE1-CORE5, LIB4 | CI job produces pass/fail artefact. |
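A hedged sketch of the FSR2 wiring. `AddStellaOpsResourceServerAuthentication` is the helper named in this plan (its options shape is assumed), and the explicit scope policy below is shown for illustration even though the helper is expected to register it:

```csharp
using System;
using System.Linq;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.DependencyInjection;

var builder = WebApplication.CreateBuilder(args);

// Binds issuer, client credentials, bypass masks, and scopes from etc/feedser.yaml (assumed shape).
builder.Services.AddStellaOpsResourceServerAuthentication(builder.Configuration);
builder.Services.AddAuthorization(options =>
    options.AddPolicy("feedser.jobs.trigger", policy =>
        policy.RequireAssertion(ctx =>
            (ctx.User.FindFirst("scope")?.Value ?? string.Empty)
                .Split(' ', StringSplitOptions.RemoveEmptyEntries)
                .Contains("feedser.jobs.trigger"))));

var app = builder.Build();
app.UseAuthentication();
app.UseAuthorization();

// Every /jobs route demands the trigger scope; anonymous mode stays behind a config flag.
app.MapPost("/jobs/{jobId}/run", (string jobId) => Results.Accepted($"/jobs/{jobId}"))
   .RequireAuthorization("feedser.jobs.trigger");

app.Run();
```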
## Implementation Notes
- Add feature flag to allow temporary anonymous mode for staged rollout (document sunset date).
- Ensure CLI + API docs reference required scopes and sample client creation.
- Logs should capture client ID, user ID, and scopes when jobs triggered for audit (without leaking secrets).
- Avoid coupling tests to specific plugin implementations—use Standard plugin via configuration.
- Share any new scopes/policies with Auth Libraries and Docs teams.
## Deliverables
- Updated Feedser configuration + startup code.
- Documentation updates in `docs/10_FEEDSER_CLI_QUICKSTART.md` and `docs/11_AUTHORITY.md` (in partnership with Docs team).
- Integration tests executed in CI (Authority + Feedser).
- Rollout checklist for existing deployments (feature flag, config changes).
## Coordination
- Sync with Authority Core on policy naming (`feedser.jobs.trigger`, `feedser.merge`).
- Coordinate with CLI team for shared sample configs.
- Work closely with DevOps to integrate integration tests into pipeline.
- Notify Security Guild once bypass masks implemented for review.
View File
@@ -0,0 +1,38 @@
# StellaOps Authority - Plugin Workstream
> **Read first:** `AGENTS.md`, `StellaOps.Authority.TODOS.md`, and this document. Sync status across all trackers.
## Scope
Deliver the plugin abstraction layer and the default Mongo-backed identity plugin (`StellaOps.Authority.Plugin.Standard`), plus lay groundwork for future LDAP integration.
## Task Plan
| Order | Task IDs | Description | Dependencies | Acceptance |
|-------|----------|-------------|--------------|------------|
| 1 | PLG1 | Implement plugin abstractions: `IIdentityProviderPlugin`, `IUserCredentialStore`, `IClaimsEnricher`, `IClientProvisioningStore`, result models, constants. | DevEx FND1 | **DONE** - Abstractions published with XML docs and unit tests covering success/failure factories. |
| 2 | PLG2 | Integrate abstractions with plugin host (DI registration via `IAuthorityPluginRegistrar`). Emit diagnostics for load failures. | PLG1 | **DONE** - Authority host loads registrars, logs registration summary, and unit tests cover success/missing cases. |
| 3 | PLG3 | Build Mongo-backed `Plugin.Standard` implementing password auth, lockout, claim enrichment, admin seeding. | CORE3 | **DONE** - Standard plugin binds options, enforces password policy/lockout, seeds bootstrap user, and ships integration/unit tests. |
| 4 | PLG4 | Define capability metadata (supportsPassword, supportsMfa, supportsClientProvisioning). Update plugin registration to publish metadata. | PLG3 | **DONE (2025-10-10)** - Capability descriptors validated; Standard plugin enforces password flag and registry exposes aggregated metadata to the host. |
| 5 | PLG5 (support) | Collaborate with DevEx on plugin config schema (`etc/authority.plugins/*.yaml`). Implement config parser + validation. | DevEx PLG5 | Provide typed options class + tests. |
| 6 | PLG6 | Author plugin developer guide (structure, packaging, capability flags, logging expectations). | PLG1-PLG5 | **READY FOR DOCS REVIEW (2025-10-10)** - Guide finalised, includes capability metadata usage, ops alignment, and packaging checklist; handoff blocked only on Docs copy-edit + diagram export. |
| 7 | PLG7 (backlog design) | Produce technical RFC for future `Plugin.Ldap` (data flows, dependencies, TODO list). | PLG1-PLG4 | **RFC DRAFTED (2025-10-10)** - `docs/rfcs/authority-plugin-ldap.md` outlines architecture, configuration schema, implementation plan; awaiting guild review & sign-off. |
## Implementation Notes
- Mongo plugin must support offline bootstrap: optional JSON file with initial users/clients hashed offline.
- Provide extensibility points for password hashing algorithm (allow Security team to swap Argon2).
- Ensure plugin logging leverages Authority logger, no console writes.
- Document expected configuration keys for plugin settings (`passwordPolicy`, `seedUsers`, etc.).
- Validate plugin configuration early at startup; fail fast with actionable errors.
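A fail-fast sketch for the point above; the option names mirror the documented config keys (`passwordPolicy`, lockout settings) but their exact shapes are assumptions:

```csharp
using System;

// Sketch only: property names are assumptions aligned with the documented config keys.
public sealed class StandardPluginOptions
{
    public int PasswordMinimumLength { get; init; } = 12;
    public int LockoutThreshold { get; init; } = 5;

    // Validate at startup and fail fast with actionable, manifest-scoped errors.
    public void Validate(string manifestPath)
    {
        if (PasswordMinimumLength < 8)
            throw new InvalidOperationException(
                $"{manifestPath}: passwordPolicy.minimumLength must be >= 8 (was {PasswordMinimumLength}).");

        if (LockoutThreshold <= 0)
            throw new InvalidOperationException(
                $"{manifestPath}: lockout threshold must be positive (was {LockoutThreshold}).");
    }
}
```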
## Deliverables
- `StellaOps.Authority.Plugins.Abstractions` project.
- `StellaOps.Authority.Plugin.Standard` project with tests + seed data sample.
- Plugin dev documentation + sample configuration files.
- Diagnostic logging verifying plugin load, capabilities, configuration.
- Future plugin RFC for LDAP integration.
## Coordination
- Coordinate with Authority Core for capability usage in handlers.
- Work with Security Guild on password hash settings/lockout thresholds.
- Notify DevEx when configuration schema changes.
- Review Docs PR for plugin developer guide.
View File
@@ -0,0 +1,36 @@
# StellaOps Authority - Security Guild Plan
> **Read first:** `AGENTS.md`, `StellaOps.Authority.TODOS.md`, and this plan. Track progress in both TODO files.
## Mission
Define and verify the security posture of StellaOps Authority: password/secret policies, audit logging, throttling, threat modelling, and offline revocation guarantees.
## Task Breakdown
| Order | Task IDs | Description | Dependencies | Acceptance |
|-------|----------|-------------|--------------|------------|
| 1 | SEC1 | Select and configure password hashing (Argon2 preferred) + identity lockout parameters; contribute config defaults. | PLG3, CORE3 | Hash verified via unit test + red team review. |
| 2 | SEC2 | Specify audit log schema/content (principal, client, scopes, IP) and ensure Authority Core implementation meets requirements. | CORE5-CORE7 | Review sample logs; ensure PII is handled safely. |
| 3 | SEC3 | Define lockout & rate limit policies (per user/IP) and validate implementation in Authority Core. | CORE8 | Test harness proves lockouts triggered appropriately. |
| 4 | SEC4 | Design offline revocation list format + signing procedure; review implementation with Core/DevOps. | CORE9, OPS3 | Provide verification script for downstream systems. |
| 5 | SEC5 | Conduct threat model / security review (STRIDE) covering plugins, token flows, admin endpoints; produce mitigation backlog if needed. | CORE1-CORE10 | Document stored in `/docs/security`. |
| 6 | Oversight | Perform security review of CLI/Feedser integration changes (token handling, bypass masks). | FSR2, CLI2 | Approve PRs or request hardening changes. |
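For SEC4, the downstream verification script might reduce to a signature check along these lines; the detached-signature layout and key distribution are assumptions, since the format is still being designed:

```csharp
using System.Security.Cryptography;

// Sketch only: assumes a detached signature over the raw revocation-list bytes.
public static class RevocationListVerifier
{
    public static bool Verify(byte[] revocationListJson, byte[] signature, ECDsa authorityPublicKey) =>
        // Air-gapped consumers verify against the published Authority signing key
        // before honoring any revocations.
        authorityPublicKey.VerifyData(revocationListJson, signature, HashAlgorithmName.SHA256);
}
```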
## Implementation Notes
- Require secrets (client, bootstrap API keys) to meet minimum entropy; document rotation expectations.
- Ensure bypass mask usage is fully logged + alertable.
- Recommend default TLS cipher suites for Authority deployments.
- Validate plugin capability metadata doesn't expose insecure combinations (e.g., a plugin without password support cannot be selected for password grant).
- Develop checklist for production readiness (penetration test, log review, key rotation rehearsal).
## Deliverables
- Security configuration recommendations (encoded in options + documentation).
- Approved audit log schema & sample records.
- Threat model document + mitigation backlog (if gaps discovered).
- Sign-off memo to enable production rollout.
## Coordination
- Work closely with Authority Core and Plugin teams during implementation; request changes early.
- Pair with DevOps on key rotation / secret storage solutions.
- Review Docs to ensure operator guidance includes security-critical steps.
- Attend weekly Auth Guild sync to surface risks/blockers.
View File
@@ -0,0 +1,120 @@
# StellaOps.Authority - Implementation Backlog
> Status owner: Platform Authentication Guild
> Source inspiration: `inspiration/Ablera.Serdica.*` (do **not** copy-paste; align with StellaOps coding standards)
## 0. Foundations
| ID | Task | Owner | Notes / Acceptance |
|----|------|-------|---------------------|
| FND1 | Create solution scaffold under `src/StellaOps.Authority` (`StellaOps.Authority.sln` mirroring existing structure). | DevEx | **DONE** - Authority host + auth libraries + plugin stub scaffolded with net10.0 preview defaults. |
| FND2 | Extend `global.json`/Directory props to include new projects (net10.0). | DevEx | **DONE** - Directory props/targets cover Authority plugins; root `StellaOps.sln` enables repo-wide `dotnet build` (Feedser compile issues remain pre-existing). |
| FND3 | Define `StellaOpsAuthorityOptions` in `StellaOps.Configuration` (issuer, lifetimes, plugin directories, bypass masks). | BE-Base | **DONE** - Options class + bootstrapper with validation and tests; binds from YAML/JSON/env. |
| FND4 | Provide sample config `etc/authority.yaml.sample` with sensible defaults for offline-first deployments. | DevEx/Docs | **DONE** - Authority template published with token defaults + plug-in toggles and referenced in README/Quickstart. |
| FND5 | Add OpenTelemetry resource/version constants for Authority (service.name, namespace). | DevEx/Observability | **DONE** - Authority telemetry constants & helpers published for reuse by host/plugins. |
## 1. Core Authority Service
| ID | Task | Owner | Notes / Acceptance |
|----|------|-------|---------------------|
| CORE1 | Bootstrap ASP.NET minimal API host with `StellaOps.Configuration` and plugin loading (reuse Feedser plugin host). | BE-Base | **DONE (2025-10-09)** - Host loads Authority options, Serilog, plugin registry; `/health` and `/ready` return 200. |
| CORE2 | Integrate OpenIddict server: configure issuer, endpoints (`/authorize`, `/token`, `/jwks`, `/introspect`, `/revoke`), token lifetimes. | BE-Auth | **DONE (2025-10-09)** - OpenIddict server wired with required endpoints, lifetimes, sliding refresh tokens, dev-only HTTPS relaxation. |
| CORE3 | Implement Mongo-backed stores (`AuthorityUser`, `AuthorityClient`, `AuthorityScope`, `AuthorityToken`, `AuthorityLoginAttempt`). | BE-Auth Storage | **DONE (2025-10-09)** - Mongo storage project with indexed collections, repository layer, and bootstrap migration runner wired to host. |
| CORE4 | Add `IUserCredentialStore`, `IClaimsEnricher`, `IClientCredentialStore`, `IIdentityProviderPlugin` abstractions (plugin contracts). | BE-Auth | Live under `StellaOps.Authority.Plugins.Abstractions`. |
| CORE5 | Port/customize OpenIddict event handlers (password grant, client credentials, token validation) using plugin contracts. | BE-Auth | **DONE (2025-10-10)** - Password, client-credentials, and token-validation handlers now enforce plugin capabilities, persist issued tokens, and run revocation checks. |
| CORE5A | Author integration tests verifying token persistence + revocation (client creds & refresh) through `IAuthorityTokenStore`. | QA, BE-Auth | Ensure revoked tokens are denied via handler + store wiring; cover reference token retrieval when implemented. |
| CORE5B | Document token persistence behaviour (revocation, enrichment) for resource servers + bootstrap guide. | Docs, BE-Auth | Update `docs/11_AUTHORITY.md` and plugin dev guide with new claims + store expectations before GA. |
| CORE6 | Implement API key protected bootstrap endpoints (`POST /internal/clients`, `POST /internal/users`) for initial provisioning. | BE-Auth | **DONE (2025-10-10)** - `/internal` APIs gated by bootstrap API key create users/clients through plugin stores. |
| CORE7 | Wire structured logging + OTEL spans for `/token`, `/authorize`, plugin actions. | BE-Auth Observability | Follows StellaOps logging conventions. |
| CORE8 | Add rate limiting middleware on `/token` and `/authorize`. | BE-Auth | Configurable via options; tests ensure throttle triggered. |
| CORE9 | Implement revocation (refresh + access) and publish signed offline revocation list. | BE-Auth | CLI hook to export list for air-gapped sync. |
| CORE10 | Provide JWKS endpoint backed by rotating signing/encryption keys (pluggable certificate loader). | BE-Auth | Document rotation workflow. |
## 2. Plugin System
| ID | Task | Owner | Notes / Acceptance |
|----|------|-------|---------------------|
| PLG1 | Build `StellaOps.Authority.Plugins.Abstractions` (contracts, result models, constants). | BE-Auth | Align naming with StellaOps; add XML docs. |
| PLG2 | Implement plugin discovery via existing plugin host (search `PluginBinaries` for `StellaOps.Authority.Plugin.*`). | BE-Base | Provide diagnostics when plugin load fails. |
| PLG3 | Develop `StellaOps.Authority.Plugin.Standard` (Mongo-based user store, password hashing, lockout policy). | BE-Auth Storage | Includes configurable password policy + seed admin user. |
| PLG4 | Add plugin capability metadata (supportsPassword, supportsMfa, supportsClientProvisioning). | BE-Auth | **DONE (2025-10-10)** - Descriptor validation + registry logging wired; Standard plugin forces password capability and warns on misconfiguration. |
| PLG5 | Define plugin configuration schema under `etc/authority.plugins/*.yaml`; load via `StellaOps.Configuration`. | DevEx/Docs | **DONE** - Loader helpers + sample manifests committed; schema validated during bootstrap. |
| PLG6 | Publish developer guide for writing Authority plugins mirroring Feedser docs. | DevEx/Docs | **READY FOR DOCS REVIEW (2025-10-10)** - `docs/dev/31_AUTHORITY_PLUGIN_DEVELOPER_GUIDE.md` finalised with capability guidance, ops alignment, testing checklist; awaiting copy-edit & diagram polish by Docs guild. |
| PLG7 | Future placeholder: outline backlog for LDAP plugin (`StellaOps.Authority.Plugin.Ldap`) with story-level TODOs. | BE-Auth | **RFC DRAFTED (2025-10-10)** - See `docs/rfcs/authority-plugin-ldap.md` for architecture, configuration schema, testing plan, and open questions awaiting guild review. |
## 3. Shared Auth Libraries
| ID | Task | Owner | Notes / Acceptance |
|----|------|-------|---------------------|
| LIB1 | Create `StellaOps.Auth.Abstractions` (claims, scopes, ProblemResultFactory, PrincipalBuilder). | BE-Auth | **DONE (2025-10-10)** - Added claim/scope constants, deterministic principal builder, problem result helpers, and xUnit coverage for normalization paths. |
| LIB2 | Implement `StellaOps.Auth.ServerIntegration` DI extensions (JWT bearer, bypass masks, policy helpers). | BE-Auth | **DONE (2025-10-10)** - Delivered `AddStellaOpsResourceServerAuthentication`, scope policies, bypass evaluator, and integration tests. |
| LIB3 | Migrate CIDR-matching logic (`NetworkMaskMatcher`) with IPv4/6 support + tests. | BE-Auth | **DONE (2025-10-10)** - New matcher + `NetworkMask` parser with 100% branch coverage replacing legacy Serdica helpers. |
| LIB4 | Add `StellaOps.Auth.Client` with discovery, JWKS caching, password/client credentials flows, token cache abstraction. | DevEx/CLI | **DONE (2025-10-10)** - Implemented typed client, discovery/JWKS caches, in-memory/file token caches, and CLI-focused unit tests. |
| LIB5 | Integrate Polly (configurable) and HttpClientFactory patterns in client library. | DevEx | Ensure retries/offline fallback configurable. |
| LIB6 | Publish NuGet packaging metadata (License, SourceLink) for new libraries. | DevEx | Align with repo packaging conventions. |
## 4. Feedser Integration
| ID | Task | Owner | Notes / Acceptance |
|----|------|-------|---------------------|
| FSR1 | Extend `etc/feedser.yaml` with Authority section (issuer, client credentials, bypass masks). | DevEx/Docs | Document mandatory vs optional settings. |
| FSR2 | Update Feedser WebService startup to call `AddStellaOpsResourceServerAuthentication` and enforce scopes/roles on job endpoints. | BE-Base | **DONE (2025-10-10)** - Feedser conditionally wires the resource server auth helper, protects all `/jobs` routes, and documents `authority` config. |
| FSR3 | Add configuration-driven fallback for on-host cron (network mask bypass). | BE-Base | Must be auditable via logs. |
| FSR4 | Adjust Feedser CLI doc references to note new auth requirements. | Docs | Update quickstart & CLI reference. |
| FSR5 | Write end-to-end integration tests (Authority + Feedser) verifying token issuance and job trigger flow (use docker-compose). | QA | Runs in CI nightly. |
## 5. CLI Integration
| ID | Task | Owner | Notes / Acceptance |
|----|------|-------|---------------------|
| CLI1 | Extend CLI config (`StellaOpsCliOptions`) with Authority fields (AuthorityUrl, ClientId, ClientSecret, Username, Password). | DevEx/CLI | Environment variable support. |
| CLI2 | Implement `stellaops-cli auth login/logout/status` commands using `StellaOps.Auth.Client`. | DevEx/CLI | Tokens stored via `ITokenCache`; support password + client creds. |
| CLI3 | Ensure all API calls attach bearer tokens; handle 401/403 with friendly output. | DevEx/CLI | Regression tests for unauthorized scenarios. |
| CLI4 | Update CLI docs & help text to reference authentication workflow. | Docs | Include example flows. |
## 6. Deployment & Ops
| ID | Task | Owner | Notes / Acceptance |
|----|------|-------|---------------------|
| OPS1 | Provide distroless Dockerfile + compose example (Authority + Mongo + optional Redis). | DevOps | **DONE (scaffold)** - Dockerfile + compose sample published under `ops/authority/`; offline-friendly mounts + volumes ready for DevOps hardening. |
| OPS2 | Implement CI pipeline stages (build, unit tests, integration tests, publish artifacts). | DevOps | **DONE** - CI workflow now builds/tests Authority, publishes artifacts, and builds container image alongside Feedser. |
| OPS3 | Add automated key rotation job (CLI or script) and document manual procedure. | DevOps/BE-Auth | Integrate with JWKS endpoint. |
| OPS4 | Document backup/restore steps for Authority Mongo collections and key material. | Docs/DevOps | Cover offline site restore. |
| OPS5 | Define monitoring/alerting rules (token issuance failure rates, auth errors). | Observability | Provide Prometheus/OpenTelemetry guidance. |
## 7. Security & Compliance
| ID | Task | Owner | Notes / Acceptance |
|----|------|-------|---------------------|
| SEC1 | Adopt ASP.NET Identity password hashing defaults (Argon2 if available). | BE-Auth | Verify with penetration test harness. |
| SEC2 | Implement audit log (structured) for token issuance, revocation, admin actions (including plugin events). | BE-Auth | Logs must include principal, scopes, client, IP. |
| SEC3 | Add configurable lockout/throttle rules (per user + per IP). | BE-Auth | Integration tests confirm lock after threshold. |
| SEC4 | Support offline revocation list generation/signing (for air-gapped exports). | BE-Auth/QA | CLI command + verification doc. |
| SEC5 | Conduct threat model review + update documentation with mitigations. | Security Guild | Include password grant hardening notes. |
## 8. Documentation & Enablement
| ID | Task | Owner | Notes / Acceptance |
|----|------|-------|---------------------|
| DOC1 | Author `docs/11_AUTHORITY.md` covering architecture, configuration, plugin model, operational playbooks. | Docs | Reference sample configs and CLI flows. |
| DOC2 | Produce API reference snippet (OpenAPI fragment) for `/token`, `/jwks`, `/introspect`, `/revoke`. | Docs/BE-Auth | Link in docs & README. |
| DOC3 | Write migration guide from anonymous Feedser to secured Feedser (staged rollout). | Docs/BE-Auth | Address bootstrap credentials and cut-over steps. |
| DOC4 | Create plugin developer how-to referencing new abstractions. | Docs/DevEx | Include example plugin skeleton. |
| DOC5 | Update repository README quickstart to point to Authority docs once live. | Docs | After Authority MVP lands. |
## 9. Backlog / Future Enhancements
| ID | Idea | Notes |
|----|------|-------|
| FUT1 | Multi-factor authentication plugin capability (TOTP / WebAuthn) via plugin metadata. | Requires UX + plugin changes. |
| FUT2 | Admin UI (React/Angular) for managing users/clients. | Defer until API stabilizes. |
| FUT3 | Federation with Microsoft Entra ID using OIDC upstream (Authority acts as broker). | Align with future integration strategy. |
| FUT4 | Device authorization flow support for offline agents. | Dependent on client library maturity. |
| FUT5 | Plugin marketplace packaging guidelines (versioning, signing). | Coordinate with product team. |
---
**Coordination Notes**
- Dedicated triage meetings weekly (Auth Guild) to review progress and unblock module owners.
- Plugin + Authority changes must coordinate with QA for end-to-end scenarios (Authority ↔ Feedser ↔ CLI).
- Security reviews required before enabling Authority in production environments.
StellaOps.sln Normal file

File diff suppressed because it is too large

View File
@@ -5,9 +5,13 @@
|OSV alias consolidation & per-ecosystem snapshots|BE-Conn-OSV, QA|Merge, Testing|DONE - alias graph handles GHSA/CVE records and deterministic snapshots exist across ecosystems.|
|Oracle PSIRT pipeline completion|BE-Conn-Oracle|Source.Common, Core|**DONE** - Oracle mapper now emits CVE aliases, vendor affected packages, patch references, and resume/backfill flow is covered by integration tests.|
|VMware connector observability & resume coverage|BE-Conn-VMware, QA|Source.Common, Storage.Mongo|**DONE** - VMware diagnostics emit fetch/parse/map metrics, fetch dedupe uses hash cache, and integration test covers snapshot plus resume path.|
|Model provenance & range backlog|BE-Merge|Models|**DOING** - VMware/Oracle/Chromium, NVD, Debian, SUSE, Ubuntu, and Adobe emit RangePrimitives (Debian EVR + SUSE NEVRA + Ubuntu EVR telemetry online; Adobe now reports `adobe.track/platform/priority/availability` telemetry with fixed-status provenance). Remaining connectors (Apple, etc.) still need structured primitives/EVR coverage.|
|Trivy DB exporter delta strategy|BE-Export|Exporters|**TODO** - finish `ExportStateManager` delta reset and design incremental layer reuse for unchanged trees.|
|Model provenance & range backlog|BE-Merge|Models|**DOING** - VMware/Oracle/Chromium, NVD, Debian, SUSE, Ubuntu, Adobe, ICS Kaspersky, CERT-In, CERT-FR, JVN, and KEV now emit RangePrimitives (KEV adds due-date/vendor extensions with deterministic snapshots). Remaining connectors (`Acsc`, `Cccs`, `CertBund`, `CertCc`, `Cve`, `Ghsa`, `Ics.Cisa`, `Kisa`, `Ru.Bdu`, `Ru.Nkcki`, `Vndr.Apple`, `Vndr.Cisco`, `Vndr.Msrc`) still need structured coverage.|
|Trivy DB exporter delta strategy|BE-Export|Exporters|**DONE** - planner promotes chained deltas back to full exports, OCI writer reuses base blobs, regression tests cover the delta→delta→full sequence, and a full-stack layer-reuse smoke test + operator docs landed (2025-10-10).|
|Red Hat fixture validation sweep|QA|Source.Distro.RedHat|**DOING** - finalize RHSA fixture regeneration once connector regression fixes land.|
|JVN VULDEF schema update|BE-Conn-JVN, QA|Source.Jvn|**DONE** - schema patched (vendor/product attrs, impact entries, err codes), parser tightened, fixtures/tests refreshed.|
|Build/test sweeps|QA|All modules|**DOING** - targeted suites green (Models, VMware, Oracle, Chromium, JVN, Cert-In). Full solution run still fails due to `StellaOps.Feedser.Storage.Mongo.Tests/AdvisoryStorePerformanceTests` exceeding perf budget; rerun once budget or test adjusted.|
|OSV vs GHSA parity checks|QA, BE-Merge|Merge|**TODO** - design diff detection between OSV and GHSA feeds to surface inconsistencies.|
|Build/test sweeps|QA|All modules|**DONE** - wired Authority plugin abstractions into the build, updated CLI export tests for the new overrides, and full `dotnet test` now succeeds (perf suite within budget).|
|Authority plugin PLG1-PLG3|BE-Auth Plugin|Authority DevEx|**DONE** - abstractions/tests shipped, plugin loader integrated, and Mongo-backed Standard plugin stub operational with bootstrap seeding.|
|Authority plugin PLG4-PLG6|BE-Auth Plugin, DevEx/Docs|Authority plugin PLG1-PLG3|**READY FOR DOCS REVIEW (2025-10-10)** - Capability metadata validated, configuration guardrails shipped, developer guide finalised; waiting on Docs polish + diagram export.|
|Authority plugin PLG7 RFC|BE-Auth Plugin|PLG4|**DRAFTED (2025-10-10)** - `docs/rfcs/authority-plugin-ldap.md` captured LDAP plugin architecture, configuration schema, and implementation plan; needs Auth/Security guild review.|
|Feedser modularity test sweep|BE-Conn/QA|Feedser build|**DONE (2025-10-10)** - AngleSharp upgrade applied, helper assemblies copy-local, Kaspersky fixtures updated; full `dotnet test src/StellaOps.Feedser.sln` now passes locally.|
|OSV vs GHSA parity checks|QA, BE-Merge|Merge|**DONE** - parity inspector/diagnostics wired into OSV connector regression sweep; fixtures validated via `OsvGhsaParityRegressionTests` (see docs/19_TEST_SUITE_OVERVIEW.md) and metrics emitted through `OsvGhsaParityDiagnostics`.|
View File
@@ -1,36 +1,36 @@
# Pending Task Backlog
> Last updated: 2025-10-09 (UTC)
## Common
- **Build/test sweeps (QA - DOING)**
Full solution runs still fail the `StellaOps.Feedser.Storage.Mongo.Tests/AdvisoryStorePerformanceTests` budget. We need either to optimise the hot paths in `AdvisoryStore` for large advisory payloads or relax the perf thresholds with new baseline data. Once the bottleneck is addressed, rerun the full suite and capture metrics for the release checklist.
- **OSV vs GHSA parity checks (QA & BE-Merge - TODO)**
Design and implement a diff detector comparing OSV advisories against GHSA records. The deliverable should flag mismatched aliases, missing affected ranges, or divergent severities, surface actionable telemetry/alerts, and include regression tests with canned OSV+GHSA fixtures.
## Prerequisites
# Pending Task Backlog
> Last updated: 2025-10-09 (UTC)
## Common
- **Build/test sweeps (QA - DONE)**
Full `dotnet test` is green again after wiring the Authority plugin abstractions into `StellaOps.Configuration` and updating CLI export tests for the new publish/include overrides. Keep running the sweep weekly and capture timings so we catch regressions early.
- **OSV vs GHSA parity checks (QA & BE-Merge - TODO)**
Design and implement a diff detector comparing OSV advisories against GHSA records. The deliverable should flag mismatched aliases, missing affected ranges, or divergent severities, surface actionable telemetry/alerts, and include regression tests with canned OSV+GHSA fixtures.
## Prerequisites
- **Range primitives for SemVer/EVR/NEVRA metadata (BE-Merge - DOING)**
The core model supports range primitives, but several connectors (notably Apple, remaining vendor feeds, and older distro paths) still emit raw strings. We must extend those mappers to populate the structured envelopes (SemVer/EVR/NEVRA plus vendor extensions) and add fixture coverage so merge/export layers see consistent telemetry.
- **Provenance envelope field masks (BE-Merge - DOING)**
Provenance needs richer categorisation (component category, severity bands, resume counters) and better dedupe metrics. Update the provenance model, extend diagnostics to emit the new tags, and refresh dashboards/tests to ensure determinism once additional metadata flows through.
## Implementations
- **Model provenance & range backlog (BE-Merge - DOING)**
With Adobe/Ubuntu now emitting range primitives, focus on the remaining connectors (e.g., Apple, smaller vendor PSIRTs). Update their pipelines, regenerate goldens, and confirm `feedser.range.primitives` metrics reflect the added telemetry. The task closes when every high-priority source produces structured ranges with provenance.
- **Trivy DB exporter delta strategy (BE-Export - TODO)**
Finalise the delta-reset story in `ExportStateManager`: define when to invalidate baselines, how to reuse unchanged layers, and document operator workflows. Implement planner logic for layer reuse, update exporter tests, and exercise a delta→full→delta sequence.
The core model supports range primitives, but several connectors still emit raw strings. Current gaps (snapshot 2025-10-09, post-Kaspersky/CERT-In/CERT-FR/JVN updates): `Acsc`, `Cccs`, `CertBund`, `CertCc`, `Cve`, `Ghsa`, `Ics.Cisa`, `Kev`, `Kisa`, `Ru.Bdu`, `Ru.Nkcki`, `Vndr.Apple`, `Vndr.Cisco`, `Vndr.Msrc`. We need to extend those mappers to populate the structured envelopes (SemVer/EVR/NEVRA plus vendor extensions) and add fixture coverage so merge/export layers see consistent telemetry. (Delivered: ICS.Kaspersky, CERT-In, CERT-FR emit vendor primitives; JVN captures version/build metadata.)
- **Provenance envelope field masks (BE-Merge - DOING)**
Provenance needs richer categorisation (component category, severity bands, resume counters) and better dedupe metrics. Update the provenance model, extend diagnostics to emit the new tags, and refresh dashboards/tests to ensure determinism once additional metadata flows through.
## Implementations
- **Model provenance & range backlog (BE-Merge - DOING)**
With Adobe/Ubuntu now emitting range primitives, focus on the remaining connectors (e.g., Apple, smaller vendor PSIRTs). Update their pipelines, regenerate goldens, and confirm `feedser.range.primitives` metrics reflect the added telemetry. The task closes when every high-priority source produces structured ranges with provenance.
- **Trivy DB exporter delta strategy (BE-Export - TODO)**
Finalise the delta-reset story in `ExportStateManager`: define when to invalidate baselines, how to reuse unchanged layers, and document operator workflows. Implement planner logic for layer reuse, update exporter tests, and exercise a delta→full→delta sequence.
- **Red Hat fixture validation sweep (QA - DOING)**
Regenerate RHSA fixtures with the latest connector output and make sure the regenerated snapshots align once the outstanding connector tweaks land. Blockers: connector regression fixes still in-flight; revisit once those merges stabilise to avoid churn.
- **Plan incremental/delta exports (BE-Export - DOING)**
`TrivyDbExportPlanner` now captures changed files but does not yet reuse existing OCI layers. Extend the planner to build per-file manifests, teach the writer to skip untouched layers, and add delta-cycle tests covering file removals, additions, and checksum changes.
Regenerate RHSA fixtures with the latest connector output and make sure the regenerated snapshots align once the outstanding connector tweaks land. Pending prerequisites: land the mapper reference-normalisation patch (local branch `redhat/ref-dedupe`) and the range provenance backfill (`RangePrimitives.GetCoverageTag`). Once those land, run `UPDATE_RHSA_FIXTURES=1 dotnet test src/StellaOps.Feedser.Source.Distro.RedHat.Tests/StellaOps.Feedser.Source.Distro.RedHat.Tests.csproj`, review the refreshed `Fixtures/rhsa-*.json`, and sync the task status to **DONE**.
- **Plan incremental/delta exports (BE-Export DOING)**
`TrivyDbExportPlanner` now captures changed files but does not yet reuse existing OCI layers. Extend the planner to build per-file manifests, teach the writer to skip untouched layers, and add delta-cycle tests covering file removals, additions, and checksum changes.
- **Scan execution & result upload workflow (DevEx/CLI & Ops Integrator DOING)**
  `stella scan run` now emits a structured `scan-run-*.json` alongside artefacts. Remaining work: add resilient upload retries/backoff, cover success/retry/cancellation with integration tests, and expand docs with docker/dotnet/native runner examples plus metadata troubleshooting tips.
WEB-TODOS.md
@@ -0,0 +1,3 @@
# Web UI Follow-ups
- Trivy DB exporter settings panel: surface `publishFull` / `publishDelta` and `includeFull` / `includeDelta` toggles, saving overrides via future `/exporters/trivy-db/settings` API. Include “run export now” button that reuses those overrides when triggering `export:trivy-db`.
@@ -131,7 +131,11 @@ Each connector ships fixtures/tests under the matching `*.Tests` project.
* JSON exporter mirrors vuln-list layout with per-file digests and manifest.
* Trivy DB exporter shells or native-builds Bolt archives, optionally pushes OCI
layers, and records export cursors. Delta runs reuse unchanged blobs from the
previous full baseline, annotating `metadata.json` with `mode`, `baseExportId`,
`baseManifestDigest`, `resetBaseline`, and `delta.changedFiles[]`/`delta.removedPaths[]`.
ORAS pushes honour `publishFull` / `publishDelta`, and offline bundles respect
`includeFull` / `includeDelta` for air-gapped syncs.
### 5.4 Feedser.WebService
@@ -1,198 +1,198 @@
# API & CLI Reference
*Purpose* - give operators and integrators a single, authoritative spec for REST/gRPC calls **and** first-party CLI tools (`stella-cli`, `zastava`, `stella`).
Everything here is the *source of truth* for generated Swagger/OpenAPI and the `--help` screens in the CLIs.
---
## 0 Quick Glance
| Area | Call / Flag | Notes |
| ------------------ | ------------------------------------------- | ------------------------------------------------------------------------------ |
| Scan entry | `POST /scan` | Accepts SBOM or image; sub-5 s target |
| Delta check | `POST /layers/missing` | <20 ms reply; powers *delta SBOM* feature |
| Rate-limit / quota | | Headers **`X-Stella-Quota-Remaining`**, **`X-Stella-Reset`** on every response |
| Policy I/O | `GET /policy/export`, `POST /policy/import` | YAML now; Rego coming |
| Policy lint | `POST /policy/validate` | Returns 200 OK if ruleset passes |
| Auth | `POST /connect/token` (OpenIddict) | Client-credentials preferred |
| Health | `GET /healthz` | Simple liveness probe |
| Attestation * | `POST /attest` (TODO Q1-2026) | SLSA provenance + Rekor log |
| CLI flags | `--sbom-type` `--delta` `--policy-file` | Added to `stella` |
\* Marked **TODO** - delivered after the sixth month (kept on the Feature Matrix To-Do list).
---
## 1 Authentication
StellaOps uses **OAuth 2.0 / OIDC** (token endpoint mounted via OpenIddict).
```
POST /connect/token
Content-Type: application/x-www-form-urlencoded
grant_type=client_credentials&
client_id=cibot&
client_secret=REDACTED&
scope=stella.api
```
Successful response:
```json
{
"access_token": "eyJraWQi...",
"token_type": "Bearer",
"expires_in": 3600
}
```
> **Tip** - pass the token via `Authorization: Bearer <token>` on every call.
---
## 2 REST API
### 2.0 Obtain / Refresh Offline Token
```text
POST /token/offline
Authorization: Bearer <admin-token>
```
| Body field | Required | Example | Notes |
|------------|----------|---------|-------|
| `expiresDays` | no | `30` | Max 90 days |
```json
{
  "jwt": "eyJhbGciOiJSUzI1NiIsInR5cCI6...",
  "expires": "2025-08-17T00:00:00Z"
}
```
Token is signed with the backend's private key and already contains
`"maxScansPerDay": {{ quota_token }}`.
### 2.1 Scan - Upload SBOM **or** Image
```
POST /scan
```
| Param / Header | In | Required | Description |
| -------------------- | ------ | -------- | --------------------------------------------------------------------- |
| `X-Stella-Sbom-Type` | header | no | `trivy-json-v2`, `spdx-json`, `cyclonedx-json`; omitted = auto-detect |
| `?threshold` | query | no | `low`, `medium`, `high`, `critical`; default **critical** |
| body | body | yes | *Either* SBOM JSON *or* Docker image tarball/upload URL |
Every successful `/scan` response now includes:
| Header | Example |
|--------|---------|
| `X-Stella-Quota-Remaining` | `129` |
| `X-Stella-Reset` | `2025-07-18T23:59:59Z` |
| `X-Stella-Token-Expires` | `2025-08-17T00:00:00Z` |
**Response 200** (scan completed):
```json
{
"digest": "sha256:…",
"summary": {
"Critical": 0,
"High": 3,
"Medium": 12,
"Low": 41
},
"policyStatus": "pass",
"quota": {
"remaining": 131,
"reset": "2025-07-18T00:00:00Z"
}
}
```
**Response 202** - queued; polling URL in `Location` header.
---
### 2.2 Delta SBOM - Layer Cache Check
```
POST /layers/missing
Content-Type: application/json
Authorization: Bearer <token>
```
```json
{
"layers": [
"sha256:d38b...",
"sha256:af45..."
]
}
```
**Response 200** (<20 ms target):
```json
{
"missing": [
"sha256:af45..."
]
}
```
Client then generates SBOM **only** for the `missing` layers and reposts `/scan`.
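A minimal sketch of that round trip, assuming `jq` is installed and `$SERVER`/`$TOKEN` hold the API root and bearer token:
```bash
# Ask the backend which layer digests it has not cached yet.
missing=$(curl -s -X POST "$SERVER/layers/missing" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"layers":["sha256:d38b...","sha256:af45..."]}' | jq -r '.missing[]')

# Build SBOMs only for the missing layers, then re-post them to /scan.
for layer in $missing; do
  echo "generating SBOM for $layer"   # placeholder for your SBOM tooling
done
```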
---
### 2.3 Policy Endpoints
| Method | Path | Purpose |
| ------ | ------------------ | ------------------------------------ |
| `GET` | `/policy/export` | Download live YAML ruleset |
| `POST` | `/policy/import` | Upload YAML or Rego; replaces active |
| `POST` | `/policy/validate` | Lint only; returns 400 on error |
| `GET` | `/policy/history` | Paginated change log (audit trail) |
```yaml
# Example import payload (YAML)
version: "1.0"
rules:
  - name: Ignore Low dev
    severity: [Low, None]
    environments: [dev, staging]
    action: ignore
```
Validation errors come back as:
```json
{
"errors": [
{
"path": "$.rules[0].severity",
"msg": "Invalid level 'None'"
}
]
}
```
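As a usage sketch, linting a ruleset before replacing the active one (the YAML content type is an assumption):
```bash
# Lint only; a 400 response carries the error array shown above.
curl -s -X POST "$SERVER/policy/validate" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/yaml" \
  --data-binary @policy.yaml

# Replace the active ruleset once validation passes.
curl -s -X POST "$SERVER/policy/import" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/yaml" \
  --data-binary @policy.yaml
```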
---
### 2.4 Attestation (Planned - Q1-2026)
```
POST /attest
```
| Param | Purpose |
| ----------- | ------------------------------------- |
| body (JSON) | SLSA v1.0 provenance doc |
| | Signed + stored in local Rekor mirror |
Returns `202 Accepted` and `Location: /attest/{id}` for async verify.
@@ -211,11 +211,14 @@ Configuration follows the same precedence chain everywhere:
| Command | Purpose | Key Flags / Arguments | Notes |
|---------|---------|-----------------------|-------|
| `stellaops-cli scanner download` | Fetch and install scanner container | `--channel <stable\|beta\|nightly>` (default `stable`)<br>`--output <path>`<br>`--overwrite`<br>`--no-install` | Saves artefact under `ScannerCacheDirectory`, verifies digest/signature, and executes `docker load` unless `--no-install` is supplied. |
| `stellaops-cli scan run` | Execute scanner container against a directory (auto-upload) | `--target <directory>` (required)<br>`--runner <docker\|dotnet\|self>` (default from config)<br>`--entry <image-or-entrypoint>`<br>`[scanner-args...]` | Runs the scanner, writes results into `ResultsDirectory`, emits a structured `scan-run-*.json` metadata file, and automatically uploads the artefact when the exit code is `0`. |
| `stellaops-cli scan upload` | Re-upload existing scan artefact | `--file <path>` | Useful for retries when automatic upload fails or when operating offline. |
| `stellaops-cli db fetch` | Trigger connector jobs | `--source <id>` (e.g. `redhat`, `osv`)<br>`--stage <fetch\|parse\|map>` (default `fetch`)<br>`--mode <resume|init|cursor>` | Translates to `POST /jobs/source:{source}:{stage}` with `trigger=cli` |
| `stellaops-cli db merge` | Run canonical merge reconcile | — | Calls `POST /jobs/merge:reconcile`; exit code `0` on acceptance, `1` on failures/conflicts |
| `stellaops-cli db export` | Kick JSON / Trivy exports | `--format <json\|trivy-db>` (default `json`)<br>`--delta`<br>`--publish-full/--publish-delta`<br>`--bundle-full/--bundle-delta` | Sets `{ delta = true }` parameter when requested and can override ORAS/bundle toggles per run |
| `stellaops-cli auth <login\|logout\|status>` | Manage cached tokens for StellaOps Authority | `auth login --force` (ignore cache)<br>`auth status` | Uses `StellaOps.Auth.Client` under the hood; honours `StellaOps:Authority:*` configuration |
| `stellaops-cli config show` | Display resolved configuration | — | Masks secret values; helpful for air-gapped installs |
When running on an interactive terminal without explicit override flags, the CLI uses Spectre.Console prompts to let you choose per-run ORAS/offline bundle behaviour.
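For example, a one-off delta export that pushes to the registry might look like this (a sketch; it assumes the publish toggle is a plain boolean switch and the bundle toggles stay at their configured defaults):
```bash
stellaops-cli db export --format trivy-db --delta --publish-delta
```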
**Logging & exit codes**
@@ -229,12 +232,51 @@ Configuration follows the same precedence chain everywhere:
- Downloads are verified against the `X-StellaOps-Digest` header (SHA-256). When `StellaOps:ScannerSignaturePublicKeyPath` points to a PEM-encoded RSA key, the optional `X-StellaOps-Signature` header is validated as well.
- Metadata for each bundle is written alongside the artefact (`*.metadata.json`) with digest, signature, source URL, and timestamps.
- Retry behaviour is controlled via `StellaOps:ScannerDownloadAttempts` (default **3** with exponential backoff).
- Successful `scan run` executions create timestamped JSON artefacts inside `ResultsDirectory` plus a `scan-run-*.json` metadata envelope documenting the runner, arguments, timing, and stdout/stderr. The artefact is posted back to Feedser automatically.
#### Trivy DB export metadata (`metadata.json`)
`stellaops-cli db export --format trivy-db` (and the backing `POST /jobs/export:trivy-db`) always emits a `metadata.json` document in the OCI layout root. Operators consuming the bundle or delta updates should inspect the following fields:
| Field | Type | Purpose |
| ----- | ---- | ------- |
| `mode` | `full` \| `delta` | Indicates whether the current run rebuilt the entire database (`full`) or only the changed files (`delta`). |
| `baseExportId` | string? | Export ID of the last full baseline that the delta builds upon. Only present for `mode = delta`. |
| `baseManifestDigest` | string? | SHA-256 digest of the manifest belonging to the baseline OCI layout. |
| `resetBaseline` | boolean | `true` when the exporter rotated the baseline (e.g., repo change, delta chain reset). Treat as a full refresh. |
| `treeDigest` | string | Canonical SHA-256 digest of the JSON tree used to build the database. |
| `treeBytes` | number | Total bytes across exported JSON files. |
| `advisoryCount` | number | Count of advisories included in the export. |
| `exporterVersion` | string | Version stamp of `StellaOps.Feedser.Exporter.TrivyDb`. |
| `builder` | object? | Raw metadata emitted by `trivy-db build` (version, update cadence, etc.). |
| `delta.changedFiles[]` | array | Present when `mode = delta`. Each entry lists `{ "path": "<relative json>", "length": <bytes>, "digest": "sha256:..." }`. |
| `delta.removedPaths[]` | array | Paths that existed in the previous manifest but were removed in the new run. |
When the planner opts for a delta run, the exporter copies unmodified blobs from the baseline layout identified by `baseManifestDigest`. Consumers that cache OCI blobs only need to fetch the `changedFiles` and the new manifest/metadata unless `resetBaseline` is true.
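A mirror-side sketch of that decision, assuming the OCI layout root is the working directory and `jq` is available:
```bash
mode=$(jq -r '.mode' metadata.json)
reset=$(jq -r '.resetBaseline' metadata.json)

if [ "$mode" = "full" ] || [ "$reset" = "true" ]; then
  echo "full refresh: re-sync all blobs plus manifest/metadata"
else
  # Delta run: only these blobs (plus the new manifest/metadata) changed.
  jq -r '.delta.changedFiles[].digest' metadata.json
fi
```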
When pushing to ORAS, set `feedser:exporters:trivyDb:oras:publishFull` / `publishDelta` to control whether full or delta runs are copied to the registry. Offline bundles follow the analogous `includeFull` / `includeDelta` switches under `offlineBundle`.
Example configuration (`appsettings.yaml`):
```yaml
feedser:
  exporters:
    trivyDb:
      oras:
        enabled: true
        publishFull: true
        publishDelta: false
      offlineBundle:
        enabled: true
        includeFull: true
        includeDelta: false
```
**Authentication**
- API key is sent as `Authorization: Bearer <token>` automatically when configured.
- Anonymous operation (empty key) is permitted for offline use cases but backend calls will fail with 401 unless the Feedser instance allows guest access.
- When `StellaOps:Authority:Url` is set the CLI initialises the StellaOps auth client. Use `stellaops-cli auth login` to obtain a token (password grant when `Username`/`Password` are set, otherwise client credentials). Tokens are cached under `~/.stellaops/tokens` by default; `auth status` shows expiry and `auth logout` removes the cached entry.
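A typical token bootstrap therefore looks like (subcommands per the table above):
```bash
stellaops-cli auth login --force   # ignore any cached token and fetch a fresh one
stellaops-cli auth status          # confirm expiry before kicking off long jobs
stellaops-cli auth logout          # drop the cached entry when finished
```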
**Configuration file template**
@@ -247,7 +289,16 @@ Configuration follows the same precedence chain everywhere:
"ResultsDirectory": "results",
"DefaultRunner": "docker",
"ScannerSignaturePublicKeyPath": "",
"ScannerDownloadAttempts": 3,
"Authority": {
"Url": "https://authority.example.org",
"ClientId": "feedser-cli",
"ClientSecret": "REDACTED",
"Username": "",
"Password": "",
"Scope": "feedser.jobs.trigger",
"TokenCacheDirectory": ""
}
}
}
```
@@ -256,132 +307,132 @@ Drop `appsettings.local.json` or `.yaml` beside the binary to override per envir
---
### 2.5 Misc Endpoints
| Path | Method | Description |
| ---------- | ------ | ---------------------------- |
| `/healthz` | GET | Liveness; returns `"ok"` |
| `/metrics` | GET | Prometheus exposition (OTel) |
| `/version` | GET | Git SHA + build date |
---
## 3 First-Party CLI Tools
### 3.1 `stella`
> *Package SBOM + Scan + Exit code* - designed for CI.
```
Usage: stella [OPTIONS] IMAGE_OR_SBOM
```
| Flag / Option | Default | Description |
| --------------- | ----------------------- | -------------------------------------------------- |
| `--server` | `http://localhost:8080` | API root |
| `--token` | *env `STELLA_TOKEN`* | Bearer token |
| `--sbom-type` | *auto* | Force `trivy-json-v2`/`spdx-json`/`cyclonedx-json` |
| `--delta` | `false` | Enable delta layer optimisation |
| `--policy-file` | *none* | Override server rules with local YAML/Rego |
| `--threshold` | `critical` | Fail build if ≥ level found |
| `--output-json` | *none* | Write raw scan result to file |
| `--wait-quota` | `true` | If 429 received, automatically wait `Retry-After` and retry once. |
**Exit codes**
| Code | Meaning |
| ---- | ------------------------------------------- |
| 0 | Scan OK, policy passed |
| 1 | Vulnerabilities ≥ threshold OR policy block |
| 2 | Internal error (network etc.) |
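A typical CI gate simply forwards that exit code (a sketch; the image reference is illustrative):
```bash
if ! stella --server "$STELLA_SERVER" --threshold high \
     --output-json scan.json registry.example.com/app:latest; then
  echo "vulnerabilities at or above threshold, or policy block" >&2
  exit 1
fi
```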
---
### 3.2 `stella-zastava`
> *Daemon / K8s DaemonSet* - watch container runtime, push SBOMs.
Core flags (excerpt):
| Flag | Purpose |
| ---------------- | ---------------------------------- |
| `--mode` | `listen` (default) / `enforce` |
| `--filter-image` | Regex; ignore infra/busybox images |
| `--threads` | Worker pool size |
---
### 3.3 `stellopsctl`
> *Admin utility* - policy snapshots, feed status, user CRUD.
Examples:
```
stellopsctl policy export > policies/backup-2025-07-14.yaml
stellopsctl feed refresh # force OSV merge
stellopsctl user add dev-team --role developer
```
---
## 4 Error Model
Uniform problem-details object (RFC 7807):
```json
{
"type": "https://stella-ops.org/probs/validation",
"title": "Invalid request",
"status": 400,
"detail": "Layer digest malformed",
"traceId": "00-7c39..."
}
```
---
## 5 Rate Limits
Default **40 requests / second / token**.
429 responses include `Retry-After` seconds header.
---
## 6 FAQ & Tips
* **Skip SBOM generation in CI** - supply a *pre-built* SBOM and add `?sbom-only=true` to `/scan` for a <1 s path (see the sketch below).
* **Air-gapped?** - point `--server` to `http://oukgw:8080` inside the Offline Update Kit.
* **YAML vs Rego** - YAML is simpler; Rego unlocks time-based logic (see samples).
* **Cosign verify plugins** - enable the `SCANNER_VERIFY_SIG=true` env var to refuse unsigned plugins.
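A minimal sketch of that fast path, assuming `$SERVER`/`$TOKEN` are set and the SBOM file name is illustrative:
```bash
curl -s -X POST "$SERVER/scan?sbom-only=true" \
  -H "Authorization: Bearer $TOKEN" \
  -H "X-Stella-Sbom-Type: cyclonedx-json" \
  --data-binary @sbom.cdx.json
```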
---
## 7 Planned Changes (Beyond 6 Months)
These stay in *Feature Matrix → To Do* until design is frozen.
| Epic / Feature | API Impact Sketch |
| ---------------------------- | ---------------------------------- |
| **SLSA L1-L3** attestation | `/attest` (see §2.4) |
| Rekor transparency log | `/rekor/log/{id}` (GET) |
| Plugin Marketplace metadata | `/plugins/market` (catalog) |
| Horizontal scaling controls | `POST /cluster/node` (add/remove) |
| Windows agent support | Update LSAPI to PDE, no API change |
---
## 8 References
* OpenAPI YAML - `/openapi/v1.yaml` (served by backend)
* OAuth2 spec: <https://datatracker.ietf.org/doc/html/rfc6749>
* SLSA spec: <https://slsa.dev/spec/v1.0>
---
## 9 Changelog (truncated)
* **2025-07-14** - added *delta SBOM*, policy import/export, CLI `--sbom-type`.
* **2025-07-12** - initial public reference.
---
@@ -0,0 +1,232 @@
# 10 · Feedser + CLI Quickstart
This guide walks through configuring the Feedser web service and the `stellaops-cli`
tool so an operator can ingest advisories, merge them, and publish exports from a
single workstation. It focuses on deployment-facing surfaces only (configuration,
runtime wiring, CLI usage) and leaves connector/internal customization for later.
---
## 0 · Prerequisites
- .NET SDK **10.0.100-preview** (matches `global.json`)
- MongoDB instance reachable from the host (local Docker or managed)
- `trivy-db` binary on `PATH` for Trivy exports (and `oras` if publishing to OCI)
- Plugin assemblies present in `PluginBinaries/` (already included in the repo)
- Optional: Docker/Podman runtime if you plan to run scanners locally
> **Tip:** air-gapped installs should preload `trivy-db` and `oras` binaries into the
> runner image since Feedser never fetches them dynamically.
---
## 1 · Configure Feedser
1. Copy the sample config to the expected location (CI/CD pipelines can stamp values
into this file during deployment—see the “Deployment automation” note below):
```bash
mkdir -p etc
cp etc/feedser.yaml.sample etc/feedser.yaml
```
2. Edit `etc/feedser.yaml` and update the MongoDB DSN (and optional database name).
The default template configures plug-in discovery to look in `PluginBinaries/`
and disables remote telemetry exporters by default.
3. (Optional) Override settings via environment variables. All keys are prefixed with
`FEEDSER_`. Example:
```bash
export FEEDSER_STORAGE__DSN="mongodb://user:pass@mongo:27017/feedser"
export FEEDSER_TELEMETRY__ENABLETRACING=false
```
4. Start the web service from the repository root:
```bash
dotnet run --project src/StellaOps.Feedser.WebService
```
On startup Feedser validates the options, boots MongoDB indexes, loads plug-ins,
and exposes:
- `GET /health` - returns service status and telemetry settings
- `GET /ready` - performs a MongoDB `ping`
- `GET /jobs` + `POST /jobs/{kind}` - inspect and trigger connector/export jobs
> **Security note:** authentication is not wired yet; guard the service with
> network controls or a reverse proxy until auth middleware ships.
### Authority companion configuration (preview)
1. Copy the Authority sample configuration:
```bash
cp etc/authority.yaml.sample etc/authority.yaml
```
2. Update the issuer URL, token lifetimes, and plug-in descriptors to match your
environment. Authority expects per-plugin manifests in `etc/authority.plugins/`;
sample `standard.yaml` and `ldap.yaml` files are provided as starting points.
For air-gapped installs keep the default plug-in binary directory
(`../PluginBinaries/Authority`) so packaged plug-ins load without outbound access.
3. Environment variables prefixed with `STELLAOPS_AUTHORITY_` override individual
fields. Example:
```bash
export STELLAOPS_AUTHORITY__ISSUER="https://authority.stella-ops.local"
export STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0="/srv/authority/plugins"
```
---
## 2 · Configure the CLI
The CLI reads configuration from JSON/YAML files *and* environment variables. The
defaults live in `src/StellaOps.Cli/appsettings.json` and expect overrides at runtime.
| Setting | Environment variable | Default | Purpose |
| ------- | -------------------- | ------- | ------- |
| `BackendUrl` | `STELLAOPS_BACKEND_URL` | _empty_ | Base URL of the Feedser web service |
| `ApiKey` | `API_KEY` | _empty_ | Reserved for future auth; keep empty today |
| `ScannerCacheDirectory` | `STELLAOPS_SCANNER_CACHE_DIRECTORY` | `scanners` | Local cache folder |
| `ResultsDirectory` | `STELLAOPS_RESULTS_DIRECTORY` | `results` | Where scan outputs are written |
Example bootstrap:
```bash
export STELLAOPS_BACKEND_URL="http://localhost:5000"
export STELLAOPS_RESULTS_DIRECTORY="$HOME/.stellaops/results"
dotnet run --project src/StellaOps.Cli -- db merge
```
To persist configuration, you can create `stellaops-cli.yaml` next to the binary or
rely on environment variables for ephemeral runners.
---
## 3 · Operating Workflow
1. **Trigger connector fetch stages**
```bash
dotnet run --project src/StellaOps.Cli -- db fetch --source osv --stage fetch
dotnet run --project src/StellaOps.Cli -- db fetch --source osv --stage parse
dotnet run --project src/StellaOps.Cli -- db fetch --source osv --stage map
```
Use `--mode resume` when continuing from a previous window:
```bash
dotnet run --project src/StellaOps.Cli -- db fetch --source redhat --stage fetch --mode resume
```
2. **Merge canonical advisories**
```bash
dotnet run --project src/StellaOps.Cli -- db merge
```
3. **Produce exports**
```bash
# JSON tree (vuln-list style)
dotnet run --project src/StellaOps.Cli -- db export --format json
# Trivy DB (delta example)
dotnet run --project src/StellaOps.Cli -- db export --format trivy-db --delta
```
Feedser always produces a deterministic OCI layout. The first run after a clean
bootstrap emits a **full** baseline; subsequent `--delta` runs reuse the previous
baseline's blobs when only JSON manifests change. If the exporter detects that a
prior delta is still active (i.e., `LastDeltaDigest` is recorded) it automatically
upgrades the next run to a full export and resets the baseline so operators never
chain deltas indefinitely. The CLI exposes `--publish-full/--publish-delta` (for
ORAS pushes) and `--include-full/--include-delta` (for offline bundles) should you
need to override the defaults interactively.
**Smoke-check delta reuse:** after the first baseline completes, run the export a
second time with `--delta` and verify that the new directory reports `mode=delta`
while reusing the previous layer blob.
```bash
export_root=${FEEDSER_EXPORT_ROOT:-exports/trivy}
base=$(ls -1d "$export_root"/* | sort | tail -n2 | head -n1)
delta=$(ls -1d "$export_root"/* | sort | tail -n1)
jq -r '.mode,.baseExportId' "$delta/metadata.json"
base_manifest=$(jq -r '.manifests[0].digest' "$base/index.json")
delta_manifest=$(jq -r '.manifests[0].digest' "$delta/index.json")
printf 'baseline manifest: %s\ndelta manifest: %s\n' "$base_manifest" "$delta_manifest"
layer_digest=$(jq -r '.layers[0].digest' "$base/blobs/sha256/${base_manifest#sha256:}")
cmp "$base/blobs/sha256/${layer_digest#sha256:}" \
"$delta/blobs/sha256/${layer_digest#sha256:}"
```
`cmp` returning exit code `0` confirms the delta export reuses the baseline's
`db.tar.gz` layer instead of rebuilding it.
4. **Manage scanners (optional)**
```bash
dotnet run --project src/StellaOps.Cli -- scanner download --channel stable
dotnet run --project src/StellaOps.Cli -- scan run --entry scanners/latest/Scanner.dll --target ./sboms
dotnet run --project src/StellaOps.Cli -- scan upload --file results/scan-001.json
```
Add `--verbose` to any command for structured console logs. All commands honour
`Ctrl+C` cancellation and exit with non-zero status codes when the backend returns
a problem document.
---
## 4 · Verification Checklist
- Feedser `/health` returns `"status":"healthy"` and Storage bootstrap is marked
complete after startup.
- CLI commands return HTTP 202 with a `Location` header (job tracking URL) when
triggering Feedser jobs.
- Export artefacts are materialised under the configured output directories and
their manifests record digests.
- MongoDB contains the expected `document`, `dto`, `advisory`, and `export_state`
collections after a run.
---
## 5 · Deployment Automation
- Treat `etc/feedser.yaml.sample` as the canonical template. CI/CD should copy it to
the deployment artifact and replace placeholders (DSN, telemetry endpoints, cron
overrides) with environment-specific secrets.
- Keep secret material (Mongo credentials, OTLP tokens) outside of the repository;
inject them via secret stores or pipeline variables at stamp time.
- When building container images, include `trivy-db` (and `oras` if used) so air-gapped
clusters do not need outbound downloads at runtime.
---
## 6 · Next Steps
- Introduce authentication/authorization in the web service before exposing it on
shared networks.
- Automate the workflow above via CI/CD (compose stack or Kubernetes CronJobs).
- Pair with the Feedser connector teams when enabling additional sources so their
module-specific requirements are pulled in safely.
---
## 7 · Microsoft Authentication Integration (Planned)
- The Feedser web service will integrate with the Microsoft identity stack (Entra ID)
using OAuth 2.0. Expect additional configuration keys for authority URLs, client
IDs/secrets, and audience scopes once the implementation lands.
- CLI commands already pass `Authorization` headers when credentials are supplied.
When auth is enabled, point `stellaops-cli` at the token issuer (client credentials
flow) or run it behind a proxy that injects bearer tokens.
- Keep network-facing deployments behind reverse proxies or firewalls until the
authentication middleware ships and is fully validated.
@@ -42,28 +42,46 @@ contributors who need to extend coverage or diagnose failures.
---
## Local runner
```bash
# minimal run: unit + property + frontend tests
./scripts/dev-test.sh
# full stack incl. Playwright and Lighthouse
./scripts/dev-test.sh --full
```
The script spins up MongoDB/Redis via Testcontainers and requires:
* Docker ≥ 25
* Node ≥ 20 (for Jest/Playwright)
---
### Feedser OSV↔GHSA parity fixtures
The Feedser connector suite includes a regression test (`OsvGhsaParityRegressionTests`)
that checks a curated set of GHSA identifiers against OSV responses. The fixture
snapshots live in `src/StellaOps.Feedser.Source.Osv.Tests/Fixtures/` and are kept
deterministic so the parity report remains reproducible.
To refresh the fixtures when GHSA/OSV payloads change:
1. Ensure outbound HTTPS access to `https://api.osv.dev` and `https://api.github.com`.
2. Run `UPDATE_PARITY_FIXTURES=1 dotnet test src/StellaOps.Feedser.Source.Osv.Tests/StellaOps.Feedser.Source.Osv.Tests.csproj`.
3. Commit the regenerated `osv-ghsa.*.json` files that the test emits (raw snapshots and canonical advisories).
The regen flow logs `[Parity]` messages and normalises `recordedAt` timestamps so the
fixtures stay stable across machines.
---
## CI job layout
```mermaid
flowchart LR
subgraph fast-path
U[xUnit] --> P[FsCheck] --> I1[Testcontainer API]
end
@@ -160,9 +160,9 @@ public interface IFeedConnector {
## 7) Exporters
* JSON exporter mirrors `aquasecurity/vuln-list` layout with deterministic ordering and reproducible timestamps.
* Trivy DB exporter shells out to `trivy-db build`, produces Bolt archives, and reuses unchanged blobs from the last full baseline when running in delta mode. The exporter annotates `metadata.json` with `mode`, `baseExportId`, `baseManifestDigest`, `resetBaseline`, and `delta.changedFiles[]`/`delta.removedPaths[]`, and honours `publishFull` / `publishDelta` (ORAS) plus `includeFull` / `includeDelta` (offline bundle) toggles.
* `StellaOps.Feedser.Storage.Mongo` provides cursors for delta exports based on `export_state.exportCursor` and the persisted per-file manifest (`export_state.files`).
* Export jobs produce OCI tarballs (layer media type `application/vnd.aquasec.trivy.db.layer.v1.tar+gzip`) and optionally push via ORAS; `metadata.json` accompanies each layout so mirrors can decide between full refreshes and deltas.
---
@@ -0,0 +1,155 @@
# Authority Plug-in Developer Guide
> **Status:** Ready for Docs/DOC4 editorial review as of 2025-10-10. Content aligns with PLG6 acceptance criteria and references stable Authority primitives.
## 1. Overview
Authority plug-ins extend the **StellaOps Authority** service with custom identity providers, credential stores, and client-management logic. Unlike Feedser plug-ins (which ingest or export advisories), Authority plug-ins participate directly in authentication flows:
- **Use cases:** integrate corporate directories (LDAP/AD), delegate to external IDPs, enforce bespoke password/lockout policies, or add client provisioning automation.
- **Constraints:** plug-ins load only during service start (no hot-reload), must function without outbound internet access, and must emit deterministic results for identical configuration and input data.
- **Ship targets:** target the same .NET 10 preview as the host, honour offline-first requirements, and provide clear diagnostics so operators can triage issues from `/ready`.
## 2. Architecture Snapshot
Authority hosts follow a deterministic plug-in lifecycle. The flow below can be rendered as a sequence diagram in the final authored documentation, but all touchpoints are described here for offline viewers:
1. **Configuration load** - `AuthorityPluginConfigurationLoader` resolves YAML manifests under `etc/authority.plugins/`.
2. **Assembly discovery** - the shared `PluginHost` scans `PluginBinaries/Authority` for `StellaOps.Authority.Plugin.*.dll` assemblies.
3. **Registrar execution** - each assembly is searched for `IAuthorityPluginRegistrar` implementations. Registrars bind options, register services, and optionally queue bootstrap tasks.
4. **Runtime** - the host resolves `IIdentityProviderPlugin` instances, uses capability metadata to decide which OAuth grants to expose, and invokes health checks for readiness endpoints.
**Data persistence primer:** the standard Mongo-backed plugin stores users in collections named `authority_users_<pluginName>` and lockout metadata in embedded documents. Additional plugins must document their storage layout and provide deterministic collection naming to honour the Offline Kit replication process.
## 3. Capability Metadata
Capability flags let the host reason about what your plug-in supports:
- Declare capabilities in your descriptor using the string constants from `AuthorityPluginCapabilities` (`password`, `mfa`, `clientProvisioning`, `bootstrap`). The configuration loader now validates these tokens and rejects unknown values at startup.
- `AuthorityIdentityProviderCapabilities.FromCapabilities` projects those strings into strongly typed booleans (`SupportsPassword`, etc.). Authority Core will use these flags when wiring flows such as the password grant. Built-in plugins (e.g., Standard) will fail fast or force-enable required capabilities if the descriptor is misconfigured, so keep manifests accurate.
- Typical configuration (`etc/authority.plugins/standard.yaml`):
```yaml
plugins:
  descriptors:
    standard:
      assemblyName: "StellaOps.Authority.Plugin.Standard"
      capabilities:
        - password
        - bootstrap
```
- Only declare a capability if the plug-in genuinely implements it. For example, if `SupportsClientProvisioning` is `true`, the plug-in must supply a working `IClientProvisioningStore`.
**Operational reminder:** the Authority host surfaces capability summaries during startup (see `AuthorityIdentityProviderRegistry` log lines). Use those logs during smoke tests to ensure manifests align with expectations.
## 4. Project Scaffold
- Target **.NET 10 preview**, enable nullable, treat warnings as errors, and mark Authority plug-ins with `<IsAuthorityPlugin>true</IsAuthorityPlugin>`.
- Minimum references:
- `StellaOps.Authority.Plugins.Abstractions` (contracts & capability helpers)
- `StellaOps.Plugin` (hosting/DI helpers)
- `StellaOps.Auth.*` libraries as needed for shared token utilities (optional today).
- Example `.csproj` (trimmed from `StellaOps.Authority.Plugin.Standard`):
```xml
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<Nullable>enable</Nullable>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
<IsAuthorityPlugin>true</IsAuthorityPlugin>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Authority.Plugins.Abstractions\StellaOps.Authority.Plugins.Abstractions.csproj" />
<ProjectReference Include="..\..\StellaOps.Plugin\StellaOps.Plugin.csproj" />
</ItemGroup>
</Project>
```
(Add other references—e.g., MongoDB driver, shared auth libraries—according to your implementation.)
## 5. Implementing `IAuthorityPluginRegistrar`
- Create a parameterless registrar class that returns your plug-in type name via `PluginType`.
- Use `AuthorityPluginRegistrationContext` to:
- Bind options (`AddOptions<T>(pluginName).Bind(...)`).
- Register singletons for stores/enrichers using manifest metadata.
- Register any hosted bootstrap tasks (e.g., seed admin users).
- Always validate configuration inside `PostConfigure` and throw meaningful `InvalidOperationException` to fail fast during startup.
- Use the provided `ILoggerFactory` from DI; avoid static loggers or console writes.
- Example skeleton:
```csharp
internal sealed class MyPluginRegistrar : IAuthorityPluginRegistrar
{
public string PluginType => "my-custom";
public void Register(AuthorityPluginRegistrationContext context)
{
var name = context.Plugin.Manifest.Name;
context.Services.AddOptions<MyPluginOptions>(name)
.Bind(context.Plugin.Configuration)
.PostConfigure(opts => opts.Validate(name));
context.Services.AddSingleton<IIdentityProviderPlugin>(sp =>
new MyIdentityProvider(context.Plugin, sp.GetRequiredService<MyCredentialStore>(),
sp.GetRequiredService<MyClaimsEnricher>(),
sp.GetRequiredService<ILogger<MyIdentityProvider>>()));
}
}
```
## 6. Identity Provider Surface
- Implement `IIdentityProviderPlugin` to expose:
- `IUserCredentialStore` for password validation and user CRUD.
- `IClaimsEnricher` to append roles/attributes onto issued principals.
- Optional `IClientProvisioningStore` for machine-to-machine clients.
- `AuthorityIdentityProviderCapabilities` to advertise supported flows.
- Password guidance:
- Prefer Argon2 (Security Guild upcoming recommendation); Standard plug-in currently ships PBKDF2 with easy swap via `IPasswordHasher`.
- Enforce password policies before hashing to avoid storing weak credentials.
- Health checks should probe backing stores (e.g., Mongo `ping`) and return `AuthorityPluginHealthResult` so `/ready` can surface issues.
- When supporting additional factors (e.g., TOTP), implement `SupportsMfa` and document the enrolment flow for resource servers.
## 7. Configuration & Secrets
- Authority looks for manifests under `etc/authority.plugins/`. Each YAML file maps directly to a plug-in name.
- Support environment overrides using `STELLAOPS_AUTHORITY_PLUGINS__DESCRIPTORS__<NAME>__...`; see the sketch after this list.
- Never store raw secrets in git: allow operators to supply them via `.local.yaml`, environment variables, or injected secret files. Document which keys are mandatory.
- Validate configuration as soon as the registrar runs; use explicit error messages to guide operators. The Standard plug-in now enforces complete bootstrap credentials (username + password) and positive lockout windows via `StandardPluginOptions.Validate`.
- Cross-reference bootstrap workflows with `docs/ops/authority_bootstrap.md` (to be published alongside CORE6) so operators can reuse the same payload formats for manual provisioning.
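For instance, disabling the sample LDAP descriptor without editing YAML could look like this (the leaf key names are assumptions based on the sample manifest):
```bash
export STELLAOPS_AUTHORITY_PLUGINS__DESCRIPTORS__LDAP__ENABLED=false
export STELLAOPS_AUTHORITY_PLUGINS__DESCRIPTORS__STANDARD__CONFIGFILE="standard.local.yaml"
```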
## 8. Logging, Metrics, and Diagnostics
- Always log via the injected `ILogger<T>`; include `pluginName` and correlation IDs where available.
- Activity/metric names should align with `AuthorityTelemetry` constants (`service.name=stellaops-authority`).
- Expose additional diagnostics via structured logging rather than writing custom HTTP endpoints; the host will integrate these into `/health` and `/ready`.
- Emit metrics with stable names (`auth.plugins.<pluginName>.*`) when introducing custom instrumentation; coordinate with the Observability guild to reserve prefixes.
## 9. Testing & Tooling
- Unit tests: use Mongo2Go (or similar) to exercise credential stores without hitting production infrastructure (`StandardUserCredentialStoreTests` is a template).
- Determinism: fix timestamps to UTC and sort outputs consistently; avoid random GUIDs unless stable.
- Smoke tests: launch `dotnet run --project src/StellaOps.Authority/StellaOps.Authority` with your plug-in under `PluginBinaries/Authority` and verify `/ready`.
- Example verification snippet:
```csharp
[Fact]
public async Task VerifyPasswordAsync_ReturnsSuccess()
{
var store = CreateCredentialStore();
await store.UpsertUserAsync(new AuthorityUserRegistration("alice", "Pa55!", null, null, false,
Array.Empty<string>(), new Dictionary<string, string?>()), CancellationToken.None);
var result = await store.VerifyPasswordAsync("alice", "Pa55!", CancellationToken.None);
Assert.True(result.Succeeded);
Assert.True(result.User?.Roles.Count == 0);
}
```
## 10. Packaging & Delivery
- Output assembly should follow `StellaOps.Authority.Plugin.<Name>.dll` so the host's search pattern picks it up.
- Place the compiled DLL plus dependencies under `PluginBinaries/Authority` for offline deployments; include hashes/signatures in release notes (Security Guild guidance forthcoming).
- Document any external prerequisites (e.g., CA cert bundle) in your plug-in README.
- Update `etc/authority.plugins/<plugin>.yaml` samples and include deterministic SHA256 hashes for optional bootstrap payloads when distributing Offline Kit artefacts.
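A release-prep sketch for recording those hashes (the output path is illustrative):
```bash
# Record deterministic digests for every Authority plug-in assembly.
sha256sum PluginBinaries/Authority/StellaOps.Authority.Plugin.*.dll \
  > authority-plugin-hashes.sha256
```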
## 11. Checklist & Handoff
- ✅ Capabilities declared and validated in automated tests.
- ✅ Bootstrap workflows documented (if `bootstrap` capability used) and repeatable.
- ✅ Local smoke test + unit/integration suites green (`dotnet test`).
- ✅ Operational docs updated: configuration keys, secrets guidance, troubleshooting.
- Submit the developer guide update referencing PLG6/DOC4 and tag DevEx + Docs reviewers for sign-off.
---
**Next documentation actions:**
- Add rendered architectural diagram (PlantUML/mermaid) reflecting the lifecycle above once the Docs toolkit pipeline is ready.
- Reference the LDAP RFC (`docs/rfcs/authority-plugin-ldap.md`) in the capability section once review completes.
- Sync terminology with `docs/11_AUTHORITY.md` when that chapter is published to keep glossary terms consistent.

View File

@@ -0,0 +1,136 @@
# RFC: StellaOps.Authority.Plugin.Ldap
**Status:** Draft for review by Auth Guild, Security Guild, DevEx (2025-10-10)
**Authors:** Plugin Team 4 (Auth Libraries & Identity Providers)
**Related initiatives:** PLG7 backlog, CORE5 event handlers, DOC4 developer guide
## 1. Problem Statement
Many on-prem StellaOps deployments rely on existing LDAP/Active Directory domains for workforce identity. The current Standard Mongo-backed plugin requires duplicating users and secrets, which increases operational overhead and violates corporate policy in some regulated environments. We need a sovereign, offline-friendly LDAP plugin that:
- Supports password grant and bootstrap provisioning flows without storing credentials in Mongo.
- Enforces StellaOps security policies (lockout, password policy hints, audit logging) while delegating credential validation to LDAP.
- Operates deterministically in offline or partially connected environments by caching directory metadata when necessary.
## 2. Goals
- Provide a first-party `StellaOps.Authority.Plugin.Ldap` plugin advertising `password` and optional `clientProvisioning` capabilities at launch.
- Support username/password authentication against LDAP bind operations with configurable DN templates.
- Allow optional bootstrap seeding of service accounts by writing into LDAP (guarded behind explicit configuration) or by mapping to pre-existing entries.
- Surface directory-derived claims (groups, attributes) for downstream authorization via `IClaimsEnricher`.
- Integrate with Authority lockout telemetry and structured logging without persisting secrets locally.
## 3. Non-Goals
- Implement multi-factor authentication out of the box (future enhancement once TOTP/WebAuthn strategy is finalised).
- Provide write-heavy directory management (e.g., user creation workflows) beyond optional bootstrap service account seeding.
- Replace the Standard plugin; both must remain supported and selectable per environment.
## 4. Key Constraints & Assumptions
- Offline-first posture: deployments may operate without outbound internet and with intermittent directory connectivity (e.g., read-only replicas). The plugin must tolerate transient LDAP connectivity failures and degrade gracefully.
- Deterministic behaviour: identical configuration and directory state must yield identical token issuance results. Cached metadata (e.g., group lookups) must have defined expiration.
- Security: No plaintext credential storage; TLS must be enforced for LDAP connections unless explicitly overridden for air-gapped lab environments.
## 5. High-Level Architecture
1. **Configuration binding** (`ldap.yaml`): defines server endpoints, bind strategy, claim mapping, and optional bootstrap overrides.
2. **Connection factory**: pooled LDAP connections using a resilient client (preferred dependency: `Novell.Directory.Ldap.NETStandard`).
3. **Credential validator** (`IUserCredentialStore`): performs bind-as-user flow with optional fallback bind using service account when directories disallow anonymous search.
4. **Claims enricher** (`IClaimsEnricher`): queries group membership/attributes and projects them into canonical roles/claims.
5. **Optional client provisioning** (`IClientProvisioningStore`): maintains machine/service principals either in Mongo (metadata) or via LDAP `serviceConnectionPoint` entries based on configuration.
6. **Health checks**: periodic LDAP `whoami` or `search` probes surfaced through `AuthorityPluginHealthResult`.
```
Authority Host
├── Plugin Manifest (ldap)
├── Registrar → registers ConnectionFactory, LdapCredentialStore, LdapClaimsEnricher
├── Password Grant Handler → CredentialStore.VerifyPasswordAsync → LDAP Bind
└── Claims Pipeline → ClaimsEnricher.EnrichAsync → LDAP group lookup
```
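A manual smoke test of that bind-as-user path with stock OpenLDAP client tools (a sketch; host, DN, and secret paths follow the draft configuration in §6 and are illustrative):
```bash
# Bind exactly as the credential store would (LDAPS, no StartTLS).
ldapwhoami -H ldaps://ldap.example.internal:636 \
  -D "uid=alice,ou=people,dc=example,dc=internal" -w 'user-password'

# Fetch the group memberships the claims enricher would project.
ldapsearch -H ldaps://ldap.example.internal:636 \
  -D "cn=stellaops-bind,ou=service,dc=example,dc=internal" \
  -w "$(cat /etc/stellaops/secrets/ldap-bind.txt)" \
  -b "dc=example,dc=internal" "(uid=alice)" memberOf
```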
## 6. Configuration Schema (Draft)
```yaml
connection:
  host: "ldaps://ldap.example.internal"
  port: 636
  useStartTls: false
  validateCertificates: true
  bindDn: "cn=stellaops-bind,ou=service,dc=example,dc=internal"
  bindPasswordSecret: "file:/etc/stellaops/secrets/ldap-bind.txt"
  searchBase: "dc=example,dc=internal"
  usernameAttribute: "uid"
  userDnFormat: "uid={username},ou=people,dc=example,dc=internal" # optional template
security:
  requireTls: true
  allowedCipherSuites: [] # optional allow-list
  referralChasing: false
lockout:
  useAuthorityPolicies: true # reuse Authority lockout counters
  directoryLockoutAttribute: "pwdAccountLockedTime"
claims:
  groupAttribute: "memberOf"
  groupToRoleMap:
    "cn=stellaops-admins,ou=groups,dc=example,dc=internal": "operators"
    "cn=stellaops-read,ou=groups,dc=example,dc=internal": "auditors"
  extraAttributes:
    displayName: "displayName"
    email: "mail"
clientProvisioning:
  enabled: false
  containerDn: "ou=service,dc=example,dc=internal"
  secretAttribute: "userPassword"
health:
  probeIntervalSeconds: 60
  timeoutSeconds: 5
```
## 7. Capability Mapping
| Capability | Implementation Notes |
|------------|---------------------|
| `password` | Bind-as-user validation with Authority lockout integration. Mandatory. |
| `clientProvisioning` | Optional; when enabled, creates/updates LDAP entries for machine clients or stores metadata in Mongo if directory writes are disabled. |
| `bootstrap` | Exposed only when bootstrap manifest provides service account credentials AND directory write permissions are confirmed during startup. |
| `mfa` | Not supported in MVP. Future iteration may integrate TOTP attributes or external MFA providers. |
## 8. Operational Considerations
- **Offline cache:** provide optional Mongo cache for group membership to keep `/ready` responsive if LDAP is temporarily unreachable. Cache entries must include TTL and invalidation hooks.
- **Secrets management:** accept `file:` and environment variable references; integrate with existing `StellaOps.Configuration` secret providers.
- **Observability:** emit structured logs with event IDs (`LDAP_BIND_START`, `LDAP_BIND_FAILURE`, `LDAP_GROUP_LOOKUP`), counters for success/failure, and latency histograms.
- **Throttling:** reuse Authority rate-limiting middleware; add per-connection throttles to avoid saturating directory servers during brute-force attacks.
## 9. Security & Compliance
- Enforce TLS (`ldaps://` or STARTTLS) by default. Provide explicit `allowInsecure` flag gated behind environment variable for lab/testing only.
- Support password hash migration by detecting directory lockout attributes and surfacing `RequiresPasswordReset` when policies demand changes.
- Log distinguished names only at `Debug` level to avoid leaking sensitive structure in default logs.
- Coordinate with Security Guild for penetration testing before GA; incorporate audit log entries for bind attempts and provisioning changes.
## 10. Testing Strategy
- **Unit tests:** mock LDAP connections to validate DN formatting, error mapping, and capability negotiation.
- **Integration tests:** run against an ephemeral OpenLDAP container (seeded via LDIF fixtures) within CI; see the container sketch after this list. Include offline cache regression (disconnect LDAP mid-test).
- **Determinism tests:** feed identical LDIF snapshots and configuration to ensure output tokens/claims remain stable across runs.
- **Smoke tests:** `dotnet test` harness plus manual `dotnet run` scenario verifying `/token` password grants and `/internal/users` bootstrap with LDAP-backed store.
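The integration harness could stand up that directory like so (image, port, and env names are assumptions; any LDIF-seedable OpenLDAP image works):
```bash
docker run --rm -d --name authority-ldap \
  -p 1389:1389 \
  -e LDAP_ROOT="dc=example,dc=internal" \
  -v "$PWD/fixtures/seed.ldif:/ldifs/seed.ldif:ro" \
  bitnami/openldap:latest
```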
## 11. Implementation Plan
1. Scaffold `StellaOps.Authority.Plugin.Ldap` project + tests (net10.0, `<IsAuthorityPlugin>` true).
2. Implement configuration options + validation (mirroring Standard plugin guardrails).
3. Build connection factory + credential store with bind logic.
4. Implement claims enricher and optional cache layer.
5. Add client provisioning store (optional) with toggles for read-only deployments.
6. Wire bootstrapper to validate connectivity/permissions and record findings in startup logs.
7. Extend developer guide with LDAP specifics (post-RFC acceptance).
8. Update Docs and TODO trackers; produce release notes entry once merged.
## 12. Open Questions
- Should client provisioning default to storing metadata in Mongo even when LDAP writes succeed (to preserve audit history)?
- Do we require LDAPS mutual TLS support (client certificates) for regulated environments? If yes, need to extend configuration schema.
- How will we map LDAP groups to Authority scopes/roles when names differ significantly? Consider supporting regex or mapping scripts.
## 13. Timeline (Tentative)
- **Week 1:** RFC review & sign-off.
- **Week 2-3:** Implementation & unit tests.
- **Week 4:** Integration tests + documentation updates.
- **Week 5:** Security review, release candidate packaging.
## 14. Approval
- **Auth Guild Lead:** _TBD_
- **Security Guild Representative:** _TBD_
- **DevEx Docs:** _TBD_
---
Please add comments inline or via PR review. Once approved, track execution under PLG7.


@@ -0,0 +1,17 @@
# Placeholder configuration for the LDAP identity provider plug-in.
# Replace values with your directory settings before enabling the plug-in.
connection:
host: "ldap.example.com"
port: 636
useTls: true
bindDn: "cn=service,dc=example,dc=com"
bindPassword: "CHANGE_ME"
queries:
userFilter: "(uid={username})"
groupFilter: "(member={distinguishedName})"
groupAttribute: "cn"
capabilities:
supportsPassword: true
supportsMfa: false


@@ -0,0 +1,21 @@
# Standard plugin configuration (Mongo-backed identity store).
bootstrapUser:
username: "admin"
password: "changeme"
passwordPolicy:
minimumLength: 12
requireUppercase: true
requireLowercase: true
requireDigit: true
requireSymbol: true
lockout:
enabled: true
maxAttempts: 5
windowMinutes: 15
tokenSigning:
# Path to the directory containing signing keys (relative paths resolve
# against this configuration file location).
keyDirectory: "../keys"

etc/authority.yaml.sample

@@ -0,0 +1,71 @@
# StellaOps Authority configuration template.
# Copy to ../etc/authority.yaml (relative to the Authority content root)
# and adjust values to fit your environment. Environment variables
# prefixed with STELLAOPS_AUTHORITY_ override these values at runtime.
# Example: STELLAOPS_AUTHORITY__ISSUER=https://authority.example.com
schemaVersion: 1
# Absolute issuer URI advertised to clients. Use HTTPS for anything
# beyond loopback development.
issuer: "https://authority.stella-ops.local"
# Token lifetimes expressed as HH:MM:SS or DD.HH:MM:SS.
accessTokenLifetime: "00:15:00"
refreshTokenLifetime: "30.00:00:00"
identityTokenLifetime: "00:05:00"
authorizationCodeLifetime: "00:05:00"
deviceCodeLifetime: "00:15:00"
# MongoDB storage connection details.
storage:
connectionString: "mongodb://localhost:27017/stellaops-authority"
# databaseName: "stellaops_authority"
commandTimeout: "00:00:30"
# Bootstrap administrative endpoints (initial provisioning).
bootstrap:
enabled: false
apiKey: "change-me"
defaultIdentityProvider: "standard"
# Directories scanned for Authority plug-ins. Relative paths resolve
# against the application content root, enabling air-gapped deployments
# that package plug-ins alongside binaries.
pluginDirectories:
- "../PluginBinaries/Authority"
# "/var/lib/stellaops/authority/plugins"
# Plug-in manifests live in descriptors below; per-plugin settings are stored
# in the configurationDirectory (YAML files). Authority will load any enabled
# plugins and surface their metadata/capabilities to the host.
plugins:
configurationDirectory: "../etc/authority.plugins"
descriptors:
standard:
type: "standard"
assemblyName: "StellaOps.Authority.Plugin.Standard"
enabled: true
configFile: "standard.yaml"
capabilities:
- password
- bootstrap
- clientProvisioning
metadata:
defaultRole: "operators"
# Example for an external identity provider plugin. Leave disabled unless
# the plug-in package exists under PluginBinaries/Authority.
ldap:
type: "ldap"
assemblyName: "StellaOps.Authority.Plugin.Ldap"
enabled: false
configFile: "ldap.yaml"
capabilities:
- password
- mfa
# CIDR ranges that bypass network-sensitive policies (e.g. on-host cron jobs).
# Keep the list tight: localhost is sufficient for most air-gapped installs.
bypassNetworks:
- "127.0.0.1/32"
- "::1/128"

etc/feedser.yaml.sample

@@ -0,0 +1,55 @@
# Feedser configuration template for StellaOps deployments.
# Copy to ../etc/feedser.yaml (relative to the web service content root)
# and adjust the values to match your environment. Environment variables
# (prefixed with FEEDSER_) override these settings at runtime.
storage:
driver: mongo
# Mongo connection string. Use SRV URI or standard connection string.
dsn: "mongodb://feedser:feedser@mongo:27017/feedser?authSource=admin"
# Optional database name; defaults to the name embedded in the DSN or 'feedser'.
database: "feedser"
# Mongo command timeout in seconds.
commandTimeoutSeconds: 30
plugins:
# Feedser resolves plug-ins relative to the content root; override as needed.
baseDirectory: ".."
directory: "PluginBinaries"
searchPatterns:
- "StellaOps.Feedser.Plugin.*.dll"
telemetry:
enabled: true
enableTracing: false
enableMetrics: false
enableLogging: true
minimumLogLevel: "Information"
serviceName: "stellaops-feedser"
# Configure OTLP endpoint when shipping traces/metrics/logs out-of-band.
otlpEndpoint: ""
# Optional headers for OTLP exporters, for example authentication tokens.
otlpHeaders: {}
# Attach additional resource attributes to telemetry exports.
resourceAttributes:
deployment.environment: "local"
# Emit console exporters for local debugging.
exportConsole: true
authority:
enabled: false
# Issuer advertised by StellaOps Authority (e.g. https://authority.stella-ops.local).
issuer: "https://authority.stella-ops.local"
# Optional explicit metadata address; defaults to {issuer}/.well-known/openid-configuration.
metadataAddress: ""
requireHttpsMetadata: true
backchannelTimeoutSeconds: 30
tokenClockSkewSeconds: 60
audiences:
- "api://feedser"
requiredScopes:
- "feedser.jobs.trigger"
# Networks allowed to bypass authentication (loopback by default for on-host cron jobs).
bypassNetworks:
- "127.0.0.1/32"
- "::1/128"


@@ -1,6 +1,6 @@
{
"sdk": {
"version": "10.0.100-preview.7.25380.108",
"rollForward": "latestMinor"
}
}
{
"sdk": {
"version": "10.0.100-preview.7.25380.108",
"rollForward": "latestMinor"
}
}


@@ -0,0 +1,45 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net9.0</TargetFramework>
<LangVersion>latest</LangVersion>
<Nullable>enable</Nullable>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<ItemGroup>
<Content Remove="NuGet.config" />
<None Include="NuGet.config">
<CopyToOutputDirectory>Never</CopyToOutputDirectory>
</None>
</ItemGroup>
<ItemGroup>
<PackageReference Include="OpenIddict.Client" Version="6.4.0" />
<PackageReference Include="OpenIddict.Client.AspNetCore" Version="6.4.0" />
<PackageReference Include="OpenIddict.Client.DataProtection" Version="6.4.0" />
<PackageReference Include="OpenIddict.Client.SystemNetHttp" Version="6.4.0" />
<PackageReference Include="OpenIddict.Server.AspNetCore" Version="6.4.0" />
<PackageReference Include="OpenIddict.Validation.DataProtection" Version="6.4.0" />
<PackageReference Include="OpenIddict.Validation.SystemNetHttp" Version="6.4.0" />
<PackageReference Include="Polly" Version="8.6.1" />
<PackageReference Include="System.Text.Encodings.Web" Version="9.0.6" />
<PackageReference Include="OpenIddict.Validation.AspNetCore" Version="6.4.0" />
<PackageReference Include="Microsoft.AspNetCore.Authentication.Cookies" Version="2.3.0" />
<PackageReference Include="OpenIddict.Abstractions" Version="6.4.0" />
<PackageReference Include="System.Text.Json" Version="9.0.6" />
<PackageReference Include="System.IdentityModel.Tokens.Jwt" Version="8.12.1" />
<PackageReference Include="Microsoft.IdentityModel.Protocols.OpenIdConnect" Version="8.12.1" />
<PackageReference Include="Microsoft.AspNetCore.Authentication.JwtBearer" Version="9.0.6" />
<PackageReference Include="Microsoft.AspNetCore.Authentication.Abstractions" Version="2.3.0" />
<PackageReference Include="Microsoft.AspNetCore.Cors" Version="2.3.0" />
<PackageReference Include="Microsoft.Extensions.Hosting" Version="9.0.6" />
<PackageReference Include="Microsoft.Extensions.Http" Version="9.0.6" />
<PackageReference Include="Microsoft.AspNetCore.DataProtection.StackExchangeRedis" Version="9.0.6" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../Ablera.Serdica.Common.Tools/Ablera.Serdica.Common.Tools.csproj" />
<ProjectReference Include="../Ablera.Serdica.Extensions.Redis/Ablera.Serdica.Extensions.Redis.csproj" />
</ItemGroup>
</Project>


@@ -0,0 +1,16 @@
using System.Collections.Immutable;
namespace Ablera.Serdica.Authentication.Constants;
public static class ConstantsClass
{
public const string HttpContextItemsSession = "Session";
public const string HttpContextEndpoint = "Endpoint";
public const string HttpContextEndpointRequiredRoles = "EndpointRequiredRoles";
public const string RedisKeyPrefixKey = "serdica-session-dp";
public const string DataProtectionApplicationName = "SerdicaAuth";
public const string AuthenticationScheme = "SerdicaAuthentication";
public const string SerdicaAPIAudience = "SerdicaAPI";
public const string DefaultRolePrincipalPrefix = "__principal";
}


@@ -0,0 +1,10 @@
namespace Ablera.Serdica.Authentication.Constants
{
public static class SerdicaClaims
{
public const string Anonymous = "__anonymous";
public const string IsAuthenticated = "__isAuthenticated";
public const string DefaultIdentity = "__default";
public const string RoleSuperUser = "DBA";
}
}


@@ -0,0 +1,130 @@
using System;
using System.IO;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Authentication;
using Microsoft.AspNetCore.Authentication.Cookies;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.IdentityModel.Tokens;
using OpenIddict.Validation.AspNetCore;
using OpenIddict.Validation.SystemNetHttp;
using StackExchange.Redis;
using Ablera.Serdica.Authentication.Models;
using Ablera.Serdica.Authentication.Models.Oidc;
using Ablera.Serdica.Authentication.Utilities;
using Microsoft.AspNetCore.DataProtection;
using Ablera.Serdica.Authentication.Services;
using Microsoft.AspNetCore.Authentication.JwtBearer;
using Ablera.Serdica.Authentication.Constants;
using OpenIddict.Client;
using OpenIddict.Validation;
using System.Linq;
using System.Collections.Generic;
using System.Security.Claims;
using Microsoft.AspNetCore.Identity;
using System.Security.Principal;
using OpenIddict.Client.AspNetCore;
using Microsoft.AspNetCore.Authorization;
using Ablera.Serdica.DependencyInjection;
using static Ablera.Serdica.Authentication.Constants.ConstantsClass;
using static OpenIddict.Abstractions.OpenIddictConstants;
using System.IdentityModel.Tokens.Jwt;
using static OpenIddict.Client.OpenIddictClientEvents;
namespace Ablera.Serdica.DependencyInjection;
public sealed class AcceptAnyIssuer :
IOpenIddictClientHandler<HandleConfigurationResponseContext>
{
public ValueTask HandleAsync(HandleConfigurationResponseContext ctx)
{
// Short-circuit the built-in ValidateIssuer handler.
ctx.SkipRequest();
return default;
}
}
public static class JwtBearerWithSessionAuthenticationExtensions
{
public static IServiceCollection AddDataProtection(this IServiceCollection services, IConfiguration configuration)
{
//------------------------------------------------------------------
// 1) read configuration
//------------------------------------------------------------------
var redisConfiguration = RedisConfigurationGetter.GetRedisConfiguration(configuration);
var multiplexer = ConnectionMultiplexer.Connect(redisConfiguration);
services.AddSingleton<IConnectionMultiplexer>(multiplexer);
//------------------------------------------------------------------
// 2) Data-Protection (encrypt/sign cookies) keys stored in Redis
//------------------------------------------------------------------
var xmlRepo = new RedisAndFileSystemXmlRepository(
multiplexer.GetDatabase(), RedisKeyPrefixKey);
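// Note: the XmlRepository assigned via AddKeyManagementOptions below runs
// after (and therefore overrides) the repository registered by
// PersistKeysToStackExchangeRedis, since KeyManagementOptions configure
// actions apply in registration order.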
services.AddDataProtection()
.SetApplicationName(DataProtectionApplicationName)
.PersistKeysToStackExchangeRedis(multiplexer, RedisKeyPrefixKey)
.AddKeyManagementOptions(o => o.XmlRepository = xmlRepo)
.SetDefaultKeyLifetime(TimeSpan.FromDays(30));
return services;
}
public static IServiceCollection AddMicroserviceAuthentication(
this IServiceCollection services,
IConfiguration cfg,
IHostEnvironment env)
{
// ---------------------------------------------------------------------
// 1) Read and validate the OIDC client settings
// ---------------------------------------------------------------------
var oidc = cfg.GetSection(nameof(OidcValidation)).Get<OidcValidation>()
?? throw new InvalidOperationException($"{nameof(OidcValidation)} section is missing.");
if (string.IsNullOrWhiteSpace(oidc.EncryptionKey))
throw new InvalidOperationException($"{nameof(oidc.EncryptionKey)} is not defined.");
// Issuer value found in the `iss` claim of the tokens (HTTPS as issued by the IdP)
var issuerUrl = new Uri(oidc.IssuerUrl
?? throw new InvalidOperationException($"{nameof(oidc.IssuerUrl)} is not defined."));
services.Configure<OidcValidation>(cfg.GetSection(nameof(OidcValidation)));
services
.AddDataProtection(cfg)
.AddOpenIddict()
.AddValidation(opt =>
{
opt.UseSystemNetHttp();
opt.UseAspNetCore();
opt.SetIssuer(issuerUrl);
if (!string.IsNullOrWhiteSpace(oidc.ConfigurationUrl))
{
opt.Configure(x =>
{
x.ConfigurationEndpoint = new Uri(oidc.ConfigurationUrl);
});
}
opt.AddEncryptionKey(
new SymmetricSecurityKey(Convert.FromBase64String(oidc.EncryptionKey)));
});
services.AddAuthorization(options =>
options.FallbackPolicy = new AuthorizationPolicyBuilder()
.RequireAuthenticatedUser()
.Build())
.AddAuthentication(options =>
{
options.DefaultScheme = ConstantsClass.AuthenticationScheme;
options.DefaultChallengeScheme = ConstantsClass.AuthenticationScheme;
})
.AddScheme<JwtBearerOptions, SerdicaJwtBearerAuthenticationHandler>(
ConstantsClass.AuthenticationScheme, _ => { });
return services;
}
}


@@ -0,0 +1,54 @@
using Microsoft.AspNetCore.Http;
using NetTools;
using System;
using System.Linq;
using System.Net;
using System.Collections.Generic;
using Ablera.Serdica.Authentication.Utilities;
using Ablera.Serdica.Authentication.Models.Oidc;
using Ablera.Serdica.Common.Tools.Utilities;
namespace Ablera.Serdica.Authority.Extensions;
public static class AllowedMaskExtensions
{
// Lazily built the first time AllowedMaskExtensions is referenced.
private static readonly IReadOnlyCollection<IPAddressRange> AssociatedNetworks = ListeningNetworksRetriever.Retrieve();
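// Per-field precedence: a value on the client mask wins; fields the client
// leaves null fall back to the global mask. E.g. client { Ports = [443] }
// merged with global { Hosts = ["api"] } yields { Ports = [443], Hosts = ["api"] }.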
public static AllowedMask? MergeWith(this AllowedMask? client, AllowedMask? global)
=> (client, global) switch
{
(null, null) => null,
(null, _) => global,
_ => new()
{
SameNetworks = client.SameNetworks ?? global?.SameNetworks,
Networks = client.Networks ?? global?.Networks,
Hosts = client.Hosts ?? global?.Hosts,
Ports = client.Ports ?? global?.Ports
}
};
public static bool MatchesRemote(this AllowedMask allow, HttpContext http)
{
var remoteIp = http.Connection.RemoteIpAddress ?? IPAddress.None;
var host = http.Request.Host.Host;
var port = http.Request.Host.Port ?? 0;
bool ipOk = allow.Networks == null ||
allow.Networks.Any(net => IPAddressRange.Parse(net).Contains(remoteIp));
bool hostOk = allow.Hosts == null ||
allow.Hosts.Any(h => StringComparer.OrdinalIgnoreCase.Equals(h, host));
bool portOk = allow.Ports == null || allow.Ports.Contains(port);
// Same-network rule: only enforced when SameNetworks == true
bool sameNetworkOk =
allow.SameNetworks != true || // Flag not enabled → no restriction
AssociatedNetworks == null || // Could not determine our own network
AssociatedNetworks.Any(network => network.Contains(remoteIp));
return ipOk && hostOk && portOk && sameNetworkOk;
}
}


@@ -0,0 +1,67 @@
using Microsoft.AspNetCore.Identity;
using OpenIddict.Abstractions;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Claims;
using static OpenIddict.Abstractions.OpenIddictConstants;
namespace Ablera.Serdica.Authentication.Extensions
{
public static class ClaimExtensions
{
public static IReadOnlyCollection<Claim> BuildClaims<TKeyType>(
this IdentityUser<TKeyType> identity,
string? userName = null, string? givenName = null, string? surname = null)
where TKeyType : IEquatable<TKeyType> => new[]
{
new Claim(ClaimTypes.NameIdentifier, identity.Id?.ToString() ?? string.Empty),
new Claim(Claims.Subject, identity.Id?.ToString() ?? string.Empty),
new Claim(ClaimTypes.Name, userName ?? identity.UserName ?? string.Empty),
new Claim(ClaimTypes.GivenName, givenName ?? string.Empty),
new Claim(ClaimTypes.Surname, surname ?? string.Empty),
new Claim(ClaimTypes.Email, identity.Email ?? string.Empty)
};
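// Destination routing: name claims flow to both the access and identity
// tokens; email/role claims only when the matching scope was granted;
// every other claim lands in the access token only.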
public static IEnumerable<string> DestinationsSelector(this Claim c) => c.Type switch
{
Claims.Name or Claims.PreferredUsername
=> new[] { Destinations.AccessToken, Destinations.IdentityToken },
Claims.Email when c.Subject?.HasScope(Scopes.Email) == true
=> new[] { Destinations.AccessToken, Destinations.IdentityToken },
Claims.Role when c.Subject?.HasScope(Scopes.Roles) == true
=> new[] { Destinations.AccessToken, Destinations.IdentityToken },
_ => new[] { Destinations.AccessToken }
};
public static string? GetUserId(this ClaimsPrincipal user)
=> user.Claims.GetUserId() ?? Guid.Empty.ToString();
public static string? GetUserEmail(this ClaimsPrincipal user)
=> user.Claims
.FirstOrDefault(x => x.Type == ClaimTypes.Email)
?.Value?.ToString();
private static string? GetUserId(this IEnumerable<Claim> claims)
=> claims
.FirstOrDefault(x => x.Type == ClaimTypes.NameIdentifier)
?.Value?.ToString()
?? claims
.FirstOrDefault(x => x.Type == ClaimTypes.Name)
?.Value?.ToString();
public static string? GetClientApplicationId(this ClaimsPrincipal user)
=> user.Claims.GetClientApplicationId();
private static string? GetClientApplicationId(this IEnumerable<Claim> claims)
=> claims
.FirstOrDefault(x => x.Type == Claims.Subject)
?.Value?.ToString()
?? claims
.FirstOrDefault(x => x.Type == Claims.ClientId)
?.Value?.ToString();
}
}


@@ -0,0 +1,16 @@
using System.Security.Claims;
using OpenIddict.Abstractions;
using static OpenIddict.Abstractions.OpenIddictConstants;
namespace Ablera.Serdica.Authentication.Extensions;
public static class PrincipalBuilder
{
public static ClaimsPrincipal Build(string clientId, string authenticationSchema)
{
var claimsIdentity = new ClaimsIdentity(authenticationSchema);
claimsIdentity.AddClaim(Claims.Subject, clientId, Destinations.AccessToken);
var claimsPrincipal = new ClaimsPrincipal(claimsIdentity);
return claimsPrincipal;
}
}


@@ -0,0 +1,18 @@
using Ablera.Serdica.Authentication.Models;
using Ablera.Serdica.Common.Tools.Extensions;
using Microsoft.AspNetCore.Http;
using System.Text.Json;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authentication.Extensions;
public static class ProxyResultExtension
{
public static async Task ReturnHttpResponse(this ProxyResult proxyResult, HttpResponse httpResponse)
{
if (httpResponse.HasStarted) return;
httpResponse.StatusCode = (int)proxyResult.StatusCode;
httpResponse.ContentType = "application/json";
await JsonSerializer.SerializeAsync(httpResponse.Body, proxyResult, proxyResult.GetType(), GlobalJsonSerializerOptions.JsonSerializerOptions);
}
}


@@ -0,0 +1,10 @@
namespace Ablera.Serdica.Authentication.Models.Oidc;
public record AllowedMask
{
public bool? SameNetworks { get; init; }
public string[]? Hosts { get; init; }
public string[]? Networks { get; init; }
public int[]? Ports { get; init; }
public string[]? ClientIds { get; init; }
}


@@ -0,0 +1,7 @@
namespace Ablera.Serdica.Authentication.Models.Oidc;
public record ClaimTypeAndValue
{
public required string Type { get; init; }
public required string Value { get; init; }
}


@@ -0,0 +1,8 @@
namespace Ablera.Serdica.Authentication.Models.Oidc;
public record ClientCredentials : ConnectionSettingsBase
{
public required string[] Scopes { get; init; }
public required string[] Claims { get; init; }
public bool RequireHttps { get; init; } = true;
}


@@ -0,0 +1,22 @@
using System.Collections.Generic;
using System.Text.Json;
namespace Ablera.Serdica.Authentication.Models.Oidc;
public abstract record ConnectionSettingsBase
{
public required string[] GrantTypes { get; set; }
public required string ClientId { get; init; }
public string? ClientSecret { get; init; }
public required string ClientType { get; init; } = "public";
public required string DisplayName { get; init; }
public string[]? RedirectUris { get; init; }
public string[]? PostLogoutRedirectUris { get; init; }
public Dictionary<string, JsonElement>? Properties { get; init; }
}


@@ -0,0 +1,17 @@
namespace Ablera.Serdica.Authority.Models;
public record Endpoints
{
public required string Authorization { get; init; } = "/connect/authorize";
public required string Introspection { get; init; } = "/connect/introspect";
public required string Token { get; init; } = "/connect/token";
public required string Userinfo { get; init; } = "/connect/userinfo";
public required string EndUserVerification { get; init; } = "/connect/verification";
public required string Revocation { get; init; } = "/connect/revocation";
public required string Logout { get; init; } = "/connect/endsession";
public required string CheckSession { get; init; } = "/connect/checksession";
public required string Device { get; init; } = "/connect/device";
public required string Jwks { get; init; } = "/connect/jwks";
public required string Configuration { get; init; } = "/.well-known/openid-configuration";
}


@@ -0,0 +1,15 @@
using Ablera.Serdica.Authority.Models;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authentication.Models.Oidc;
public record OidcValidation : OidcSettingsBase
{
public required string IssuerUrl { get; set; }
public required string? ConfigurationUrl { get; set; }
public AllowedMask[] BypassValidationsMasks { get; init; } = Array.Empty<AllowedMask>();
}


@@ -0,0 +1,21 @@
using System;
using System.Linq;
using System.Text;
using System.Text.Json.Serialization;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Models.Oidc;
namespace Ablera.Serdica.Authority.Models;
public record OidcServerSettings : OidcSettingsBase
{
public Endpoints Endpoints { get; init; } = null!;
public required string IssuerUrl { get; init; }
public bool? RequireHttps { get; set; } = false;
public required string CookieName { get; init; } = "oauth2-authorization";
public required int CookieExpirationInMinutes { get; init; } = 2;
public required int AuthorizationTokenDurationInMinutes { get; init; } = 5;
public RegisteredClient[] RegisteredClients { get; init; } = Array.Empty<RegisteredClient>();
public string[] Claims { get; init; } = Array.Empty<string>();
public string[] Scopes { get; init; } = Array.Empty<string>();
}


@@ -0,0 +1,7 @@
namespace Ablera.Serdica.Authentication.Models.Oidc;
public abstract record OidcSettingsBase
{
public string? EncryptionKey { get; init; }
public AllowedMask[]? AllowedMasks { get; init; }
}


@@ -0,0 +1,15 @@
using System.Collections.Generic;
namespace Ablera.Serdica.Authentication.Models.Oidc;
public record RegisteredClient : ConnectionSettingsBase
{
public string[]? Permissions { get; init; }
public string[]? Requirements { get; init; }
public AllowedMask[]? AllowedMasks { get; init; }
public ClaimTypeAndValue[]? BuiltinClaims { get; init; } = [];
public Dictionary<string, string?>? Settings { get; init; }
}


@@ -0,0 +1,15 @@
using System.Collections.Generic;
using System.Net;
using System.Text.Json.Nodes;
namespace Ablera.Serdica.Authentication.Models;
public sealed class ProxyResult
{
public HttpStatusCode StatusCode { get; init; } = HttpStatusCode.OK;
public JsonNode? Data { get; init; } // null ⇒ no body
public IDictionary<string, string>? Errors { get; init; }
public string? TraceId { get; init; }
public string? Title { get; init; }
public string? Type { get; init; }
}


@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<packageSources>
<add key="nuget-mirror" value="https://mirrors.ablera.dev/nuget/nuget-mirror/v3/index.json" />
<add key="GitlabSerdicaBackend" value="https://gitlab.ablera.dev/api/v4/projects/92/packages/nuget/index.json" />
</packageSources>
<packageSourceCredentials>
<GitlabSerdicaBackend>
<add key="Username" value="gitlab+deploy-token-3" />
<add key="ClearTextPassword" value="osdy7Ec2sVoSJC2Kaxvr" />
</GitlabSerdicaBackend>
</packageSourceCredentials>
</configuration>


@@ -0,0 +1,163 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Claims;
using System.Text.Encodings.Web;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Authentication;
using Microsoft.AspNetCore.Authentication.JwtBearer;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using OpenIddict.Abstractions;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.Authentication.Models;
using Ablera.Serdica.Authentication.Models.Oidc;
using Ablera.Serdica.Authority.Extensions;
using System.Net;
using OpenIddict.Validation.AspNetCore;
using Ablera.Serdica.Authentication.Extensions;
using static Ablera.Serdica.Authentication.Constants.ConstantsClass;
namespace Ablera.Serdica.Authentication.Services;
public sealed class SerdicaJwtBearerAuthenticationHandler : AuthenticationHandler<JwtBearerOptions>
{
private readonly OidcValidation oidcValidationSettings;
private readonly ILogger<SerdicaJwtBearerAuthenticationHandler> logger;
public SerdicaJwtBearerAuthenticationHandler(
IOptionsMonitor<JwtBearerOptions> jwtOptions,
ILoggerFactory loggerFactory,
ILogger<SerdicaJwtBearerAuthenticationHandler> logger,
UrlEncoder encoder,
IOptions<OidcValidation> oidcServerConnection)
: base(jwtOptions, loggerFactory, encoder)
{
this.oidcValidationSettings = oidcServerConnection.Value;
this.logger = logger;
}
protected override async Task<AuthenticateResult> HandleAuthenticateAsync()
{
// 1. Internal callers detected by bypass mask → Super user
if (oidcValidationSettings.BypassValidationsMasks?
.Any(m => m.MatchesRemote(Context)) == true)
{
return SuccessTicket(BuildDefaultRolePrincipal(SerdicaClaims.RoleSuperUser));
}
// 2. What roles does the endpoint require?
Context.Items.TryGetValue(ConstantsClass.HttpContextEndpointRequiredRoles,
out var rolesObj);
var requiredRoles = rolesObj as string[];
if (requiredRoles is { Length: 0 }) // an empty array means the endpoint only requires an authenticated caller
{
requiredRoles =
[
SerdicaClaims.IsAuthenticated
];
}
bool anonymousAllowed = requiredRoles == null ||
requiredRoles.Contains(SerdicaClaims.Anonymous,
StringComparer.Ordinal);
// 3. Decide whether we *need* to run AuthenticateAsync
bool tokenPresent =
Context.Request.Headers.TryGetValue("Authorization", out var authHeaders) &&
authHeaders.Any(h => h?.StartsWith("Bearer ", StringComparison.OrdinalIgnoreCase) == true);
bool mustAuthenticate = tokenPresent || !anonymousAllowed;
AuthenticateResult authResult = mustAuthenticate
? await Context.AuthenticateAsync(OpenIddictValidationAspNetCoreDefaults.AuthenticationScheme)
: AuthenticateResult.NoResult(); // cheap placeholder; not succeeded, not failed
logger.LogInformation(
"Authorizing with following parameters authResult: {AuthResult}, anonymousAllowed: {anonymousAllowed}, tokenPresent: {tokenPresent}, requiredRoles: {requiredRoles}, roleClaims: {roleClaims}",
authResult.Succeeded,
anonymousAllowed,
tokenPresent,
string.Join(",", requiredRoles ?? []),
string.Join(",", authResult?.Principal?.Claims?.Where(c => c.Type == ClaimTypes.Role)?.Select(c => c.Value) ?? [])
);
// 4. Figure out whether roles are satisfied (only matters if authenticated)
bool rolesSatisfied = authResult?.Succeeded == true &&
!anonymousAllowed &&
requiredRoles is { Length: > 0 } &&
(requiredRoles.Contains(SerdicaClaims.IsAuthenticated)
||
(authResult?.Principal?.Claims
?.Where(c => c.Type == ClaimTypes.Role)
?.Select(c => c.Value)
?.Intersect(requiredRoles!)
?.Any() ?? false));
// 5. Switch expression drives the outcome
return (anonymousAllowed, authResult?.Succeeded ?? false, rolesSatisfied) switch
{
// Anonymous endpoint
(true, true, _) => SuccessTicket(authResult!.Principal!), // token supplied
(true, false, _) => SuccessTicket(BuildDefaultRolePrincipal(
SerdicaClaims.Anonymous)), // no token
// Protected endpoint but NOT authenticated
(false, false, _) => AuthenticateResult.Fail(
authResult!.Failure ?? new Exception("Token invalid.")),
// Authenticated but lacks required roles
(_, _, false) => AuthenticateResult.Fail("Insufficient privileges"),
// Authenticated and authorised
_ => SuccessTicket(authResult!.Principal!)
};
}
protected override async Task HandleChallengeAsync(AuthenticationProperties props)
{
var proxy = new ProxyResult
{
StatusCode = HttpStatusCode.Unauthorized, // 401
TraceId = Context.TraceIdentifier,
Title = "Unauthorized",
Type = "https://datatracker.ietf.org/doc/html/rfc9110#section-15.5.2",
Errors = new Dictionary<string, string>
{
["authentication"] = "Missing or invalid credentials."
}
};
await proxy.ReturnHttpResponse(Response);
}
protected override async Task HandleForbiddenAsync(AuthenticationProperties props)
{
var proxy = new ProxyResult
{
StatusCode = HttpStatusCode.Forbidden, // 403
TraceId = Context.TraceIdentifier,
Title = "Forbidden",
Type = "https://datatracker.ietf.org/doc/html/rfc9110#section-15.5.3",
Errors = new Dictionary<string, string>
{
["authorization"] = "Insufficient privileges."
}
};
await proxy.ReturnHttpResponse(Response);
}
// ──────────────────────────────────────────────────────────────────
private ClaimsPrincipal BuildDefaultRolePrincipal(string role) =>
PrincipalBuilder.Build($"{DefaultRolePrincipalPrefix}_{role}", ConstantsClass.AuthenticationScheme)
.AddClaim(ClaimTypes.NameIdentifier, $"{DefaultRolePrincipalPrefix}_{role}")
.AddClaim(ClaimTypes.Role, role);
private static AuthenticateResult SuccessTicket(ClaimsPrincipal principal)
=> AuthenticateResult.Success(
new AuthenticationTicket(
principal,
principal.Identity!.AuthenticationType!
)
);
}


@@ -0,0 +1,47 @@
using Microsoft.AspNetCore.DataProtection.Repositories;
using StackExchange.Redis;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Xml.Linq;
namespace Ablera.Serdica.Authentication.Utilities;
// Move this to ...Authentication.Redis or something
public sealed class RedisAndFileSystemXmlRepository : IXmlRepository
{
private readonly IDatabase _db;
private readonly string _prefix;
public RedisAndFileSystemXmlRepository(IDatabase db, string prefix)
{
_db = db;
_prefix = prefix;
}
public IReadOnlyCollection<XElement> GetAllElements()
{
var keys = _db.SetMembers(_prefix);
var list = new List<XElement>();
foreach (var redisValue in keys)
{
var xml = redisValue.ToString();
try { list.Add(XElement.Parse(xml)); }
catch { /* ignore corrupted entry */ }
}
return list;
}
public void StoreElement(XElement element, string friendlyName)
{
var xml = element.ToString(SaveOptions.DisableFormatting);
// Write to Redis; SetAdd is idempotent, so repeated stores are safe.
_db.SetAdd(_prefix, xml);
}
}


@@ -0,0 +1,26 @@
###### generated-by: Ablera.Serdica.CiJobsBuilder 1.0.0 ######
###### Build & Publish ########################################################
FROM mirrors.ablera.dev/docker-mirror/dotnet/sdk:9.0-alpine AS build
WORKDIR /
COPY . .
WORKDIR /src/Serdica/Ablera.Serdica.Authority/Ablera.Serdica.Authority
RUN dotnet restore "Ablera.Serdica.Authority.csproj"
RUN dotnet publish "Ablera.Serdica.Authority.csproj" -c Release -o /app/publish
###### Run stage ##############################################################
FROM mirrors.ablera.dev/docker-mirror/dotnet/aspnet:9.0-alpine AS final
ENV DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=false
ENV TZ=UTC
RUN apk add --no-cache curl icu-data-full icu-libs tzdata
WORKDIR /app
COPY --from=build /app/publish .
CMD ["dotnet","Ablera.Serdica.Authority.dll"]
# port should match a port the web server is listening on
ENV HEALTHCHECK_PORT=80 \
HEALTHCHECK_HOST=localhost \
HEALTHCHECK_PROTOCOL=http \
HEALTHCHECK_ENDPOINT="health"
HEALTHCHECK --interval=15s --timeout=5s --start-period=10s --retries=3 \
CMD curl -sSLf ${HEALTHCHECK_PROTOCOL}://${HEALTHCHECK_HOST}:${HEALTHCHECK_PORT}/${HEALTHCHECK_ENDPOINT} || (echo 'Health check failed!' && exit 1)


@@ -0,0 +1,501 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 18
VisualStudioVersion = 18.0.11012.119
MinimumVisualStudioVersion = 10.0.40219.1
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Authority", "Ablera.Serdica.Authority\Ablera.Serdica.Authority.csproj", "{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "__Libraries", "__Libraries", "{02EA681E-C7D8-13C7-8484-4AC65E1B71E8}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Common.Tools", "..\..\__Libraries\Ablera.Serdica.Common.Tools\Ablera.Serdica.Common.Tools.csproj", "{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Common.Services", "..\..\__Libraries\Ablera.Serdica.Common.Services\Ablera.Serdica.Common.Services.csproj", "{2C117C87-F749-88D4-F947-0C3165F99365}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Microservice.Initializer", "..\..\__Libraries\Ablera.Serdica.Microservice.Initializer\Ablera.Serdica.Microservice.Initializer.csproj", "{56D0F1F5-8658-A87B-3E10-1E6674B39943}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Microservice.Initializer.EndpointsRegistration", "..\..\__Libraries\Ablera.Serdica.Microservice.Initializer.EndpointsRegistration\Ablera.Serdica.Microservice.Initializer.EndpointsRegistration.csproj", "{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Microservice.Consumer", "..\..\__Libraries\Ablera.Serdica.Microservice.Consumer\Ablera.Serdica.Microservice.Consumer.csproj", "{58186FA9-D464-8D16-9999-4E747B59C02C}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Common.Services.FromEntityFramework", "..\..\__Libraries\Ablera.Serdica.Common.Services.FromEntityFramework\Ablera.Serdica.Common.Services.FromEntityFramework.csproj", "{A90C6420-7BAD-86FB-D4E9-62528940071F}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Extensions.RabbitMQ", "..\..\__Libraries\Ablera.Serdica.Extensions.RabbitMQ\Ablera.Serdica.Extensions.RabbitMQ.csproj", "{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Extensions.NJsonSchema", "..\..\__Libraries\Ablera.Serdica.Extensions.NJsonSchema\Ablera.Serdica.Extensions.NJsonSchema.csproj", "{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Extensions.Serilog", "..\..\__Libraries\Ablera.Serdica.Extensions.Serilog\Ablera.Serdica.Extensions.Serilog.csproj", "{163970E8-D955-4963-9B44-F3E576782FE6}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.DbConfig", "..\..\__Libraries\Ablera.Serdica.DbConfig\Ablera.Serdica.DbConfig.csproj", "{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Plugin", "..\..\__Libraries\Ablera.Serdica.Plugin\Ablera.Serdica.Plugin.csproj", "{78370B69-97D0-AAB0-FBF4-97A4757563B6}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.DBModels.Serdica", "..\..\__Libraries\Ablera.Serdica.DBModels.Serdica\Ablera.Serdica.DBModels.Serdica.csproj", "{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.LocalCacheProvider", "..\..\__Libraries\Ablera.Serdica.LocalCacheProvider\Ablera.Serdica.LocalCacheProvider.csproj", "{55832819-3500-D8BA-9EBB-E3E2AB15090B}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Authentication", "..\..\__Libraries\Ablera.Serdica.Authentication\Ablera.Serdica.Authentication.csproj", "{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.TranslationProvider", "..\..\__Libraries\Ablera.Serdica.TranslationProvider\Ablera.Serdica.TranslationProvider.csproj", "{B22FADB1-C377-F072-0419-E15D363A64AD}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{8EC462FD-D22E-90A8-E5CE-7E832BA40C5D}"
ProjectSection(SolutionItems) = preProject
Dockerfile = Dockerfile
EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "__Plugins", "__Plugins", "{D8B47378-81A7-4BE3-8B76-B48D01E4D704}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Authority.Plugin.Standard", "__Plugins\Ablera.Serdica.Authority.Plugin.Standard\Ablera.Serdica.Authority.Plugin.Standard.csproj", "{36E54ACD-38EF-8350-82B7-2DBF372C5239}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.DBModels.Oidc", "__Libraries\Ablera.Serdica.DBModels.Oidc\Ablera.Serdica.DBModels.Oidc.csproj", "{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Extensions.Redis", "..\..\__Libraries\Ablera.Serdica.Extensions.Redis\Ablera.Serdica.Extensions.Redis.csproj", "{893C26DF-A9F4-5896-C765-B680DA63D23C}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.DBModels.Oidc.Migrations", "__Libraries\Ablera.Serdica.DBModels.Oidc.Migrations\Ablera.Serdica.DBModels.Oidc.Migrations.csproj", "{2572437D-2AA9-A956-3EA7-2DD09105AFC1}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Authorization", "..\..\__Libraries\Ablera.Serdica.Authorization\Ablera.Serdica.Authorization.csproj", "{387A2480-D7FB-6F9D-6D93-F96970DAB46B}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Extensions.MessagePack", "..\..\__Libraries\Ablera.Serdica.Extensions.MessagePack\Ablera.Serdica.Extensions.MessagePack.csproj", "{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.UserConfiguration", "..\..\__Libraries\Ablera.Serdica.UserConfiguration\Ablera.Serdica.UserConfiguration.csproj", "{4E4CAE4A-E577-174F-9671-EBB759F44E77}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.UserConfiguration.Redis", "..\..\__Libraries\Ablera.Serdica.UserConfiguration.Redis\Ablera.Serdica.UserConfiguration.Redis.csproj", "{29B145E2-F37C-A614-F834-7F1F484ED142}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.UserConfiguration.Builder", "..\..\__Libraries\Ablera.Serdica.UserConfiguration.Builder\Ablera.Serdica.UserConfiguration.Builder.csproj", "{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "__Libraries", "__Libraries", "{6517AF15-46A7-4D81-A060-20FD1785EDE6}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Extensions.Novell.Directory.Ldap", "..\..\__Libraries\Ablera.Serdica.Extensions.Novell.Directory.Ldap\Ablera.Serdica.Extensions.Novell.Directory.Ldap.csproj", "{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Authority.Plugins.Base", "__Plugins\Ablera.Serdica.Authority.Plugins.Base\Ablera.Serdica.Authority.Plugins.Base.csproj", "{2804361B-83DD-DD87-ED76-3DAF19778DC5}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Authority.Plugins.LdapUtilities", "__Plugins\Ablera.Serdica.Authority.Plugins.LdapUtilities\Ablera.Serdica.Authority.Plugins.LdapUtilities.csproj", "{225906DB-8525-9CF4-EE0D-1996AF58A7AE}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.HealthChecks", "..\..\__Libraries\Ablera.Serdica.HealthChecks\Ablera.Serdica.HealthChecks.csproj", "{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Authority.Plugin.Bulstrad", "__Plugins\Ablera.Serdica.Authority.Plugin.Bulstrad\Ablera.Serdica.Authority.Plugin.Bulstrad.csproj", "{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Ablera.Serdica.Authority.Plugin.Ldap", "__Plugins\Ablera.Serdica.Authority.Plugin.Ldap\Ablera.Serdica.Authority.Plugin.Ldap.csproj", "{20476940-0B2C-62FE-F772-7E8C77D24A9B}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Release|Any CPU = Release|Any CPU
Release|x64 = Release|x64
Release|x86 = Release|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Debug|x64.ActiveCfg = Debug|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Debug|x64.Build.0 = Debug|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Debug|x86.ActiveCfg = Debug|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Debug|x86.Build.0 = Debug|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Release|Any CPU.Build.0 = Release|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Release|x64.ActiveCfg = Release|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Release|x64.Build.0 = Release|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Release|x86.ActiveCfg = Release|Any CPU
{4DC6FDAD-3F58-662F-B66C-35BD90B3300B}.Release|x86.Build.0 = Release|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Debug|x64.ActiveCfg = Debug|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Debug|x64.Build.0 = Debug|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Debug|x86.ActiveCfg = Debug|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Debug|x86.Build.0 = Debug|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Release|Any CPU.Build.0 = Release|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Release|x64.ActiveCfg = Release|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Release|x64.Build.0 = Release|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Release|x86.ActiveCfg = Release|Any CPU
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C}.Release|x86.Build.0 = Release|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Debug|Any CPU.Build.0 = Debug|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Debug|x64.ActiveCfg = Debug|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Debug|x64.Build.0 = Debug|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Debug|x86.ActiveCfg = Debug|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Debug|x86.Build.0 = Debug|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Release|Any CPU.ActiveCfg = Release|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Release|Any CPU.Build.0 = Release|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Release|x64.ActiveCfg = Release|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Release|x64.Build.0 = Release|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Release|x86.ActiveCfg = Release|Any CPU
{2C117C87-F749-88D4-F947-0C3165F99365}.Release|x86.Build.0 = Release|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Debug|Any CPU.Build.0 = Debug|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Debug|x64.ActiveCfg = Debug|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Debug|x64.Build.0 = Debug|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Debug|x86.ActiveCfg = Debug|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Debug|x86.Build.0 = Debug|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Release|Any CPU.ActiveCfg = Release|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Release|Any CPU.Build.0 = Release|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Release|x64.ActiveCfg = Release|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Release|x64.Build.0 = Release|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Release|x86.ActiveCfg = Release|Any CPU
{56D0F1F5-8658-A87B-3E10-1E6674B39943}.Release|x86.Build.0 = Release|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Debug|x64.ActiveCfg = Debug|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Debug|x64.Build.0 = Debug|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Debug|x86.ActiveCfg = Debug|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Debug|x86.Build.0 = Debug|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Release|Any CPU.Build.0 = Release|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Release|x64.ActiveCfg = Release|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Release|x64.Build.0 = Release|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Release|x86.ActiveCfg = Release|Any CPU
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C}.Release|x86.Build.0 = Release|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Debug|x64.ActiveCfg = Debug|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Debug|x64.Build.0 = Debug|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Debug|x86.ActiveCfg = Debug|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Debug|x86.Build.0 = Debug|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Release|Any CPU.Build.0 = Release|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Release|x64.ActiveCfg = Release|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Release|x64.Build.0 = Release|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Release|x86.ActiveCfg = Release|Any CPU
{58186FA9-D464-8D16-9999-4E747B59C02C}.Release|x86.Build.0 = Release|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Debug|x64.ActiveCfg = Debug|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Debug|x64.Build.0 = Debug|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Debug|x86.ActiveCfg = Debug|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Debug|x86.Build.0 = Debug|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Release|Any CPU.Build.0 = Release|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Release|x64.ActiveCfg = Release|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Release|x64.Build.0 = Release|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Release|x86.ActiveCfg = Release|Any CPU
{A90C6420-7BAD-86FB-D4E9-62528940071F}.Release|x86.Build.0 = Release|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Debug|Any CPU.Build.0 = Debug|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Debug|x64.ActiveCfg = Debug|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Debug|x64.Build.0 = Debug|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Debug|x86.ActiveCfg = Debug|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Debug|x86.Build.0 = Debug|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Release|Any CPU.ActiveCfg = Release|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Release|Any CPU.Build.0 = Release|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Release|x64.ActiveCfg = Release|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Release|x64.Build.0 = Release|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Release|x86.ActiveCfg = Release|Any CPU
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4}.Release|x86.Build.0 = Release|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Debug|Any CPU.Build.0 = Debug|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Debug|x64.ActiveCfg = Debug|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Debug|x64.Build.0 = Debug|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Debug|x86.ActiveCfg = Debug|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Debug|x86.Build.0 = Debug|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Release|Any CPU.ActiveCfg = Release|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Release|Any CPU.Build.0 = Release|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Release|x64.ActiveCfg = Release|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Release|x64.Build.0 = Release|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Release|x86.ActiveCfg = Release|Any CPU
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322}.Release|x86.Build.0 = Release|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Debug|Any CPU.Build.0 = Debug|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Debug|x64.ActiveCfg = Debug|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Debug|x64.Build.0 = Debug|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Debug|x86.ActiveCfg = Debug|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Debug|x86.Build.0 = Debug|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Release|Any CPU.ActiveCfg = Release|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Release|Any CPU.Build.0 = Release|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Release|x64.ActiveCfg = Release|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Release|x64.Build.0 = Release|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Release|x86.ActiveCfg = Release|Any CPU
{163970E8-D955-4963-9B44-F3E576782FE6}.Release|x86.Build.0 = Release|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Debug|Any CPU.Build.0 = Debug|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Debug|x64.ActiveCfg = Debug|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Debug|x64.Build.0 = Debug|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Debug|x86.ActiveCfg = Debug|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Debug|x86.Build.0 = Debug|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Release|Any CPU.ActiveCfg = Release|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Release|Any CPU.Build.0 = Release|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Release|x64.ActiveCfg = Release|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Release|x64.Build.0 = Release|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Release|x86.ActiveCfg = Release|Any CPU
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8}.Release|x86.Build.0 = Release|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Debug|Any CPU.Build.0 = Debug|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Debug|x64.ActiveCfg = Debug|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Debug|x64.Build.0 = Debug|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Debug|x86.ActiveCfg = Debug|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Debug|x86.Build.0 = Debug|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Release|Any CPU.ActiveCfg = Release|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Release|Any CPU.Build.0 = Release|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Release|x64.ActiveCfg = Release|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Release|x64.Build.0 = Release|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Release|x86.ActiveCfg = Release|Any CPU
{78370B69-97D0-AAB0-FBF4-97A4757563B6}.Release|x86.Build.0 = Release|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Debug|Any CPU.Build.0 = Debug|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Debug|x64.ActiveCfg = Debug|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Debug|x64.Build.0 = Debug|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Debug|x86.ActiveCfg = Debug|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Debug|x86.Build.0 = Debug|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Release|Any CPU.ActiveCfg = Release|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Release|Any CPU.Build.0 = Release|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Release|x64.ActiveCfg = Release|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Release|x64.Build.0 = Release|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Release|x86.ActiveCfg = Release|Any CPU
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0}.Release|x86.Build.0 = Release|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Debug|x64.ActiveCfg = Debug|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Debug|x64.Build.0 = Debug|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Debug|x86.ActiveCfg = Debug|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Debug|x86.Build.0 = Debug|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Release|Any CPU.Build.0 = Release|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Release|x64.ActiveCfg = Release|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Release|x64.Build.0 = Release|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Release|x86.ActiveCfg = Release|Any CPU
{55832819-3500-D8BA-9EBB-E3E2AB15090B}.Release|x86.Build.0 = Release|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Debug|Any CPU.Build.0 = Debug|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Debug|x64.ActiveCfg = Debug|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Debug|x64.Build.0 = Debug|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Debug|x86.ActiveCfg = Debug|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Debug|x86.Build.0 = Debug|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Release|Any CPU.ActiveCfg = Release|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Release|Any CPU.Build.0 = Release|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Release|x64.ActiveCfg = Release|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Release|x64.Build.0 = Release|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Release|x86.ActiveCfg = Release|Any CPU
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00}.Release|x86.Build.0 = Release|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Debug|x64.ActiveCfg = Debug|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Debug|x64.Build.0 = Debug|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Debug|x86.ActiveCfg = Debug|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Debug|x86.Build.0 = Debug|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Release|Any CPU.Build.0 = Release|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Release|x64.ActiveCfg = Release|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Release|x64.Build.0 = Release|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Release|x86.ActiveCfg = Release|Any CPU
{B22FADB1-C377-F072-0419-E15D363A64AD}.Release|x86.Build.0 = Release|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Debug|Any CPU.Build.0 = Debug|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Debug|x64.ActiveCfg = Debug|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Debug|x64.Build.0 = Debug|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Debug|x86.ActiveCfg = Debug|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Debug|x86.Build.0 = Debug|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Release|Any CPU.ActiveCfg = Release|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Release|Any CPU.Build.0 = Release|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Release|x64.ActiveCfg = Release|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Release|x64.Build.0 = Release|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Release|x86.ActiveCfg = Release|Any CPU
{36E54ACD-38EF-8350-82B7-2DBF372C5239}.Release|x86.Build.0 = Release|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Debug|x64.ActiveCfg = Debug|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Debug|x64.Build.0 = Debug|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Debug|x86.ActiveCfg = Debug|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Debug|x86.Build.0 = Debug|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Release|Any CPU.Build.0 = Release|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Release|x64.ActiveCfg = Release|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Release|x64.Build.0 = Release|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Release|x86.ActiveCfg = Release|Any CPU
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B}.Release|x86.Build.0 = Release|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Debug|x64.ActiveCfg = Debug|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Debug|x64.Build.0 = Debug|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Debug|x86.ActiveCfg = Debug|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Debug|x86.Build.0 = Debug|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Release|Any CPU.Build.0 = Release|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Release|x64.ActiveCfg = Release|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Release|x64.Build.0 = Release|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Release|x86.ActiveCfg = Release|Any CPU
{893C26DF-A9F4-5896-C765-B680DA63D23C}.Release|x86.Build.0 = Release|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Debug|Any CPU.Build.0 = Debug|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Debug|x64.ActiveCfg = Debug|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Debug|x64.Build.0 = Debug|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Debug|x86.ActiveCfg = Debug|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Debug|x86.Build.0 = Debug|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Release|Any CPU.ActiveCfg = Release|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Release|Any CPU.Build.0 = Release|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Release|x64.ActiveCfg = Release|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Release|x64.Build.0 = Release|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Release|x86.ActiveCfg = Release|Any CPU
{2572437D-2AA9-A956-3EA7-2DD09105AFC1}.Release|x86.Build.0 = Release|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Debug|x64.ActiveCfg = Debug|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Debug|x64.Build.0 = Debug|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Debug|x86.ActiveCfg = Debug|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Debug|x86.Build.0 = Debug|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Release|Any CPU.Build.0 = Release|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Release|x64.ActiveCfg = Release|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Release|x64.Build.0 = Release|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Release|x86.ActiveCfg = Release|Any CPU
{387A2480-D7FB-6F9D-6D93-F96970DAB46B}.Release|x86.Build.0 = Release|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Debug|Any CPU.Build.0 = Debug|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Debug|x64.ActiveCfg = Debug|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Debug|x64.Build.0 = Debug|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Debug|x86.ActiveCfg = Debug|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Debug|x86.Build.0 = Debug|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Release|Any CPU.ActiveCfg = Release|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Release|Any CPU.Build.0 = Release|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Release|x64.ActiveCfg = Release|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Release|x64.Build.0 = Release|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Release|x86.ActiveCfg = Release|Any CPU
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4}.Release|x86.Build.0 = Release|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Debug|Any CPU.Build.0 = Debug|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Debug|x64.ActiveCfg = Debug|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Debug|x64.Build.0 = Debug|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Debug|x86.ActiveCfg = Debug|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Debug|x86.Build.0 = Debug|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Release|Any CPU.ActiveCfg = Release|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Release|Any CPU.Build.0 = Release|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Release|x64.ActiveCfg = Release|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Release|x64.Build.0 = Release|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Release|x86.ActiveCfg = Release|Any CPU
{4E4CAE4A-E577-174F-9671-EBB759F44E77}.Release|x86.Build.0 = Release|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Debug|Any CPU.Build.0 = Debug|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Debug|x64.ActiveCfg = Debug|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Debug|x64.Build.0 = Debug|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Debug|x86.ActiveCfg = Debug|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Debug|x86.Build.0 = Debug|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Release|Any CPU.ActiveCfg = Release|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Release|Any CPU.Build.0 = Release|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Release|x64.ActiveCfg = Release|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Release|x64.Build.0 = Release|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Release|x86.ActiveCfg = Release|Any CPU
{29B145E2-F37C-A614-F834-7F1F484ED142}.Release|x86.Build.0 = Release|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Debug|Any CPU.Build.0 = Debug|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Debug|x64.ActiveCfg = Debug|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Debug|x64.Build.0 = Debug|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Debug|x86.ActiveCfg = Debug|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Debug|x86.Build.0 = Debug|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Release|Any CPU.ActiveCfg = Release|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Release|Any CPU.Build.0 = Release|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Release|x64.ActiveCfg = Release|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Release|x64.Build.0 = Release|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Release|x86.ActiveCfg = Release|Any CPU
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0}.Release|x86.Build.0 = Release|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Debug|Any CPU.Build.0 = Debug|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Debug|x64.ActiveCfg = Debug|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Debug|x64.Build.0 = Debug|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Debug|x86.ActiveCfg = Debug|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Debug|x86.Build.0 = Debug|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Release|Any CPU.ActiveCfg = Release|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Release|Any CPU.Build.0 = Release|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Release|x64.ActiveCfg = Release|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Release|x64.Build.0 = Release|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Release|x86.ActiveCfg = Release|Any CPU
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB}.Release|x86.Build.0 = Release|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Debug|Any CPU.Build.0 = Debug|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Debug|x64.ActiveCfg = Debug|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Debug|x64.Build.0 = Debug|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Debug|x86.ActiveCfg = Debug|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Debug|x86.Build.0 = Debug|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Release|Any CPU.ActiveCfg = Release|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Release|Any CPU.Build.0 = Release|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Release|x64.ActiveCfg = Release|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Release|x64.Build.0 = Release|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Release|x86.ActiveCfg = Release|Any CPU
{2804361B-83DD-DD87-ED76-3DAF19778DC5}.Release|x86.Build.0 = Release|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Debug|Any CPU.Build.0 = Debug|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Debug|x64.ActiveCfg = Debug|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Debug|x64.Build.0 = Debug|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Debug|x86.ActiveCfg = Debug|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Debug|x86.Build.0 = Debug|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Release|Any CPU.ActiveCfg = Release|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Release|Any CPU.Build.0 = Release|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Release|x64.ActiveCfg = Release|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Release|x64.Build.0 = Release|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Release|x86.ActiveCfg = Release|Any CPU
{225906DB-8525-9CF4-EE0D-1996AF58A7AE}.Release|x86.Build.0 = Release|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Debug|Any CPU.Build.0 = Debug|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Debug|x64.ActiveCfg = Debug|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Debug|x64.Build.0 = Debug|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Debug|x86.ActiveCfg = Debug|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Debug|x86.Build.0 = Debug|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Release|Any CPU.ActiveCfg = Release|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Release|Any CPU.Build.0 = Release|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Release|x64.ActiveCfg = Release|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Release|x64.Build.0 = Release|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Release|x86.ActiveCfg = Release|Any CPU
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A}.Release|x86.Build.0 = Release|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Debug|x64.ActiveCfg = Debug|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Debug|x64.Build.0 = Debug|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Debug|x86.ActiveCfg = Debug|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Debug|x86.Build.0 = Debug|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Release|Any CPU.Build.0 = Release|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Release|x64.ActiveCfg = Release|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Release|x64.Build.0 = Release|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Release|x86.ActiveCfg = Release|Any CPU
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C}.Release|x86.Build.0 = Release|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Debug|x64.ActiveCfg = Debug|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Debug|x64.Build.0 = Debug|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Debug|x86.ActiveCfg = Debug|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Debug|x86.Build.0 = Debug|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Release|Any CPU.Build.0 = Release|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Release|x64.ActiveCfg = Release|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Release|x64.Build.0 = Release|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Release|x86.ActiveCfg = Release|Any CPU
{20476940-0B2C-62FE-F772-7E8C77D24A9B}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{AB637A9A-1ED1-27BC-5FC7-84775EC61C9C} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{2C117C87-F749-88D4-F947-0C3165F99365} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{56D0F1F5-8658-A87B-3E10-1E6674B39943} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{1E2B3B33-C1C9-A86C-234D-8E3D2487381C} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{58186FA9-D464-8D16-9999-4E747B59C02C} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{A90C6420-7BAD-86FB-D4E9-62528940071F} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{3D860D17-A14E-25AE-81A0-DB0D0EBBEAD4} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{C0692A9A-9841-F95A-A07B-0C0AC6AA1322} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{163970E8-D955-4963-9B44-F3E576782FE6} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{5BC0A7B5-5CD7-572F-BBC0-01AA8C62CDE8} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{78370B69-97D0-AAB0-FBF4-97A4757563B6} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{22036806-8B3D-67C6-2CE7-8F4D7E192BB0} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{55832819-3500-D8BA-9EBB-E3E2AB15090B} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{FCBDFBDE-E76B-964D-24E8-9F01F69D1A00} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{B22FADB1-C377-F072-0419-E15D363A64AD} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{36E54ACD-38EF-8350-82B7-2DBF372C5239} = {D8B47378-81A7-4BE3-8B76-B48D01E4D704}
{0AB994AF-7DE0-B08D-6428-1EA9AEF3DE0B} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{893C26DF-A9F4-5896-C765-B680DA63D23C} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{2572437D-2AA9-A956-3EA7-2DD09105AFC1} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{387A2480-D7FB-6F9D-6D93-F96970DAB46B} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{FEE40D33-2AB0-2891-706F-4BE662BD2CF4} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{4E4CAE4A-E577-174F-9671-EBB759F44E77} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{29B145E2-F37C-A614-F834-7F1F484ED142} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{3DD8C0FB-7500-2F44-8C5B-A6DAF54C27F0} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{6517AF15-46A7-4D81-A060-20FD1785EDE6} = {D8B47378-81A7-4BE3-8B76-B48D01E4D704}
{E2C3643E-C60F-4BB8-A7EA-12CB038346FB} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{2804361B-83DD-DD87-ED76-3DAF19778DC5} = {6517AF15-46A7-4D81-A060-20FD1785EDE6}
{225906DB-8525-9CF4-EE0D-1996AF58A7AE} = {6517AF15-46A7-4D81-A060-20FD1785EDE6}
{E3905D64-D056-4EF3-B4C9-98A4EEB7E71A} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8}
{DBE3EF10-21FE-9F9B-E292-DD6D4E22192C} = {D8B47378-81A7-4BE3-8B76-B48D01E4D704}
{20476940-0B2C-62FE-F772-7E8C77D24A9B} = {D8B47378-81A7-4BE3-8B76-B48D01E4D704}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {F7F3E93C-1A9C-4268-867E-2179FA05A877}
EndGlobalSection
EndGlobal

View File

@@ -0,0 +1,52 @@
<Project Sdk="Microsoft.NET.Sdk.Web">
<PropertyGroup>
<TargetFramework>net9.0</TargetFramework>
<Version>1.0.0</Version>
<Nullable>enable</Nullable>
<OutputType>Exe</OutputType>
<EnableDefaultContentItems>false</EnableDefaultContentItems>
</PropertyGroup>
<ItemGroup>
<None Include="NuGet.config" />
<Folder Include="wwwroot\" />
<Content Include="appsettings.json">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="oidc-settings.json">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="wwwroot\login.html" CopyToOutputDirectory="PreserveNewest" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Caching.StackExchangeRedis" Version="9.0.5" />
<PackageReference Include="OpenIddict.Quartz" Version="6.3.0" />
<PackageReference Include="Quartz.Extensions.Hosting" Version="3.14.0" />
<PackageReference Include="OpenIddict.AspNetCore" Version="6.3.0" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.Authorization/Ablera.Serdica.Authorization.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.Common.Tools/Ablera.Serdica.Common.Tools.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.DbConfig/Ablera.Serdica.DbConfig.csproj" />
<ProjectReference Include="../__Libraries/Ablera.Serdica.DBModels.Oidc.Migrations/Ablera.Serdica.DBModels.Oidc.Migrations.csproj" />
<ProjectReference Include="../__Libraries/Ablera.Serdica.DBModels.Oidc/Ablera.Serdica.DBModels.Oidc.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.DBModels.Serdica/Ablera.Serdica.DBModels.Serdica.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.Extensions.Redis/Ablera.Serdica.Extensions.Redis.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.Extensions.Serilog/Ablera.Serdica.Extensions.Serilog.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.HealthChecks/Ablera.Serdica.HealthChecks.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.Microservice.Consumer/Ablera.Serdica.Microservice.Consumer.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.Microservice.Initializer.EndpointsRegistration/Ablera.Serdica.Microservice.Initializer.EndpointsRegistration.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.Plugin/Ablera.Serdica.Plugin.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.Common.Services.FromEntityFramework/Ablera.Serdica.Common.Services.FromEntityFramework.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.TranslationProvider/Ablera.Serdica.TranslationProvider.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.UserConfiguration.Builder/Ablera.Serdica.UserConfiguration.Builder.csproj" />
<ProjectReference Include="../../../__Libraries/Ablera.Serdica.UserConfiguration.Redis/Ablera.Serdica.UserConfiguration.Redis.csproj" />
<ProjectReference Include="..\__Plugins\Ablera.Serdica.Authority.Plugins.Base\Ablera.Serdica.Authority.Plugins.Base.csproj" />
</ItemGroup>
</Project>

View File

@@ -0,0 +1,19 @@
namespace Ablera.Serdica.Authority.Constants;
public static class ConstantsClass
{
public const string ConnectionNameDefault = "DefaultConnection";
public const string AuthenticationDelegateUrlKey = "authenticationDelegateUrl";
public const string SignOutUrlKey = "signOutUrl";
public const string YesKey = "Y";
public const string NoKey = "N";
}

View File

@@ -0,0 +1,6 @@
namespace Ablera.Serdica.Authority.Constants;
public static class MessageKeys
{
public const string FailedToChangePassword = nameof(FailedToChangePassword);
}

View File

@@ -0,0 +1,8 @@
using Ablera.Serdica.Authority.Plugins.Base.Contracts;
namespace Ablera.Serdica.Authority.Contracts;
public interface IUserManagingDirector<TUser>
: IUserManagementFacade<TUser>
where TUser : class
{
}

View File

@@ -0,0 +1,130 @@
using Ablera.Serdica.Microservice.Consumer.Attributes;
using Ablera.Serdica.Microservice.Consumer.Contracts.Asynchronous;
using Ablera.Serdica.Common.Tools.Exceptions;
using Ablera.Serdica.Authority.Services;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.Authentication.Models;
using System;
using System.ComponentModel.DataAnnotations;
using Ablera.Serdica.DBModels.Serdica;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using System.Linq;
using Microsoft.EntityFrameworkCore;
using Ablera.Serdica.Common.Services.Contracts;
using Ablera.Serdica.UserConfiguration.Models;
using Ablera.Serdica.UserConfiguration.Contracts;
using Ablera.Serdica.Extensions.RabbitMQ.Contracts;
namespace Ablera.Serdica.NotificationService.Endpoints;
public class UpdateUsersConfigurationRequest
{
public string? UserGuid { get; set; }
public string? Language { get; set; }
public string? Country { get; set; }
public long? AutoLogoutMinutes { get; set; }
public string? BranchCode { get; set; }
}
[Command("users_update_user_configuration", timeoutInSeconds: 10, allowedRoles: [SerdicaClaims.IsAuthenticated])]
public class UpdateUserConfigurationEndpoint(
SerdicaDbContext dbContext,
IEnvironment environment,
IEndpointRequestMessageProvider requestMessageProvider,
IUserConfigurationRepository<UserConfigurationModel> userConfigurationRepository,
ILogger<UpdateUserConfigurationEndpoint> logger)
: IEndpointWithRequest<UpdateUsersConfigurationRequest, UserConfigurationModel?>
{
public async Task<UserConfigurationModel?> ConsumeAsync(UpdateUsersConfigurationRequest request)
{
if (request.UserGuid != null && request.UserGuid != requestMessageProvider.RequestMessage.UserId)
{
var isLoggedInUserSuperUser = await dbContext.UserAccounts
.Where(x => x.UserGuid == requestMessageProvider.RequestMessage.UserId)
.SelectMany(x => x.UserRole1s)
.Select(x => x.Id)
.AnyAsync(x => x == SerdicaClaims.RoleSuperUser);
if (!isLoggedInUserSuperUser)
{
throw new BaseResultException("not_authorized".AsCode(), "You are not authorized to change configuration for this user!");
}
}
request.UserGuid ??= requestMessageProvider.RequestMessage.UserId;
logger.LogInformation("Attempting to update user profile for: '{userGuid}'.", request.UserGuid);
await using var tx = await dbContext.Database.BeginTransactionAsync();
UserAccount? userAccount = null;
try
{
userAccount = await dbContext.UserAccounts
.Where(u => u.UserGuid == request.UserGuid)
.FirstOrDefaultAsync();
if (userAccount == null)
{
const string errorMsg =
"User identifier from a token does not match a user in the database: '{userGuid}'.";
logger.LogError(errorMsg, request.UserGuid);
throw new Exception(errorMsg.Replace("{userGuid}", request.UserGuid));
}
if (request.Language != null)
{
userAccount.Language = request.Language;
}
if (request.Country != null)
{
userAccount.Country = request.Country;
}
if (request.AutoLogoutMinutes.HasValue)
{
userAccount.AutoLogoutMinutes = request.AutoLogoutMinutes.Value switch
{
var minutes when minutes > environment.MaximumAutoLogoutInMinutes => throw new BaseResultException(
$"The auto logout minutes value cannot be greater than the maximum allowed: {environment.MaximumAutoLogoutInMinutes}"),
var minutes when minutes < environment.MinimumAutoLogoutInMinutes => throw new BaseResultException(
$"The auto logout minutes value cannot be less than the minimum allowed: {environment.MinimumAutoLogoutInMinutes}"),
var minutes => minutes // inside the HasValue guard, the value is always present
};
}
if (request.BranchCode != null)
{
await dbContext.UserAccounts
.Where(ua => ua.UserAccountId == userAccount.UserAccountId)
.ExecuteUpdateAsync(ua =>
ua.SetProperty(
prop => prop.CurrentBranch,
dbContext.IcUsers
.Include(icUser => icUser.IcBranch)
.Where(icUser => icUser.UserAccountId == userAccount.UserAccountId
&& icUser.IcBranch.BranchCode == request.BranchCode)
.Select(icUserId => icUserId.IcUserId)
.FirstOrDefault()
)
);
}
await dbContext.SaveChangesAsync();
await tx.CommitAsync();
logger.LogInformation("Successfully updated settings for user: '{username}'.", userAccount.UserName);
}
catch (Exception ex)
{
await tx.RollbackAsync();
logger.LogError(
ex,
"Failed to update user profile for: '{userGuid}'.",
request.UserGuid);
throw;
}
// Rebuild the configuration to include the updated settings
var userConfiguration = await userConfigurationRepository.RetrieveAsync(userAccount.UserGuid, true);
return userConfiguration;
}
}

View File

@@ -0,0 +1,42 @@
using Ablera.Serdica.Microservice.Consumer.Attributes;
using Ablera.Serdica.Microservice.Consumer.Contracts.Asynchronous;
using Ablera.Serdica.Common.Tools.Exceptions;
using Ablera.Serdica.Authority.Services;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.DBModels.Serdica;
using System.Linq;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Extensions.RabbitMQ.Contracts;
using Microsoft.EntityFrameworkCore;
using System;
using Ablera.Serdica.Microservice.Consumer.Contracts;
using static Ablera.Serdica.Authority.Constants.ConstantsClass;
namespace Ablera.Serdica.NotificationService.Endpoints;
public record UserSetEnabledLoginRequest
{
public required string UserGuid { get; init; }
public required bool LoginEnabled { get; init; }
}
public record UserSetEnabledLoginResponse
{
public required bool Updated { get; init; }
}
[Command("users_update_user_login_enabled", timeoutInSeconds: 10, methodName: "POST", allowedRoles: [SerdicaClaims.RoleSuperUser])]
public class UpdateUserLoginEnabledEndpoint(
SerdicaDbContext dbContext)
: Microservice.Consumer.Contracts.Asynchronous.IEndpointWithRequest<UserSetEnabledLoginRequest, UserSetEnabledLoginResponse>
{
public async Task<UserSetEnabledLoginResponse> ConsumeAsync(UserSetEnabledLoginRequest request)
{
var updated = await dbContext.UserAccounts
.Where(x => x.UserGuid == request.UserGuid)
.ExecuteUpdateAsync(x => x.SetProperty(y => y.LockAccount, request.LoginEnabled ? NoKey : YesKey));
return new UserSetEnabledLoginResponse { Updated = updated > 0 };
}
}

View File

@@ -0,0 +1,69 @@
using Ablera.Serdica.Microservice.Consumer.Attributes;
using Ablera.Serdica.Microservice.Consumer.Contracts.Asynchronous;
using Ablera.Serdica.Common.Tools.Exceptions;
using Ablera.Serdica.Authority.Services;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.DBModels.Serdica;
using System.Linq;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Extensions.RabbitMQ.Contracts;
using Microsoft.EntityFrameworkCore;
using System;
using Ablera.Serdica.Microservice.Consumer.Contracts;
using static Ablera.Serdica.Authority.Constants.ConstantsClass;
using Ablera.Serdica.Authority.Contracts;
using Microsoft.AspNetCore.Identity;
using Ablera.Serdica.Authority.Constants;
namespace Ablera.Serdica.NotificationService.Endpoints;
public record UserChangePasswordRequest
{
public required string? UserGuid { get; set; }
public required string Password { get; init; }
public required string ConfirmedPassword { get; init; }
}
public record UserChangePasswordResponse
{
public required bool Succeeded { get; init; }
}
[Command("users_update_user_password", timeoutInSeconds: 10, methodName: "POST", allowedRoles: [SerdicaClaims.IsAuthenticated])]
public class UpdateUserPasswordEndpoint(
IEndpointRequestMessageProvider requestMessageProvider,
IUserManagingDirector<IdentityUser<string>> users,
SerdicaDbContext dbContext)
: Microservice.Consumer.Contracts.Asynchronous.IEndpointWithRequest<UserChangePasswordRequest, UserChangePasswordResponse>
{
public async Task<UserChangePasswordResponse> ConsumeAsync(UserChangePasswordRequest request)
{
if (request.UserGuid != null && request.UserGuid != requestMessageProvider.RequestMessage.UserId)
{
var isLoggedInUserSuperUser = await dbContext.UserAccounts
.Where(x => x.UserGuid == requestMessageProvider.RequestMessage.UserId)
.SelectMany(x => x.UserRole1s)
.Select(x => x.Id)
.AnyAsync(x => x == SerdicaClaims.RoleSuperUser);
if (!isLoggedInUserSuperUser)
{
throw new BaseResultException("not_authorized".AsCode(), "You are not authorized to change the password for this user!");
}
}
request.UserGuid ??= requestMessageProvider.RequestMessage.UserId;
var identityUser = await users.FindByIdAsync(request.UserGuid)
?? throw new BaseResultException("account_not_found".AsCode(), "Account associated with the session not found!");
var result = await users.ChangePasswordAsync(identityUser, request.Password, request.ConfirmedPassword);
if (result.Succeeded == false)
{
throw new BaseResultException(
(result.ErrorCode ?? "change_password_failed").AsCode(),
MessageKeys.FailedToChangePassword);
}
return new UserChangePasswordResponse { Succeeded = true };
}
}

View File

@@ -0,0 +1,80 @@
using Ablera.Serdica.Microservice.Consumer.Attributes;
using Ablera.Serdica.Microservice.Consumer.Contracts.Asynchronous;
using Ablera.Serdica.Common.Tools.Exceptions;
using Ablera.Serdica.Authority.Services;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.Authentication.Models;
using System;
using System.ComponentModel.DataAnnotations;
using Ablera.Serdica.DBModels.Serdica;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using System.Linq;
using Microsoft.EntityFrameworkCore;
using Ablera.Serdica.UserConfiguration.Models;
using Ablera.Serdica.Common.Services.Contracts;
using Ablera.Serdica.UserConfiguration.Contracts;
using Ablera.Serdica.Extensions.RabbitMQ.Contracts;
namespace Ablera.Serdica.NotificationService.Endpoints;
public record UpdateUserRoles
{
public required string UserGuid { get; set; }
public required string[] Roles { get; init; }
}
[Command("users_update_user_roles", timeoutInSeconds: 10, allowedRoles: [SerdicaClaims.RoleSuperUser])]
public class UpdateUserRolesEndpoint(
SerdicaDbContext dbContext,
ILogger<UpdateUserConfigurationEndpoint> logger,
IUserConfigurationRepository<UserConfigurationModel> userConfigurationRepository)
: IEndpointWithRequest<UpdateUserRoles, UserConfigurationModel?>
{
public async Task<UserConfigurationModel?> ConsumeAsync(UpdateUserRoles request)
{
await using var tx = await dbContext.Database.BeginTransactionAsync();
try
{
// Raw SQL is used because the Entity Framework generator fails to create an entity for a table made up of only two FK columns.
var userAccountId = await dbContext.UserAccounts
.Where(u => u.UserGuid == request.UserGuid)
.Select(u => u.UserAccountId)
.FirstOrDefaultAsync();
// delete old roles
await dbContext.Database.ExecuteSqlRawAsync(
"DELETE FROM SRD_SYS.USER_ROLES WHERE USER_ACCOUNT_ID = {0} ", userAccountId);
// insert new roles
foreach (var role in request.Roles)
{
await dbContext.Database.ExecuteSqlRawAsync(
"INSERT INTO SRD_SYS.USER_ROLES (USER_ACCOUNT_ID, USER_ROLE) VALUES ({0}, {1})", userAccountId, role);
}
await dbContext.SaveChangesAsync();
await tx.CommitAsync();
}
catch (Exception ex)
{
await tx.RollbackAsync();
logger.LogError(
ex,
"Failed to update user roles for: '{userGuid}'.",
request.UserGuid);
throw;
}
logger.LogInformation("Successfully update roles for user with identifier: '{identifier}'.", request.UserGuid);
// Rebuild the configuration to include the updates roles
var userConfiguration = await userConfigurationRepository.RetrieveAsync(request.UserGuid, true);
return userConfiguration;
}
}
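
For reference: EF Core converts each positional placeholder passed to ExecuteSqlRawAsync into a DbParameter, so the role values above are never concatenated into the SQL text. The interpolated overload gives the same protection with less ceremony; a minimal sketch, assuming the same dbContext, request, and table as above:

// Sketch: each interpolation hole becomes a DbParameter, equivalent to the
// positional form used in the endpoint above.
foreach (var role in request.Roles)
{
    await dbContext.Database.ExecuteSqlInterpolatedAsync(
        $"INSERT INTO SRD_SYS.USER_ROLES (USER_ACCOUNT_ID, USER_ROLE) VALUES ({userAccountId}, {role})");
}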

View File

@@ -0,0 +1,62 @@
using Ablera.Serdica.Microservice.Consumer.Attributes;
using Ablera.Serdica.Microservice.Consumer.Contracts.Asynchronous;
using Ablera.Serdica.Authority.Services;
using System.Threading.Tasks;
using Ablera.Serdica.DBModels.Serdica;
using Microsoft.EntityFrameworkCore;
using System.Linq;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.Extensions.RabbitMQ.Listeners;
using Ablera.Serdica.Extensions.RabbitMQ.Contracts;
namespace Ablera.Serdica.NotificationService.Endpoints;
public record UserBranchesResponse
{
public long Id { get; init; }
public long BranchId { get; init; }
public required string BranchCode { get; init; }
public required string BranchName { get; init; }
public long? ReportTo { get; init; }
public string? AgentCode { get; init; }
}
[Command("users_get_user_branches", timeoutInSeconds: 10, methodName: "GET", allowedRoles: [SerdicaClaims.IsAuthenticated])]
public class UserBranchesEndpoint(
IEndpointRequestMessageProvider requestMessageProvider,
SerdicaDbContext dbContext)
: IEndpointWithNoRequest<UserBranchesResponse[]>
{
public async Task<UserBranchesResponse[]> ConsumeAsync()
{
var items = await dbContext
.IcUsers
.Include(x => x.IcBranch)
.Include(x => x.UserAccount)
.Where(x => x.UserAccount.UserGuid == requestMessageProvider.RequestMessage.UserId)
.ToListAsync();
var srCustIds = items.Where(x => x.IcBranch != null)
.Select(x => x.IcBranch.SrCustId)
.ToArray();
var branchNames = await dbContext
.CCusts
.Include(x => x.CCompany)
.Where(x => srCustIds.Contains(x.SrCustId))
.Where(x => x.CCompany != null)
.ToDictionaryAsync(x => x.SrCustId, x => x.CCompany.CompName);
var dtos = items.Select(x => new UserBranchesResponse
{
AgentCode = x.AgentCode,
BranchId = x.IcBranchId.HasValue ? (long)x.IcBranchId.Value : 0,
ReportTo = (long?)x.ReportTo,
BranchCode = x.IcBranch.BranchCode,
BranchName = branchNames.TryGetValue(x.IcBranch.SrCustId ?? 0, out var branchName)
? branchName
: x.IcBranch.BranchCode,
Id = (long)x.IcUserId,
});
return dtos.ToArray();
}
}

View File

@@ -0,0 +1,58 @@
using Ablera.Serdica.Microservice.Consumer.Attributes;
using Ablera.Serdica.Microservice.Consumer.Contracts.Asynchronous;
using Ablera.Serdica.Common.Tools.Exceptions;
using Ablera.Serdica.Authority.Services;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.DBModels.Serdica;
using System.Linq;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Extensions.RabbitMQ.Contracts;
using Ablera.Serdica.UserConfiguration.Models;
using System;
using Ablera.Serdica.Authority.Plugins.Base.Contracts;
using Ablera.Serdica.UserConfiguration.Contracts;
using Microsoft.EntityFrameworkCore;
using static Ablera.Serdica.Authority.Constants.ConstantsClass;
namespace Ablera.Serdica.NotificationService.Endpoints;
public record UserConfigurationResponse
{
public required UserConfigurationModel Configuration { get; init; }
public required SerdicaRoute[] Routes { get; init; }
}
[Command("users_get_user_configuration", timeoutInSeconds: 10, methodName: "GET", allowedRoles: [SerdicaClaims.IsAuthenticated])]
public class UserConfigurationEndpoint(
IEndpointRequestMessageProvider requestMessageProvider,
SerdicaDbContext dbContext,
RoutesTreeProvider routesProvider,
IUserConfigurationRepository<UserConfigurationModel> repository)
: IEndpointWithNoRequest<UserConfigurationResponse?>
{
public async Task<UserConfigurationResponse?> ConsumeAsync()
{
var userAccount = await dbContext.UserAccounts
.FirstOrDefaultAsync(x => x.UserGuid == requestMessageProvider.RequestMessage.UserId)
?? throw new BaseResultException("account_not_found".AsCode(), "Account associated with the session not found!");
if (userAccount.LockAccount == YesKey)
{
throw new BaseResultException("account_locked".AsCode(), "Your account is locked. Please contact support.");
}
var userConfiguration = await repository.RetrieveAsync(userAccount.UserGuid);
// Recursively filter the snapshot based on user roles and map to final DTO.
var filteredRoutes = (routesProvider.Tree ?? [])
.Select(route => route.FilterAndMapRoute(userConfiguration.Roles))
.Where(r => r != null)
.Cast<SerdicaRoute>()
.ToList();
return new UserConfigurationResponse
{
Configuration = userConfiguration,
Routes = filteredRoutes.ToArray()
};
}
}

View File

@@ -0,0 +1,42 @@
using Ablera.Serdica.Microservice.Consumer.Attributes;
using Ablera.Serdica.Microservice.Consumer.Contracts.Asynchronous;
using Ablera.Serdica.Common.Tools.Exceptions;
using Ablera.Serdica.Authority.Services;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.DBModels.Serdica;
using System.Linq;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Extensions.RabbitMQ.Contracts;
using Microsoft.EntityFrameworkCore;
using System;
using Ablera.Serdica.Microservice.Consumer.Contracts;
using static Ablera.Serdica.Authority.Constants.ConstantsClass;
namespace Ablera.Serdica.NotificationService.Endpoints;
public record UserGetLoginEnabledRequest
{
public required string UserGuid { get; init; }
}
public record UserGetLoginEnabledResponse
{
public required bool LoginEnabled { get; init; }
}
[Command("users_user_login_enabled", timeoutInSeconds: 10, methodName: "POST", allowedRoles: [SerdicaClaims.RoleSuperUser])]
public class UserLoginEnabledEndpoint(
SerdicaDbContext dbContext)
: Microservice.Consumer.Contracts.Asynchronous.IEndpointWithRequest<UserGetLoginEnabledRequest, UserGetLoginEnabledResponse>
{
public async Task<UserGetLoginEnabledResponse> ConsumeAsync(UserGetLoginEnabledRequest request)
{
var userAccount = await dbContext.UserAccounts
.Where(x => x.UserGuid == request.UserGuid)
.FirstOrDefaultAsync()
?? throw new BaseResultException("account_not_found".AsCode(), "Account associated with the session not found!");
return new UserGetLoginEnabledResponse { LoginEnabled = userAccount.LockAccount != YesKey };
}
}

View File

@@ -0,0 +1,38 @@
using Ablera.Serdica.Microservice.Consumer.Attributes;
using Ablera.Serdica.Microservice.Consumer.Contracts.Asynchronous;
using Ablera.Serdica.Common.Tools.Exceptions;
using Ablera.Serdica.Authority.Services;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.DBModels.Serdica;
using System.Linq;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Extensions.RabbitMQ.Contracts;
using Microsoft.EntityFrameworkCore;
namespace Ablera.Serdica.NotificationService.Endpoints;
[Command("users_get_user_roles", timeoutInSeconds: 10, methodName: "GET", allowedRoles: [SerdicaClaims.IsAuthenticated])]
public class UserRolesEndpoint(
IEndpointRequestMessageProvider requestMessageProvider,
SerdicaDbContext dbContext)
: IEndpointWithNoRequest<string[]>
{
public async Task<string[]> ConsumeAsync()
{
var userAccount = await dbContext.UserAccounts
.FirstOrDefaultAsync(x => x.UserGuid == requestMessageProvider.RequestMessage.UserId)
?? throw new BaseResultException("account_not_found".AsCode(), "Account associated with the session not found!");
var userRoles = await dbContext.UserAccounts
.Where(x => x.UserAccountId == userAccount.UserAccountId)
.SelectMany(x => x.UserRole1s)
.Select(x => x.Id)
.ToArrayAsync();
if (!userRoles.Any())
{
throw new BaseResultException("account_has_no_roles".AsCode(), "Account has not roles set!");
}
return userRoles;
}
}

View File

@@ -0,0 +1,58 @@
using System;
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations.Schema;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.Options;
using Ablera.Serdica.Microservice.Consumer.Attributes;
using Ablera.Serdica.Microservice.Consumer.Contracts.Asynchronous;
using Ablera.Serdica.DBModels.Serdica;
using Ablera.Serdica.Common.Tools.Exceptions;
using Ablera.Serdica.Authorization.Models;
using Ablera.Serdica.LocalCacheProvider.Contracts;
using Ablera.Serdica.Authority.Services;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Authentication.Constants;
using Ablera.Serdica.Extensions.RabbitMQ.Contracts;
using Polly;
using System.Configuration;
namespace Ablera.Serdica.NotificationService.Endpoints;
[Command("users_get_user_views", timeoutInSeconds: 5, methodName: "GET", allowedRoles: [SerdicaClaims.IsAuthenticated])]
public class UserViewsEndpoint(
IEndpointRequestMessageProvider requestMessageProvider,
SerdicaDbContext dbContext,
RoutesTreeProvider routesProvider)
: IEndpointWithNoRequest<SerdicaRoute[]>
{
public async Task<SerdicaRoute[]> ConsumeAsync()
{
var userAccount = await dbContext.UserAccounts
.FirstOrDefaultAsync(x => x.UserGuid == requestMessageProvider.RequestMessage.UserId)
?? throw new BaseResultException("account_not_found".AsCode(), "Account associated with the session was not found!");
var userRoles = await dbContext.UserAccounts
.Where(x => x.UserAccountId == userAccount.UserAccountId)
.SelectMany(x => x.UserRole1s)
.Select(x => x.Id)
.ToArrayAsync();
if (!userRoles.Any())
{
throw new BaseResultException("account_has_no_roles".AsCode(), "Account has not roles set!");
}
// Recursively filter the snapshot based on user roles and map to final DTO.
var filteredRoutes = (routesProvider.Tree ?? [])
.Select(route => route.FilterAndMapRoute(userRoles))
.Where(r => r != null)
.Cast<SerdicaRoute>()
.ToArray();
return filteredRoutes;
}
}

View File

@@ -0,0 +1,54 @@
using System.Collections.Generic;
using System.Text.Json;
namespace Ablera.Serdica.Authority.Extensions;
public class JsonElementEqualityComparer : IEqualityComparer<JsonElement>
{
public static readonly JsonElementEqualityComparer Default = new();
public bool Equals(JsonElement x, JsonElement y)
{
// if they are both JSON strings, compare the decoded string values
if (x.ValueKind == JsonValueKind.String && y.ValueKind == JsonValueKind.String)
return x.GetString() == y.GetString();
// otherwise, compare their entire JSON text
return x.GetRawText() == y.GetRawText();
}
public int GetHashCode(JsonElement obj)
{
// raw text is the canonical JSON including quotes, so it is stable for hashing
return obj.GetRawText().GetHashCode();
}
}
public static class DictionaryExtensions
{
public static bool DictionaryEquals<TKey, TValue>(
this IDictionary<TKey, TValue> a,
IDictionary<TKey, TValue> b,
IEqualityComparer<TValue>? valueComparer = null)
{
// same reference or both null?
if (ReferenceEquals(a, b)) return true;
// one null or different size?
if (a is null || b is null || a.Count != b.Count) return false;
valueComparer ??= EqualityComparer<TValue>.Default;
foreach (var pair in a)
{
// key missing?
if (!b.TryGetValue(pair.Key, out var bValue))
return false;
// value mismatch?
if (!valueComparer.Equals(pair.Value, bValue))
return false;
}
return true;
}
}
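
A short usage sketch of the two helpers together; the JSON payloads are illustrative:

using System.Text.Json;
// Two OpenIddict-style property bags whose values are JsonElements.
var a = JsonSerializer.Deserialize<Dictionary<string, JsonElement>>("{\"url\":\"https://a\"}");
var b = JsonSerializer.Deserialize<Dictionary<string, JsonElement>>("{\"url\":\"https://a\"}");
bool same = a!.DictionaryEquals(b!, JsonElementEqualityComparer.Default); // true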

View File

@@ -0,0 +1,19 @@
using System.Collections.Generic;
using System.Text.Json;
using System;
namespace Ablera.Serdica.Authority.Extensions;
public static class ImmutableDictionaryExtensions
{
public static string GetStringOrThrow(this IDictionary<string, JsonElement> dict, string key, string clientId)
{
if (!dict.TryGetValue(key, out var jsonElement)
|| jsonElement.ValueKind != JsonValueKind.String
|| string.IsNullOrWhiteSpace(jsonElement.GetString()))
{
throw new InvalidOperationException($"No {key} property is defined for client with id '{clientId}'.");
}
return jsonElement.GetString()!;
}
}
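
For illustration, this is how the helper is typically called against an OpenIddict application's property bag; the manager, app instance, and client id are assumed:

// GetPropertiesAsync returns ImmutableDictionary<string, JsonElement>,
// which satisfies the IDictionary<string, JsonElement> extension target.
var props = await applicationManager.GetPropertiesAsync(app);
var delegateUrl = props.GetStringOrThrow(
    ConstantsClass.AuthenticationDelegateUrlKey, "serdica-ui"); // throws if missing or blank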

View File

@@ -0,0 +1,39 @@
using Ablera.Serdica.Authority.Constants;
using Ablera.Serdica.Authority.Extensions;
using Ablera.Serdica.Authority.Services;
using Microsoft.AspNetCore;
using Microsoft.AspNetCore.Authentication;
using Microsoft.AspNetCore.Authentication.Cookies;
using Microsoft.Extensions.DependencyInjection;
using OpenIddict.Abstractions;
using System;
using System.Linq;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authority.Extensions;
public static class RedirectToLoginHandler
{
public static async Task HandlerRedirectToLogin(this RedirectContext<CookieAuthenticationOptions> ctx)
{
var oidcSettings = ctx.HttpContext.RequestServices.GetRequiredService<OidcJsonSettingsProvider>()
.Settings;
// only intercept the OIDC authorize endpoint
if (ctx.Request.Path.StartsWithSegments(oidcSettings!.Endpoints.Authorization.EnsureStartsWith("/")) == false)
{
ctx.Response.Redirect(ctx.RedirectUri); // normal behaviour
return;
}
var appMgr = ctx.HttpContext.RequestServices
.GetRequiredService<IOpenIddictApplicationManager>();
var oidReq = ctx.HttpContext.GetOpenIddictServerRequest();
var app = await appMgr.FindByClientIdAsync(oidReq!.ClientId!);
var props = await appMgr.GetPropertiesAsync(app!);
var delegateUrl = props.GetStringOrThrow(ConstantsClass.AuthenticationDelegateUrlKey, oidReq.ClientId!);
var confirm = $"{ctx.HttpContext.Request.Scheme}://{ctx.HttpContext.Request.Host}{ctx.Request.Path}{ctx.Request.QueryString}";
var redir = delegateUrl + "&confirmUrl=" + Uri.EscapeDataString(confirm);
ctx.Response.Redirect(redir);
}
}

View File

@@ -0,0 +1,39 @@
using System.Security.Claims;
using OpenIddict.Abstractions;
using Ablera.Serdica.Authority.Contracts;
using Microsoft.AspNetCore.Identity;
using System.Threading.Tasks;
using static OpenIddict.Abstractions.OpenIddictConstants;
using System.Linq;
using OpenIddict.Server.AspNetCore;
using System.Collections.Generic;
using Microsoft.AspNetCore.Authentication.Cookies;
using static Ablera.Serdica.Authentication.Constants.ConstantsClass;
namespace Ablera.Serdica.Authority.Extensions;
public static class SerdicaPrincipalBuilder
{
public static ClaimsPrincipal Build(IEnumerable<Claim> claims, IEnumerable<string> scopes, string authenticationType)
{
var principal =
new ClaimsPrincipal(
new ClaimsIdentity(
claims,
authenticationType,
Claims.Name,
Claims.Role));
principal.SetResources(SerdicaAPIAudience);
principal.SetScopes(scopes);
principal.SetDestinations(c =>
c.Type == Claims.Name ? new[] { Destinations.AccessToken,
Destinations.IdentityToken }
: new[] { Destinations.AccessToken });
return principal;
}
}
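
A minimal call-site sketch, assuming cookie-authenticated claims and the scopes of the incoming OIDC request (this mirrors the authorization handler later in this diff):

var principal = SerdicaPrincipalBuilder.Build(
    result.Principal.Claims,  // claims from the cookie session
    request.GetScopes(),      // scopes granted to the client
    OpenIddictServerAspNetCoreDefaults.AuthenticationScheme);
ctx.SignIn(principal); // Name reaches both tokens; everything else only the access token.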

View File

@@ -0,0 +1,18 @@
using System;
namespace Ablera.Serdica.Authority.Extensions;
public static class StringExtensions
{
public static string EnsureStartsWith(this string src, string prefix)
{
return src.StartsWith(prefix, StringComparison.OrdinalIgnoreCase) ? src : prefix + src;
}
public static string AppendPath(this string src, string suffix)
{
var d = src + suffix.EnsureStartsWith("/");
var r = d.TrimStart('/').EnsureStartsWith("/");
return r;
}
}

View File

@@ -0,0 +1,12 @@
using System;
namespace Ablera.Serdica.Authority.Extensions;
public static class UriExtensions
{
public static Uri AppendPath(this Uri baseUrl, string suffix)
{
var d = new Uri(baseUrl, baseUrl.AbsolutePath.AppendPath(suffix));
return d;
}
}
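
Concrete inputs and outputs for the two path helpers above; the URLs are illustrative:

var p1 = "connect/token".EnsureStartsWith("/");  // "/connect/token"
var p2 = "/connect/token".EnsureStartsWith("/"); // unchanged; the prefix check is case-insensitive
var authorize = new Uri("https://auth.example/base").AppendPath("connect/authorize");
// authorize.AbsoluteUri == "https://auth.example/base/connect/authorize"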

View File

@@ -0,0 +1,91 @@
using Ablera.Serdica.DBModels.Oidc;
using Ablera.Serdica.Authority.Services;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using System.Threading.Tasks;
using System.Threading;
using System;
using Microsoft.EntityFrameworkCore;
namespace Ablera.Serdica.Authority.HostServices;
public class OidcInfrastructureHostedService(
ILogger<OidcInfrastructureHostedService> logger,
IServiceScopeFactory scopeFactory) : IHostedService, IDisposable
{
private readonly TimeSpan _updateInterval = TimeSpan.FromMinutes(2);
private Timer? _timer;
private CancellationTokenSource? _stoppingCts;
public async Task StartAsync(CancellationToken cancellationToken)
{
logger.LogInformation($"{nameof(OidcInfrastructureHostedService)} service starting...");
// Apply migrations
using var scope = scopeFactory.CreateScope();
using var dbContext = scope.ServiceProvider.GetRequiredService<OidcDbContext>();
await dbContext.Database.CreateExecutionStrategy().ExecuteAsync(async () =>
{
await using var transaction = await dbContext.Database.BeginTransactionAsync(cancellationToken);
await dbContext.Database.MigrateAsync(cancellationToken);
await transaction.CommitAsync(cancellationToken);
});
// Create a CTS that links the ASP.NET shutdown token with our own
_stoppingCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
// Schedule the first run immediately
_ = RunOnceAsync(_stoppingCts.Token);
// Then schedule recurring runs. Notice we capture the CTS token.
_timer = new Timer(
_ => _ = RunOnceAsync(_stoppingCts.Token),
state: null,
dueTime: _updateInterval,
period: _updateInterval);
}
private async Task RunOnceAsync(CancellationToken token)
{
try
{
// Honor cancellation right at the top
token.ThrowIfCancellationRequested();
using var scope = scopeFactory.CreateScope();
using var context = scope.ServiceProvider.GetRequiredService<OidcDbContext>();
var sync = scope.ServiceProvider.GetRequiredService<OidcClientSynchronizer>();
// Do the synchronization
await sync.SynchronizeAsync(token);
}
catch (OperationCanceledException)
{
// Expected on shutdown; swallow.
}
catch (Exception ex)
{
logger.LogError(ex, $"Error while synchronizing {nameof(OidcInfrastructureHostedService)}.");
}
}
public Task StopAsync(CancellationToken cancellationToken)
{
logger.LogInformation($"{nameof(OidcInfrastructureHostedService)} service stopping...");
// Signal cancellation to the RunOnceAsync calls
_stoppingCts?.Cancel();
// Stop the timer from firing any more
_timer?.Change(Timeout.Infinite, 0);
return Task.CompletedTask;
}
public void Dispose()
{
_timer?.Dispose();
_stoppingCts?.Dispose();
}
}
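
A startup-registration sketch, assuming the standard generic host builder; OidcDbContext and OidcClientSynchronizer are the types the service resolves from its scope above:

// Program.cs sketch (provider-specific options elided).
builder.Services.AddDbContext<OidcDbContext>();
builder.Services.AddScoped<OidcClientSynchronizer>();
builder.Services.AddHostedService<OidcInfrastructureHostedService>();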

View File

@@ -0,0 +1,138 @@
using Ablera.Serdica.DBModels.Serdica;
using Ablera.Serdica.Microservice.Consumer.Services;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Authority.Services;
using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authority.HostServices;
public class RoutesTreeBuilderHostedService(
ILogger<RoutesTreeBuilderHostedService> logger,
IServiceScopeFactory scopeFactory,
RoutesTreeProvider routesTreeProvider)
: IHostedService, IDisposable
{
private Timer? _timer;
private readonly TimeSpan _updateInterval = TimeSpan.FromMinutes(2);
public Task StartAsync(CancellationToken cancellationToken)
{
logger.LogInformation($"{nameof(RoutesTreeBuilderHostedService)} starting...");
// Initial snapshot update.
UpdateSnapshot();
// Set timer to update every 2 minutes.
_timer = new Timer(state => UpdateSnapshot(), null, _updateInterval, _updateInterval);
return Task.CompletedTask;
}
private void UpdateSnapshot()
{
try
{
using var scope = scopeFactory.CreateScope();
using var requestProfiler = scope.ServiceProvider.GetRequiredService<RequestProfiler>();
requestProfiler.BeginStage("RoutesTreeBuilderService.UpdateSnapshot");
using var dbContext = scope.ServiceProvider.GetRequiredService<SerdicaDbContext>();
// Retrieve flat routes with minimal projection.
var flatRoutes = dbContext.Routes.AsNoTracking()
.OrderBy(x => x.SortOrder)
.Select(x => new RouteEntity
{
Id = x.Id,
ParentId = x.ParentId,
ViewConfigId = x.ViewConfigId,
Type = x.Type,
Title = x.Title,
Disabled = x.Disabled,
IsMenuItem = x.IsMenuItem,
IsDashboardItem = x.IsDashboardItem,
Path = x.Path,
SortOrder = x.SortOrder,
Icon = x.Icon,
SvgIcon = x.SvgIcon,
Breadcrumbs = x.Breadcrumbs,
Translate = x.Translate,
ExternalUrl = x.ExternalUrl,
Url = x.Url,
Function = x.Function,
OpenInNewTab = x.OpenInNewTab,
ExactMatch = x.ExactMatch,
ProductCode = x.ProductCode,
ProcessBusinessKey = x.ProcessBusinessKey,
AllowedRoles = string.IsNullOrWhiteSpace(x.AllowedRoles)
? Array.Empty<string>()
: x.AllowedRoles.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries)
})
.ToList();
// Convert flat list to a tree structure.
var tree = BuildTree(flatRoutes);
// Atomically update the snapshot.
routesTreeProvider.Tree = tree;
requestProfiler.EndStage("RoutesTreeBuilderService.UpdateSnapshot");
logger.LogInformation("Routes snapshot updated with {Count} root nodes and {Branches} branches.", tree.Count, flatRoutes.Count);
}
catch (Exception ex)
{
logger.LogError(ex, "Failed to update routes snapshot.");
}
}
private static IReadOnlyList<RouteEntity> BuildTree(List<RouteEntity> flatRoutes)
{
var lookup = flatRoutes.ToDictionary(r => r.Id);
var roots = new List<RouteEntity>();
// Build parent/child relationships.
foreach (var route in flatRoutes)
{
if (route.ParentId.HasValue && lookup.TryGetValue(route.ParentId.Value, out var parent))
{
parent.Children.Add(route);
}
else
{
roots.Add(route);
}
}
// Recursively sort children by SortOrder.
void SortTree(List<RouteEntity> routes)
{
routes.Sort((a, b) => (a.SortOrder ?? 0).CompareTo(b.SortOrder ?? 0));
foreach (var r in routes)
{
if (r.Children.Any())
{
SortTree(r.Children);
}
}
}
SortTree(roots);
return roots;
}
public Task StopAsync(CancellationToken cancellationToken)
{
logger.LogInformation($"{nameof(RoutesTreeBuilderHostedService)} stopping...");
_timer?.Change(Timeout.Infinite, 0);
return Task.CompletedTask;
}
public void Dispose()
{
_timer?.Dispose();
}
}

View File

@@ -0,0 +1,12 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authority.Models;
public record FileServerConfig
{
public string RootPathPrefixForWWW { get; set; } = string.Empty;
}

View File

@@ -0,0 +1,106 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authority.Models;
public static class RouteEntityExtensions
{
public static SerdicaRoute? FilterAndMapRoute(this RouteEntity route, IEnumerable<string> userRoles)
{
// Skip nodes where user is not allowed.
if (!route.AllowedRoles.Intersect(userRoles).Any())
{
return null;
}
var children = route.Children
.Select(child => FilterAndMapRoute(child, userRoles))
.Where(childDto => childDto != null)
.Cast<SerdicaRoute>()
.ToList();
return new SerdicaRoute(
Id: route.Id,
ParentId: route.ParentId,
ViewConfigId: route.ViewConfigId,
Type: route.Type,
Title: route.Title,
Disabled: route.Disabled,
IsMenuItem: route.IsMenuItem,
IsDashboardItem: route.IsDashboardItem,
Path: route.Path,
SortOrder: route.SortOrder,
Icon: route.Icon,
SvgIcon: route.SvgIcon,
Breadcrumbs: route.Breadcrumbs,
Translate: route.Translate,
ExternalUrl: route.ExternalUrl,
Url: route.Url,
Function: route.Function,
OpenInNewTab: route.OpenInNewTab,
ExactMatch: route.ExactMatch,
ProductCode: route.ProductCode,
ProcessBusinessKey: route.ProcessBusinessKey,
AllowedRoles: route.AllowedRoles,
Children: children
);
}
}
public class RouteEntity
{
public Guid Id { get; set; }
public Guid? ParentId { get; set; }
public Guid? ViewConfigId { get; set; }
public string? Type { get; set; }
public string? Title { get; set; }
public string? Disabled { get; set; }
public string? IsMenuItem { get; set; }
public string? IsDashboardItem { get; set; }
public required string Path { get; set; }
public int? SortOrder { get; set; }
public string? Icon { get; set; }
public string? SvgIcon { get; set; }
public string? Breadcrumbs { get; set; }
public string? Translate { get; set; }
public string? ExternalUrl { get; set; }
public string? Url { get; set; }
public string? Function { get; set; }
public string? OpenInNewTab { get; set; }
public string? ExactMatch { get; set; }
public string? ProductCode { get; set; }
public string? ProcessBusinessKey { get; set; }
public required string[] AllowedRoles { get; set; }
// Children collection for building the tree.
public List<RouteEntity> Children { get; set; } = new List<RouteEntity>();
}
public record SerdicaRoute
(
Guid Id,
Guid? ParentId,
Guid? ViewConfigId,
string? Type,
string? Title,
string? Disabled,
string? IsMenuItem,
string? IsDashboardItem,
string? Path,
int? SortOrder,
string? Icon,
string? SvgIcon,
string? Breadcrumbs,
string? Translate,
string? ExternalUrl,
string? Url,
string? Function,
string? OpenInNewTab,
string? ExactMatch,
string? ProductCode,
string? ProcessBusinessKey,
string[]? AllowedRoles,
IReadOnlyList<SerdicaRoute> Children
);
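
Note the pruning semantics of FilterAndMapRoute: a node the user cannot see is dropped together with its whole subtree, even when a child alone would have matched. A tiny sketch with made-up roles:

var admin = new RouteEntity { Path = "/admin", AllowedRoles = new[] { "ROLE_SUPER_USER" } };
admin.Children.Add(new RouteEntity { Path = "/admin/users", AllowedRoles = new[] { "ROLE_USER" } });
var visible = admin.FilterAndMapRoute(new[] { "ROLE_USER" }); // null: parent denied, child never visited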

View File

@@ -0,0 +1,21 @@
using Microsoft.AspNetCore.Mvc;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authority.Models;
public sealed record TokenRequest
{
[FromForm(Name = "grant_type")] public string? GrantType { get; init; }
[FromForm(Name = "username")] public string? Username { get; init; }
[FromForm(Name = "password")] public string? Password { get; init; }
[FromForm(Name = "client_id")] public required string ClientId { get; init; }
[FromForm(Name = "client_secret")] public required string ClientSecret { get; init; }
[FromForm(Name = "scope")] public string? Scope { get; init; }
[FromForm(Name = "refresh_token")] public string? RefreshToken { get; init; }
[FromForm(Name = "code")] public string? Code { get; init; }
[FromForm(Name = "redirect_uri")] public string? RedirectUri { get; init; }
}
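
The [FromForm] attributes bind the standard OAuth 2.0 token-endpoint fields. A hypothetical controller action showing the shape of the form post (the real token endpoint is handled by the OpenIddict pipeline):

[HttpPost("~/connect/token")]
[Consumes("application/x-www-form-urlencoded")]
public IActionResult Exchange([FromForm] TokenRequest request)
    => Ok(new { request.GrantType, request.ClientId, request.Scope });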

View File

@@ -0,0 +1,13 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authority.Models;
public record UserManagingDirectorConfig
{
public bool LoginAnywhere { get; set; } = true;
public bool UpdateEveryWhere { get; set; } = false;
}

View File

@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<packageSources>
<add key="nuget-mirror" value="https://mirrors.ablera.dev/nuget/nuget-mirror/v3/index.json" />
<add key="GitlabSerdicaBackend" value="https://gitlab.ablera.dev/api/v4/projects/92/packages/nuget/index.json" />
</packageSources>
<packageSourceCredentials>
<GitlabSerdicaBackend>
<add key="Username" value="gitlab+deploy-token-3" />
<add key="ClearTextPassword" value="osdy7Ec2sVoSJC2Kaxvr" />
</GitlabSerdicaBackend>
</packageSourceCredentials>
</configuration>

View File

@@ -0,0 +1,80 @@
using System;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Extensions;
using Ablera.Serdica.UserConfiguration.Contracts;
using Ablera.Serdica.UserConfiguration.Models;
using Ablera.Serdica.Authority.Constants;
using Ablera.Serdica.Authority.Contracts;
using Ablera.Serdica.Authority.Extensions;
using Ablera.Serdica.Authority.Services;
using Microsoft.AspNetCore;
using Microsoft.AspNetCore.Authentication;
using Microsoft.AspNetCore.Authentication.Cookies;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Identity;
using OpenIddict.Abstractions;
using OpenIddict.Server;
using OpenIddict.Server.AspNetCore;
namespace Ablera.Serdica.Authority.OpenIddictServerHandlers;
public sealed class AuthorizationRequestHandler(
AuthenticationUrlBuilder authenticationUrlBuilder,
IHttpContextAccessor httpContextAccessor,
IUserManagingDirector<IdentityUser<string>> users,
IUserConfigurationBuilder<UserConfigurationModel> userConfigurationBuilder,
IUserConfigurationRepository<UserConfigurationModel> userConfigurationRepository,
IOpenIddictApplicationManager manager) :
IOpenIddictServerHandler<OpenIddictServerEvents.HandleAuthorizationRequestContext>
{
public async ValueTask HandleAsync(
OpenIddictServerEvents.HandleAuthorizationRequestContext ctx)
{
var request = httpContextAccessor.HttpContext?.GetOpenIddictServerRequest()
?? throw new InvalidOperationException("No OIDC request found.");
var result = await httpContextAccessor.HttpContext.AuthenticateAsync(
CookieAuthenticationDefaults.AuthenticationScheme);
// ------------ local session exists → issue code/token ------------
if (result.Succeeded)
{
var userId = result.Principal.GetUserId();
if (userId == null) return;
var identityUser = await users.FindByIdAsync(userId);
if (identityUser == null) return;
var systemClaims = result.Principal.Claims ?? [];
//var baseClaims = await users.GetBaseClaimsAsync(identityUser) ?? [];
//var roleClaims = await users.GetRolesClaimsAsync(identityUser) ?? [];
//HashSet<Claim> claims = [.. systemClaims, .. baseClaims, .. roleClaims];
var principal = SerdicaPrincipalBuilder.Build(
systemClaims,
request.GetScopes(),
OpenIddictServerAspNetCoreDefaults.AuthenticationScheme);
ctx.SignIn(principal);
// store user configuration to be reused from microservices
var userConfiguration = await userConfigurationBuilder.BuildUserConfigurationAsync(userId);
await userConfigurationRepository.StoreAsync(userId, userConfiguration);
return;
}
var client = await manager.FindByClientIdAsync(request.ClientId!);
if (client is null) return;
// ------------- no session → choose where to login -----------------
var authenticationUrl = authenticationUrlBuilder.BuildAuthenticationUrl(
request.ClientId!,
(await manager.GetPropertiesAsync(client))
.GetStringOrThrow(ConstantsClass.AuthenticationDelegateUrlKey, request.ClientId!),
httpContextAccessor.HttpContext!.Request);
if (authenticationUrl is null) return;
httpContextAccessor.HttpContext!.Response.Redirect(authenticationUrl);
ctx.HandleRequest();
}
}
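The delegate URL consulted above is read from the application's properties, which are seeded during client synchronization (see OidcClientSynchronizer later in this diff). A sketch of seeding it on a descriptor; the property key and template are assumptions, since the value of ConstantsClass.AuthenticationDelegateUrlKey is not shown here:
using System.Text.Json;
using OpenIddict.Abstractions;
var descriptor = new OpenIddictApplicationDescriptor { ClientId = "serdica-spa" };
descriptor.Properties["AuthenticationDelegateUrl"] =   // key name assumed
    JsonSerializer.SerializeToElement(
        "{{issuer_url}}/login.html?redirect={{redirect_url}}");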

View File

@@ -0,0 +1,73 @@
using System.Collections.Generic;
using Ablera.Serdica.Authentication.Models.Oidc;
using Ablera.Serdica.Authority.Contracts;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Authority.Services;
using Microsoft.Extensions.Logging;
using OpenIddict.Abstractions;
using OpenIddict.Server;
using System.Linq;
using System.Security.Claims;
using System.Threading.Tasks;
using OpenIddict.Server.AspNetCore;
using static OpenIddict.Abstractions.OpenIddictConstants;
using static OpenIddict.Server.OpenIddictServerEvents;
namespace Ablera.Serdica.Authority.OpenIddictServerHandlers;
public sealed class ClientCredentialsGrantHandler(
OidcJsonSettingsProvider settingsProvider) :
IOpenIddictServerHandler<HandleTokenRequestContext>
{
public async ValueTask HandleAsync(HandleTokenRequestContext ctx)
{
if (!ctx.Request.IsClientCredentialsGrantType())
return;
var registeredClient = settingsProvider.Settings
.RegisteredClients
.FirstOrDefault(x => x.ClientId == ctx.Request.ClientId!);
if (registeredClient == null)
return;
var claims = new List<Claim>
{
// Exactly **one** subject claim: the client_id.
new(Claims.Subject, ctx.Request.ClientId!),
// Name related claims
new(ClaimTypes.NameIdentifier, ctx.Request.ClientId!),
new(ClaimTypes.Name, registeredClient.DisplayName)
};
// Any pre-configured claims
claims.AddRange(
from claimTypeAndValue in registeredClient.BuiltinClaims ?? []
select new Claim(claimTypeAndValue.Type, claimTypeAndValue.Value));
// Build a fresh identity to avoid duplicates.
var principal =
new ClaimsPrincipal(
new ClaimsIdentity(
claims,
OpenIddictServerAspNetCoreDefaults.AuthenticationScheme,
Claims.Name,
Claims.Role));
// Scopes: intersect requested with allowed set.
var scopes = (registeredClient.Permissions ?? [])
.Where(x => x.StartsWith("scp:"))
.Select(x => x.Substring(4))
.Concat(settingsProvider.Settings.Scopes)
.Distinct()
.ToArray();
principal.SetScopes(ctx.Request.GetScopes().Intersect(scopes));
// API audience(s) your APIs expect.
principal.SetResources(Authentication.Constants.ConstantsClass.SerdicaAPIAudience);
principal.SetDestinations(c =>
c.Type == Claims.Name ? new[] { Destinations.AccessToken,
Destinations.IdentityToken }
: new[] { Destinations.AccessToken });
ctx.SignIn(principal);
}
}
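A self-contained sketch of the scope computation above, with hypothetical permission values (the "scp:" prefix convention is taken from the handler itself):
using System;
using System.Linq;
string[] clientPermissions = { "scp:billing.read", "ept:token" };   // hypothetical
string[] globalScopes      = { "openid" };
string[] requestedScopes   = { "billing.read", "admin" };
var allowed = clientPermissions
    .Where(p => p.StartsWith("scp:"))
    .Select(p => p.Substring(4))
    .Concat(globalScopes)
    .Distinct();
var granted = requestedScopes.Intersect(allowed);   // → "billing.read"
Console.WriteLine(string.Join(", ", granted));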

View File

@@ -0,0 +1,43 @@
using Microsoft.AspNetCore.Authentication;
using Microsoft.AspNetCore.Authentication.Cookies;
using System.Threading.Tasks;
using OpenIddict.Server;
using Microsoft.AspNetCore.Http;
namespace Ablera.Serdica.Authority.OpenIddictServerHandlers;
public sealed class EndSessionHandler(
IHttpContextAccessor accessor//,
//IOpenIddictAuthorizationManager authMgr,
//IOpenIddictTokenManager tokMgr
) :
IOpenIddictServerHandler<OpenIddictServerEvents.HandleEndSessionRequestContext>
{
public async ValueTask HandleAsync(OpenIddictServerEvents.HandleEndSessionRequestContext ctx)
{
// Do not revoke tokens if the request is not a valid end session request.
// User might be logged in on multiple devices, so we only remove the SSO cookie
// 1) authenticate the cookie (if any)
//var principal = (await accessor.HttpContext!
// .AuthenticateAsync(CookieAuthenticationDefaults.AuthenticationScheme))
// ?.Principal;
//// 2) otherwise fall back to the id_token_hint analysed by OpenIddict
//principal ??= ctx.IdentityTokenHintPrincipal;
//// 3) revoke tokens/authorisations that belong to that user
//if (principal is { }) {
// await foreach (var auth in authMgr.ListAsync())
// await authMgr.TryRevokeAsync(auth);
// await foreach (var tok in tokMgr.ListAsync())
// await tokMgr.TryRevokeAsync(tok);
//}
// 4) remove the SSO cookie
await accessor.HttpContext!.SignOutAsync(CookieAuthenticationDefaults.AuthenticationScheme);
// 5) let OpenIddict produce the normal response (redirect to SPA)
ctx.SignOut();
}
}

View File

@@ -0,0 +1,92 @@
using OpenIddict.Abstractions;
using Ablera.Serdica.Authority.Contracts;
using Microsoft.AspNetCore.Identity;
using System.Threading.Tasks;
using OpenIddict.Server;
using Microsoft.AspNetCore.Authentication.Cookies;
using Microsoft.AspNetCore.Authentication;
using System;
using Microsoft.AspNetCore.Http;
using System.Linq;
using Ablera.Serdica.Authority.Extensions;
using static OpenIddict.Abstractions.OpenIddictConstants;
using static OpenIddict.Server.OpenIddictServerEvents;
namespace Ablera.Serdica.Authority.OpenIddictServerHandlers;
public sealed class PasswordGrantHandler(
IUserManagingDirector<IdentityUser<string>> users,
IHttpContextAccessor httpContextAccessor) :
IOpenIddictServerHandler<HandleTokenRequestContext>
{
public async ValueTask HandleAsync(HandleTokenRequestContext ctx)
{
if (!ctx.Request.IsPasswordGrantType())
return; // not our grant → ignore
var username = ctx.Request.Username;
var password = ctx.Request.Password;
if (username is null || password is null)
{
ctx.Reject(
error: Errors.InvalidGrant,
description: "Missing username or password.");
return;
}
// 1) Find user.
var user = await users.FindByEmailAsync(username) ??
await users.FindByNameAsync(username);
if (user is null)
{
ctx.Reject(
error: Errors.InvalidGrant,
description: "Invalid credentials.");
return;
}
// 2) Validate the password.
var auth = await users.AuthenticateAsync(user, password, false);
if (!auth.Succeeded || auth.ClaimsPrincipal is null)
{
ctx.Reject(
error: Errors.InvalidGrant,
description: "Invalid credentials.");
return;
}
var props = new AuthenticationProperties
{
IsPersistent = true
};
var roleClaims = await users.GetRolesClaimsAsync(user);
var baseClaims = await users.GetBaseClaimsAsync(user);
var principal = SerdicaPrincipalBuilder.Build(
[ ..baseClaims, ..(roleClaims ?? [])],
ctx.Request.GetScopes(),
auth.ClaimsPrincipal.Identity!.AuthenticationType!);
// Issue the local session cookie **for the browser**
await httpContextAccessor.HttpContext!.SignInAsync(
CookieAuthenticationDefaults.AuthenticationScheme,
principal,
props);
// 4) Tell OpenIddict that everything is OK.
ctx.SignIn(principal);
// ------------------------------------------------------------------
var confirmUrl = httpContextAccessor.HttpContext?.Request?.Query.TryGetValue("confirmUrl", out var values) == true
? values.FirstOrDefault()
: null;
if (string.IsNullOrEmpty(confirmUrl)) return;
httpContextAccessor.HttpContext!.Response.Redirect(Uri.UnescapeDataString(confirmUrl));
ctx.HandleRequest();
}
}

View File

@@ -0,0 +1,45 @@
using System;
using System.Linq;
using System.Threading.Tasks;
using Ablera.Serdica.Authentication.Models.Oidc;
using Ablera.Serdica.Authority.Extensions;
using Ablera.Serdica.Authority.Services;
using Microsoft.AspNetCore.Http;
using OpenIddict.Abstractions;
using OpenIddict.Server;
namespace Ablera.Serdica.Authority.OpenIddictServerHandlers;
public sealed class ValidateClientCredentialsRequest(OidcJsonSettingsProvider settings, IHttpContextAccessor http)
: IOpenIddictServerHandler<OpenIddictServerEvents.ValidateTokenRequestContext>
{
public ValueTask HandleAsync(OpenIddictServerEvents.ValidateTokenRequestContext ctx)
{
if (!ctx.Request.IsClientCredentialsGrantType())
return default;
var client = settings.Settings.RegisteredClients
.FirstOrDefault(c => c.ClientId == ctx.Request.ClientId);
if (client is null)
{
ctx.Reject(OpenIddictConstants.Errors.InvalidClient, "Unknown client.");
return default;
}
// Confidential clients: check secret.
if (!string.IsNullOrEmpty(client.ClientSecret))
{
if (!string.Equals(ctx.ClientSecret, client.ClientSecret, StringComparison.Ordinal))
ctx.Reject(OpenIddictConstants.Errors.InvalidClient, "Invalid client secret.");
return default;
}
// Public/secret-less clients: enforce your allowed network masks.
var masks = (client.AllowedMasks ?? Enumerable.Empty<AllowedMask>())
.Concat(settings.Settings.AllowedMasks ?? Enumerable.Empty<AllowedMask>());
if (!masks.Any(m => m.MatchesRemote(http.HttpContext!)))
ctx.Reject(OpenIddictConstants.Errors.InvalidClient, "Client not allowed from this origin.");
// If we're here and not rejected, we let the pipeline continue.
return default;
}
}
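AllowedMask.MatchesRemote is defined elsewhere; as a rough illustration only, a CIDR-style check over the remote address might look like the following. This is an assumption about its behaviour, not the actual implementation:
using System;
using System.Net;
static bool MatchesCidr(string cidr, IPAddress remote)
{
    var parts = cidr.Split('/');
    var network = IPAddress.Parse(parts[0]);
    var prefix = int.Parse(parts[1]);
    byte[] nb = network.GetAddressBytes(), rb = remote.GetAddressBytes();
    if (nb.Length != rb.Length) return false;          // v4 vs v6 mismatch
    int fullBytes = prefix / 8, remBits = prefix % 8;
    for (var i = 0; i < fullBytes; i++)
        if (nb[i] != rb[i]) return false;
    if (remBits == 0) return true;
    var mask = (byte)(0xFF << (8 - remBits));
    return (nb[fullBytes] & mask) == (rb[fullBytes] & mask);
}
Console.WriteLine(MatchesCidr("10.0.0.0/8", IPAddress.Parse("10.1.2.3"))); // True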

View File

@@ -0,0 +1,346 @@
using Ablera.Serdica.Common.Tools.Helpers;
using Ablera.Serdica.Microservice.Consumer.Config;
using Ablera.Serdica.Authority.Services;
using Ablera.Serdica.DBModels.Serdica;
using Serilog;
using System.Diagnostics;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Configuration;
using System;
using Ablera.Serdica.Extensions.Serilog;
using Microsoft.EntityFrameworkCore;
using Ablera.Serdica.DBModels.Oidc;
using Microsoft.AspNetCore.Builder;
using Ablera.Serdica.Authority.HostServices;
using Microsoft.IdentityModel.Tokens;
using Quartz;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Authentication.Cookies;
using Ablera.Serdica.Authority.Extensions;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Authority.Contracts;
using Microsoft.AspNetCore.Identity;
using Ablera.Serdica.Common.Tools.Models.Config;
using Microsoft.IdentityModel.Protocols.Configuration;
using Microsoft.Extensions.Options;
using Ablera.Serdica.Authority.Constants;
using OpenIddict.Server;
using Ablera.Serdica.Authority.OpenIddictServerHandlers;
using Ablera.Serdica.DependencyInjection;
using Ablera.Serdica.UserConfiguration.Models;
using Ablera.Serdica.HealthChecks.Extensions;
using Microsoft.AspNetCore.StaticFiles;
using static OpenIddict.Server.OpenIddictServerEvents;
// Use the W3C format for Activity IDs.
Activity.DefaultIdFormat = ActivityIdFormat.W3C;
// Create the WebApplicationBuilder instead of using Host.CreateDefaultBuilder.
// This model ensures that the built application supports middleware configuration.
var builder = WebApplication.CreateBuilder(args);
// Adjust configuration: set the environment name (if provided by an environment variable)
// and add the "SERDICA_" prefixed environment variables.
{
var environmentName = Environment.GetEnvironmentVariable("SERDICA_PROJECT_ENV")
?? Environment.GetEnvironmentVariable("ASPNETCORE_ENVIRONMENT");
if (!string.IsNullOrWhiteSpace(environmentName))
{
builder.Environment.EnvironmentName = environmentName;
}
builder.Configuration.AddEnvironmentVariables(prefix: "SERDICA_");
}
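// Example: with the "SERDICA_" prefix registered above, an environment variable
//   SERDICA_ConnectionStrings__DefaultConnection=...
// overrides Configuration["ConnectionStrings:DefaultConnection"] ("__" maps to ":"),
// which is how launchSettings.json further below overrides the Serilog sink settings.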
// Configure Serilog as the logging provider.
builder.Host.UseSerilog((context, _, configuration) =>
{
configuration.ReadFrom.Configuration(context.Configuration)
.Enrich.With(new MoveScopeToFieldsLogEventEnricher());
});
var jsonSettingsConfig = builder.Configuration.GetSection(nameof(JsonFileSettingsConfig)).Get<JsonFileSettingsConfig>() ?? new JsonFileSettingsConfig();
using var oidConfigProvider = new OidcJsonSettingsProvider(null, Options.Create(jsonSettingsConfig));
var oidcSettings = oidConfigProvider.Settings;
var issuerUrl = oidcSettings.IssuerUrl.TrimEnd('/');
var oidcEncryptionKey = Convert.FromBase64String(oidcSettings.EncryptionKey
	?? throw new InvalidConfigurationException($"Invalid or no base64 key provided for {nameof(OidcServerSettings)}.{nameof(OidcServerSettings.EncryptionKey)}"));
// Register Ablera Serdica configuration.
builder.Services
.ConfigureTools(builder.Configuration)
.AddRedisFromEntityFrameworkEntityCacheManager<Route, SerdicaDbContext>(
builder.Configuration, e => e.Id.ToString())
.AddDbContext<SerdicaDbContext>(
builder.Configuration, OptimizedSerdicaDbContextModel.Instance)
.AddDbContext<OidcDbContext>(
builder.Configuration, null, null, options =>
options
.UseOracle(
builder.Configuration.GetConnectionString(ConstantsClass.ConnectionNameDefault),
b => b.MigrationsAssembly(typeof(Ablera.Serdica.DBModels.Oidc.Migrations.OidcDbContextFactory).Assembly.GetName().Name))
.UseOpenIddict())
.AddInitializationRoutine<Ablera.Serdica.Microservice.Initializer.EndpointsRegistration.Initializer>()
.AddTranslationProvider(builder.Configuration)
.AddCacheManager(builder.Configuration)
.AddUserConfiguration<UserConfigurationModel>(builder.Configuration)
.AddRedis(builder.Configuration)
.AddRedisUserConfigurationRepository<UserConfigurationModel>(builder.Configuration)
.AddSerdicaUserConfigurationBuilder(builder.Configuration)
.AddPluginIntegrations(builder.Configuration)
.AddSystem(builder.Configuration)
.AddAsConsumerAsync(builder.Configuration);
// Register Ablera.Serdica.Authority services
builder.Services
.Configure<UserManagingDirectorConfig>(builder.Configuration.GetSection(nameof(UserManagingDirectorConfig)))
.Configure<FileServerConfig>(builder.Configuration.GetSection(nameof(FileServerConfig)))
.Configure<OidcServerSettings>(builder.Configuration.GetSection(nameof(OidcServerSettings)))
.AddSingleton<RoutesTreeProvider>()
.AddSingleton<OidcJsonSettingsProvider>()
.AddScoped<OidcClientSynchronizer>()
.AddSingleton<AuthenticationUrlBuilder>()
.AddScoped<IUserManagingDirector<IdentityUser<string>>, UserManagingDirector>()
.AddHostedService<RoutesTreeBuilderHostedService>()
.AddHostedService<OidcInfrastructureHostedService>();
// Get FileServerConfig to determine the correct paths with prefix
var fileServerConfig = builder.Configuration.GetSection(nameof(FileServerConfig)).Get<FileServerConfig>() ?? new FileServerConfig();
var pathPrefix = fileServerConfig.RootPathPrefixForWWW ?? string.Empty;
// Configure authentication using cookies.
builder
.Services
.AddSession(options =>
{
options.IdleTimeout = TimeSpan.FromMinutes(1);
options.Cookie.HttpOnly = true;
options.Cookie.SameSite = SameSiteMode.None;
options.Cookie.SecurePolicy = CookieSecurePolicy.Always;
})
.AddHttpContextAccessor()
.AddCors(options =>
{
options
.AddDefaultPolicy(policy => policy
.SetIsOriginAllowed(_ => true) // Allow any origin
.AllowAnyHeader()
.AllowAnyMethod()
.AllowCredentials());
})
.AddQuartz(options =>
{
options.UseSimpleTypeLoader();
options.UseInMemoryStore();
})
.AddQuartzHostedService(options => options.WaitForJobsToComplete = true)
.AddAuthorization()
.AddAuthentication(CookieAuthenticationDefaults.AuthenticationScheme)
.AddCookie(options =>
{
var loginPath = string.IsNullOrEmpty(pathPrefix)
? "/login.html"
: $"{pathPrefix}/login.html";
var accessDeniedPath = string.IsNullOrEmpty(pathPrefix)
? oidcSettings.Endpoints.Authorization.EnsureStartsWith("/")
: $"{pathPrefix}{oidcSettings.Endpoints.Authorization.EnsureStartsWith("/")}";
var logoutPath = string.IsNullOrEmpty(pathPrefix)
? oidcSettings.Endpoints.Logout.EnsureStartsWith("/")
: $"{pathPrefix}{oidcSettings.Endpoints.Logout.EnsureStartsWith("/")}";
options.AccessDeniedPath = accessDeniedPath;
options.LoginPath = loginPath;
options.LogoutPath = logoutPath;
options.Cookie.SameSite = SameSiteMode.Lax;
options.Cookie.SecurePolicy = CookieSecurePolicy.None;
options.Cookie.Name = oidcSettings.CookieName;
options.SlidingExpiration = true;
options.ExpireTimeSpan = TimeSpan.FromMinutes(oidcSettings.CookieExpirationInMinutes);
options.Events = new CookieAuthenticationEvents
{
OnRedirectToLogin = x => x.HandlerRedirectToLogin()
};
}).Services
.AddSingleton<IPostConfigureOptions<CookieAuthenticationOptions>, ConfigureCookieTicketStore>()
.AddSingleton<ITicketStore, RedisTicketStore>()
.AddRedis(builder.Configuration);
// Register health checks
builder.Services
.AddHealthChecks(builder.Configuration, typeof(SerdicaDbContext), typeof(OidcDbContext))
.AddRedis(builder.Configuration)
.AddRabbitMQ(builder.Configuration, builder.Services);
builder.Services
.AddDataProtection(builder.Configuration);
// Register OpenIddict.
builder.Services.AddOpenIddict()
.AddCore(options =>
{
// Use your Oracle-based SerdicaDbContext for OpenIddict's stores.
options.UseEntityFrameworkCore()
.UseDbContext<OidcDbContext>();
options.UseQuartz();
})
.AddServer(options =>
{
options.SetIssuer(new Uri(issuerUrl));
options.SetAuthorizationCodeLifetime(TimeSpan.FromMinutes(oidcSettings.AuthorizationTokenDurationInMinutes));
// The fileServerConfig/pathPrefix captured above already carry the path prefix;
// redeclaring them here would shadow the outer locals (compile error CS0136).
options
.SetAuthorizationEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.Authorization.EnsureStartsWith("/")}")
.SetDeviceAuthorizationEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.Device.EnsureStartsWith("/")}")
.SetIntrospectionEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.Introspection.EnsureStartsWith("/")}")
.SetEndSessionEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.Logout.EnsureStartsWith("/")}")
.SetTokenEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.Token.EnsureStartsWith("/")}")
.SetUserInfoEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.Userinfo.EnsureStartsWith("/")}")
.SetRevocationEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.Revocation.EnsureStartsWith("/")}")
.SetEndUserVerificationEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.EndUserVerification.EnsureStartsWith("/")}")
.SetJsonWebKeySetEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.Jwks.EnsureStartsWith("/")}")
.SetConfigurationEndpointUris(
$"{pathPrefix}{oidcSettings.Endpoints.Configuration.EnsureStartsWith("/")}");
options
.AllowAuthorizationCodeFlow()
.AllowHybridFlow()
.AllowClientCredentialsFlow()
.AcceptAnonymousClients()
.AllowPasswordFlow()
.AllowRefreshTokenFlow()
.AllowDeviceAuthorizationFlow()
.AllowNoneFlow();
options.AddEventHandler<HandleAuthorizationRequestContext>(
x => x
.UseScopedHandler<AuthorizationRequestHandler>()
.SetOrder(int.MinValue)
//.SetOrder(OpenIddictServerHandlers.Authentication.ValidateAuthentication.Descriptor.Order + 1)
.SetType(OpenIddictServerHandlerType.Custom));
options.AddEventHandler<ValidateTokenRequestContext>(x => x
.UseScopedHandler<ValidateClientCredentialsRequest>()
.SetType(OpenIddictServerHandlerType.Custom));
options.AddEventHandler<HandleTokenRequestContext>(
x => x
.UseScopedHandler<ClientCredentialsGrantHandler>()
.SetOrder(OpenIddictServerHandlers.ValidateIdentityToken.Descriptor.Order + 1)
.SetType(OpenIddictServerHandlerType.Custom));
options.AddEventHandler<HandleTokenRequestContext>(
x => x
.UseScopedHandler<PasswordGrantHandler>()
.SetOrder(OpenIddictServerHandlers.ValidateIdentityToken.Descriptor.Order + 2)
.SetType(OpenIddictServerHandlerType.Custom));
options.AddEventHandler<HandleEndSessionRequestContext>(
x => x.UseScopedHandler<EndSessionHandler>());
options.RegisterClaims(oidcSettings.Claims);
options.RegisterScopes(oidcSettings.Scopes);
options.RequireProofKeyForCodeExchange();
// Use development certificates replace with a production certificate in real applications.
options.AddDevelopmentEncryptionCertificate()
.AddDevelopmentSigningCertificate();
options.UseDataProtection()
.PreferDefaultAccessTokenFormat();
options.AddEncryptionKey(new SymmetricSecurityKey(oidcEncryptionKey));
var aspNetCoreConfiguration = options.UseAspNetCore();
aspNetCoreConfiguration.EnableStatusCodePagesIntegration();
if (oidcSettings.RequireHttps != true)
{
aspNetCoreConfiguration.DisableTransportSecurityRequirement();
}
})
.AddValidation(options =>
{
options.UseLocalServer();
options.AddEncryptionKey(new SymmetricSecurityKey(oidcEncryptionKey));
options.UseSystemNetHttp();
options.UseAspNetCore();
options.UseDataProtection();
options.EnableAuthorizationEntryValidation();
});
// Build the WebApplication.
var app = builder.Build();
// Configure the middleware pipeline.
if (app.Environment.IsDevelopment())
{
app.UseDeveloperExceptionPage();
}
app
.UseForwardedHeadersExt(builder.Configuration)
.UseRouting();
// Configure static files with path prefix support
if (!string.IsNullOrEmpty(fileServerConfig.RootPathPrefixForWWW))
{
// Serve static files with path prefix (e.g., /identity)
app.UseStaticFiles(new StaticFileOptions
{
RequestPath = fileServerConfig.RootPathPrefixForWWW
});
}
else
{
// Serve static files at root level (default behavior)
app.UseStaticFiles();
}
app
.UseCors()
.UseAuthentication()
.UseAuthorization();
app.MapHealthChecks();
// Bind the service provider if needed (legacy support).
ServiceProviderAccessor.Initialize(app.Services);
// Start the application within a try/catch to log errors.
try
{
Log.Information("Starting application with issuer url {issuerUrl}", issuerUrl);
await app.RunAsync();
return 0;
}
catch (Exception ex)
{
Log.Fatal(ex, "Host terminated unexpectedly.");
Console.WriteLine("Host terminated unexpectedly. " +
"Exception: " + ex.Message + Environment.NewLine +
"Stacktrace: " + ex.StackTrace);
return 1;
}
finally
{
Log.CloseAndFlush();
}

View File

@@ -0,0 +1,18 @@
{
"profiles": {
"SelfHost": {
"commandName": "Project",
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development",
"SERDICA_PROJECT": "dev",
"SERDICA_PROJECT_ENV": "development",
"SERDICA_PROJECT_INSTANCE": "local",
"SERDICA_RUNTIME": "local",
"SERDICA_Serilog__WriteTo__0__Args__configure__0__Args__outputTemplate": "[{Timestamp:HH:mm:ss} {Level}] {SourceContext}{NewLine}(UserId {SerdicaUserId} | RequestId {RequestId}){NewLine}{Message:lj}{NewLine}{Exception}{NewLine}",
"SERDICA_Serilog__WriteTo__0__Args__configure__0__Args__theme": "Serilog.Sinks.SystemConsole.Themes.AnsiConsoleTheme::Code, Serilog.Sinks.Console",
"ASPNETCORE_URLS": "https://localhost:57001;http://localhost:57000;http://authority:57000"
},
"applicationUrl": "https://localhost:57001;http://localhost:57000;http://authority:57000"
}
}
}

View File

@@ -0,0 +1 @@
dotnet ef migrations add InitialOpenIddictMigration --context OidcDbContext --project "..\..\..\Common\CommonCustomLibraries\Ablera.Serdica.DBModels.Oidc.Migrations\Ablera.Serdica.DBModels.Oidc.Migrations.csproj" --startup-project "..\Ablera.Serdica.Users.csproj"

View File

@@ -0,0 +1,49 @@
using System;
using System.Linq;
using Ablera.Serdica.Authentication.Models.Oidc;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Logging;
namespace Ablera.Serdica.Authority.Services;
public class AuthenticationUrlBuilder(ILogger<AuthenticationUrlBuilder> logger, OidcJsonSettingsProvider oidcJsonSettingsProvider)
{
public string? BuildAuthenticationUrl(
string clientId,
string authenticationDelegateUrl,
HttpRequest request)
{
var redirectUrls = oidcJsonSettingsProvider.Settings.RegisteredClients.Where(x => x.ClientId == clientId)
.SelectMany(x => x.RedirectUris ?? [])
.ToArray();
if (redirectUrls.Length == 0)
{
logger.LogError("No {PropertyName} configured for client with id {ClientId}", nameof(RegisteredClient.RedirectUris), clientId);
return null;
}
string? redirectUrl = null;
var refererHeader = request.Headers["Referer"].ToString();
if (!string.IsNullOrEmpty(refererHeader) && redirectUrls.Any(x => refererHeader.StartsWith(x)))
{
var refererUri = new Uri(refererHeader);
redirectUrl = $"{refererUri.Scheme}://{refererUri.Host}{(refererUri.IsDefaultPort ? "" : $":{refererUri.Port}")}{refererUri.AbsolutePath}";
}
if (redirectUrl == null)
{
redirectUrl = redirectUrls[0];
logger.LogWarning("Unable to determine client url from headers. Will use default redirect url instead {RedirectUrl}",
redirectUrl);
}
var processedDelegateUrl = authenticationDelegateUrl
.Replace("{{issuer_url}}", oidcJsonSettingsProvider.Settings.IssuerUrl)
.Replace("{{redirect_url}}", redirectUrl ?? string.Empty);
var authorizationConfirmUrl =
$"{request.Scheme}://{request.Host}{request.Path}{request.QueryString}";
var authenticationUrl = processedDelegateUrl + "&confirmUrl=" + Uri.EscapeDataString(authorizationConfirmUrl);
return authenticationUrl;
}
}
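A worked example with hypothetical hosts and template (the real template comes from the client's delegate-URL property):
// template:  "{{issuer_url}}/login.html?redirect={{redirect_url}}"
// issuer:    "https://auth.example.com"
// referer:   "https://app.example.com/dashboard"  (prefix-matches a registered RedirectUri)
//
// processedDelegateUrl → https://auth.example.com/login.html?redirect=https://app.example.com/dashboard
// the final URL then appends the escaped authorize request:
//   ...&confirmUrl=https%3A%2F%2Fauth.example.com%2Fconnect%2Fauthorize%3Fclient_id%3D...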

View File

@@ -0,0 +1,16 @@
using Microsoft.AspNetCore.Authentication.Cookies;
using Microsoft.Extensions.Options;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authority.Services;
public sealed class ConfigureCookieTicketStore(ITicketStore store)
: IPostConfigureOptions<CookieAuthenticationOptions>
{
public void PostConfigure(string? scheme, CookieAuthenticationOptions opts)
=> opts.SessionStore = store;
}

View File

@@ -0,0 +1,159 @@
using Ablera.Serdica.Authentication.Models.Oidc;
using Ablera.Serdica.Authority.Extensions;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using OpenIddict.Abstractions;
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Linq;
using System.Reflection;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using static OpenIddict.Abstractions.OpenIddictConstants;
namespace Ablera.Serdica.Authority.Services;
/// <summary>
/// Synchronizes OpenIddict client registrations from configuration, performing upserts
/// and only applying changes when the descriptor differs from stored values.
/// </summary>
public class OidcClientSynchronizer(
IOpenIddictApplicationManager manager,
OidcJsonSettingsProvider settingsProvider,
ILogger<OidcClientSynchronizer> logger)
{
/// <summary>
/// Reads configured clients and upserts them into OpenIddict, applying changes only when needed.
/// </summary>
public async Task SynchronizeAsync(CancellationToken cancellationToken = default)
{
// Iterate all clients from JSON settings
foreach (var client in settingsProvider.Settings.RegisteredClients)
{
// Build the descriptor from config, injecting dynamic UI URLs when applicable
var descriptor = BuildDescriptor(client);
// Upsert the application
await UpsertClientAsync(descriptor, cancellationToken);
}
}
private OpenIddictApplicationDescriptor BuildDescriptor(
RegisteredClient client)
{
var descriptor = new OpenIddictApplicationDescriptor
{
ClientId = client.ClientId,
ClientType = client.ClientType switch
{
OpenIddictConstants.ClientTypes.Public => OpenIddictConstants.ClientTypes.Public,
OpenIddictConstants.ClientTypes.Confidential => OpenIddictConstants.ClientTypes.Confidential,
_ => throw new InvalidOperationException("Unknown client type")
},
DisplayName = client.DisplayName,
// ClientSecret may be null for public clients
ClientSecret = client.ClientSecret,
};
// non-UI clients: use static values
foreach (var uri in client.RedirectUris ?? [])
descriptor.RedirectUris.Add(new Uri(uri, UriKind.Absolute));
foreach (var uri in client.PostLogoutRedirectUris ?? [])
descriptor.PostLogoutRedirectUris.Add(new Uri(uri, UriKind.Absolute));
// copy over any custom Properties
foreach (var kv in client.Properties ?? [])
descriptor.Properties[kv.Key] = kv.Value;
// Copy permissions and requirements
foreach (var permission in client.Permissions ?? [])
descriptor.Permissions.Add(permission);
foreach (var requirement in client.Requirements ?? [])
descriptor.Requirements.Add(requirement);
return descriptor;
}
private async Task UpsertClientAsync(
OpenIddictApplicationDescriptor descriptor,
CancellationToken cancellationToken)
{
var existing = await manager.FindByClientIdAsync(
descriptor.ClientId ?? throw new ArgumentNullException(nameof(descriptor.ClientId)),
cancellationToken);
if (existing is null)
{
try
{
await manager.CreateAsync(descriptor, cancellationToken);
}
catch (Exception ex)
{
logger.LogError(ex, "Failed to create OIDC client '{ClientId}'", descriptor.ClientId);
throw;
}
logger.LogInformation("Created OIDC client '{ClientId}'", descriptor.ClientId);
return;
}
// Compare existing settings to the descriptor
if (!await NeedsUpdateAsync(existing, descriptor, cancellationToken))
{
logger.LogDebug("No changes for client '{ClientId}', skipping update.", descriptor.ClientId);
return;
}
// Perform update
await manager.UpdateAsync(existing, descriptor, cancellationToken);
logger.LogInformation("Updated OIDC client '{ClientId}'", descriptor.ClientId);
}
private async Task<bool> NeedsUpdateAsync(
object existing,
OpenIddictApplicationDescriptor descriptor,
CancellationToken cancellationToken)
{
var existingRedirectUris = (await manager
.GetRedirectUrisAsync(existing, cancellationToken))
.ToHashSet(StringComparer.Ordinal);
var descriptorRedirectUris = descriptor.RedirectUris
.Select(u => u.OriginalString)
.ToHashSet(StringComparer.Ordinal);
if (!existingRedirectUris.SetEquals(descriptorRedirectUris)) return true;
var existingPostLogoutRedirectUris = (await manager
.GetPostLogoutRedirectUrisAsync(existing, cancellationToken))
.ToHashSet(StringComparer.Ordinal);
var descriptorPostLogoutRedirectUris = descriptor.PostLogoutRedirectUris
.Select(u => u.OriginalString)
.ToHashSet(StringComparer.Ordinal);
if (!existingPostLogoutRedirectUris.SetEquals(descriptorPostLogoutRedirectUris)) return true;
// Load permissions, requirements, client type, and check secret
var existingPerms = (await manager.GetPermissionsAsync(existing, cancellationToken))
.ToHashSet(StringComparer.OrdinalIgnoreCase);
if (!existingPerms.SetEquals(descriptor.Permissions)) return true;
var existingReqs = (await manager.GetRequirementsAsync(existing, cancellationToken))
.ToHashSet(StringComparer.OrdinalIgnoreCase);
if (!existingReqs.SetEquals(descriptor.Requirements)) return true;
var existingType = await manager.GetClientTypeAsync(existing, cancellationToken);
if (!string.Equals(existingType, descriptor.ClientType, StringComparison.OrdinalIgnoreCase)) return true;
bool secretChanged = false;
if (!string.IsNullOrWhiteSpace(descriptor.ClientSecret))
{
secretChanged = !await manager.ValidateClientSecretAsync(
existing, descriptor.ClientSecret, cancellationToken);
}
if (secretChanged) return true;
var existingProperties = (await manager.GetPropertiesAsync(existing, cancellationToken));
if (!descriptor.Properties.DictionaryEquals(existingProperties, JsonElementEqualityComparer.Default)) return true;
return false;
}
}
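The synchronizer is registered as scoped in Program.cs; a minimal sketch of driving it at startup (the actual OidcInfrastructureHostedService is not included in this diff):
using System;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
public sealed class OidcClientSyncSketch(IServiceProvider services) : BackgroundService
{
    protected override async Task ExecuteAsync(CancellationToken stoppingToken)
    {
        // The synchronizer is scoped (it talks to DbContext-backed stores),
        // so resolve it from a fresh scope.
        using var scope = services.CreateScope();
        await scope.ServiceProvider
            .GetRequiredService<OidcClientSynchronizer>()
            .SynchronizeAsync(stoppingToken);
    }
}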

View File

@@ -0,0 +1,24 @@
using Ablera.Serdica.Common.Tools;
using Ablera.Serdica.Common.Tools.Models.Config;
using Ablera.Serdica.Authority.Models;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authority.Services;
public class OidcJsonSettingsProvider : GenericJsonSettingsProvider<OidcServerSettings>
{
public const string JsonFilePath = "oidc-settings.json";
public OidcJsonSettingsProvider(
ILogger<GenericJsonSettingsProvider<OidcServerSettings>>? logger,
IOptions<JsonFileSettingsConfig> options)
: base(logger, options, JsonFilePath, null)
{
}
}

View File

@@ -0,0 +1,62 @@
using Microsoft.AspNetCore.Authentication.Cookies;
using Microsoft.AspNetCore.Authentication;
using Microsoft.AspNetCore.WebUtilities;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Cryptography;
using System.Text;
using System.Threading.Tasks;
using Microsoft.Extensions.Options;
using Ablera.Serdica.Authentication.Models;
using Ablera.Serdica.Authority.Models;
namespace Ablera.Serdica.Authority.Services;
public sealed class RedisTicketStore(IDistributedCache cache, IOptions<OidcServerSettings> options) : ITicketStore
{
private static readonly TicketSerializer serializer = TicketSerializer.Default;
private const string Prefix = "auth_ticket_";
private readonly TimeSpan lifetime = TimeSpan.FromMinutes(options.Value.CookieExpirationInMinutes);
public async Task<string> StoreAsync(AuthenticationTicket ticket)
{
var key = CreateKey();
await RenewAsync(key, ticket);
return key;
}
public Task RenewAsync(string key, AuthenticationTicket ticket)
{
var bytes = serializer.Serialize(ticket);
var opts = new DistributedCacheEntryOptions
{
AbsoluteExpirationRelativeToNow = lifetime,
SlidingExpiration = lifetime
};
return cache.SetAsync(Prefix + key, bytes, opts);
}
public async Task<AuthenticationTicket?> RetrieveAsync(string key)
{
var bytes = await cache.GetAsync(Prefix + key);
return bytes is null ? null : serializer.Deserialize(bytes);
}
public Task RemoveAsync(string key)
=> cache.RemoveAsync(Prefix + key);
// --------------- helpers ----------------------
private static string CreateKey()
{
// 32 random bytes → SHA-256 → Base64-url
Span<byte> rnd = stackalloc byte[32];
RandomNumberGenerator.Fill(rnd);
Span<byte> hash = stackalloc byte[32];
SHA256.HashData(rnd, hash);
return WebEncoders.Base64UrlEncode(hash);
}
}
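A minimal round-trip sketch using the in-memory IDistributedCache, assuming OidcServerSettings.CookieExpirationInMinutes is a settable property:
using System.Security.Claims;
using Microsoft.AspNetCore.Authentication;
using Microsoft.AspNetCore.Authentication.Cookies;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.Options;
var cache = new MemoryDistributedCache(Options.Create(new MemoryDistributedCacheOptions()));
var store = new RedisTicketStore(cache, Options.Create(new OidcServerSettings { CookieExpirationInMinutes = 30 }));
var identity = new ClaimsIdentity(
    [new Claim(ClaimTypes.Name, "jdoe")], CookieAuthenticationDefaults.AuthenticationScheme);
var ticket = new AuthenticationTicket(
    new ClaimsPrincipal(identity), CookieAuthenticationDefaults.AuthenticationScheme);
var key = await store.StoreAsync(ticket);       // serialize + write with sliding expiry
var restored = await store.RetrieveAsync(key);  // null once expired or removed
await store.RemoveAsync(key);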

View File

@@ -0,0 +1,13 @@
using Ablera.Serdica.Authority.Models;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Ablera.Serdica.Authority.Services;
public class RoutesTreeProvider
{
public IReadOnlyList<RouteEntity>? Tree { get; set; }
}

View File

@@ -0,0 +1,268 @@
using Ablera.Serdica.Common.Tools.Extensions;
using Ablera.Serdica.Authority.Contracts;
using Ablera.Serdica.Authority.Models;
using Ablera.Serdica.Authority.Plugins.Base.Contracts;
using Ablera.Serdica.Authority.Plugins.Base.Models;
using Microsoft.AspNetCore.Identity;
using Microsoft.Extensions.Options;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Claims;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.EntityFrameworkCore;
using Ablera.Serdica.DBModels.Serdica;
using Microsoft.Extensions.Logging;
namespace Ablera.Serdica.Authority.Services;
public class UserManagingDirector(
SerdicaDbContext dbContext,
ILogger<UserManagingDirector> logger,
IEnumerable<IUserManagementFacade<IdentityUser<string>>> userManagers,
IOptions<UserManagingDirectorConfig> options)
: IUserManagingDirector<IdentityUser<string>>
{
// --------------------------------------------------------------------
// Configuration taken from appsettings → injected via IOptions
// --------------------------------------------------------------------
private readonly UserManagingDirectorConfig _cfg = options.Value;
// --------------------------------------------------------------------
// Priority table: bigger number = stronger / more important error
// --------------------------------------------------------------------
private static readonly IReadOnlyDictionary<string, int> ErrorRank =
new Dictionary<string, int>(StringComparer.OrdinalIgnoreCase)
{
[AuthenticationCode.AccountIsLocked.ToScreamingSnakeCase()] = 400,
[AuthenticationCode.AccountIsNotActive.ToScreamingSnakeCase()] = 300,
[AuthenticationCode.InvalidPassword.ToScreamingSnakeCase()] = 200,
[AuthenticationCode.InvalidCredentials.ToScreamingSnakeCase()] = 200,
[AuthenticationCode.AccountIsNotFound.ToScreamingSnakeCase()] = 100,
[AuthenticationCode.NoAuthBackend.ToScreamingSnakeCase()] = 0
};
// --------------------------------------------------------------------
// Helpers that pick the “stronger” result
// --------------------------------------------------------------------
private static AuthenticationResult Pick(AuthenticationResult? a, AuthenticationResult? b)
{
if (a is null) return b!;
if (b is null) return a;
var ra = ErrorRank.GetValueOrDefault(a.ErrorCode ?? string.Empty, -1);
var rb = ErrorRank.GetValueOrDefault(b.ErrorCode ?? string.Empty, -1);
return rb > ra ? b : a;
}
private static OperationResult Pick(OperationResult? a, OperationResult? b)
{
if (a is null) return b!;
if (b is null) return a;
// Success beats any failure
if (a.Succeeded && !b.Succeeded) return a;
if (b.Succeeded && !a.Succeeded) return b;
// Both success or both failure → use the ranking table
var ra = ErrorRank.GetValueOrDefault(a.ErrorCode ?? string.Empty, -1);
var rb = ErrorRank.GetValueOrDefault(b.ErrorCode ?? string.Empty, -1);
return rb > ra ? b : a;
}
// ====================================================================
// 1. Authentication
// ====================================================================
public async Task<AuthenticationResult> AuthenticateAsync(
IdentityUser<string> user,
string password,
bool lockoutOnFailure = false,
CancellationToken ct = default)
{
if (!userManagers.Any())
{
logger.LogWarning("No authentication backends were found. Did you install any plugins?");
return AuthenticationResult.Fail(AuthenticationCode.NoAuthBackend.ToScreamingSnakeCase());
}
AuthenticationResult? aggregate = null;
foreach (var manager in userManagers)
{
var res = await manager.AuthenticateAsync(user, password, lockoutOnFailure, ct);
if (res.Succeeded) // success wins instantly
return res;
aggregate = Pick(aggregate, res); // remember strongest error
if (!_cfg.LoginAnywhere) // only first backend is allowed
break;
}
return aggregate
?? AuthenticationResult.Fail(AuthenticationCode.NoAuthBackend.ToScreamingSnakeCase());
}
// ====================================================================
// 2. WRITE operations (propagation depends on UpdateEveryWhere flag)
// ====================================================================
public Task<OperationResult> CreateAsync(
IdentityUser<string> user,
string password,
CancellationToken ct = default)
=> PropagateAsync(mgr => mgr.CreateAsync(user, password, ct));
public Task<OperationResult> ChangePasswordAsync(
IdentityUser<string> user,
string currentPassword,
string newPassword,
CancellationToken ct = default)
=> PropagateAsync(mgr => mgr.ChangePasswordAsync(user, currentPassword, newPassword, ct));
public Task<OperationResult> ResetPasswordAsync(
IdentityUser<string> user,
string token,
string newPassword,
CancellationToken ct = default)
=> PropagateAsync(mgr => mgr.ResetPasswordAsync(user, token, newPassword, ct));
public Task<OperationResult> UpdateAsync(
IdentityUser<string> user,
CancellationToken ct = default)
=> PropagateAsync(mgr => mgr.UpdateAsync(user, ct));
public Task<OperationResult> LockAsync(
IdentityUser<string> user,
CancellationToken ct = default)
=> PropagateAsync(mgr => mgr.LockAsync(user, ct));
public Task<OperationResult> UnlockAsync(
IdentityUser<string> user,
CancellationToken ct = default)
=> PropagateAsync(mgr => mgr.UnlockAsync(user, ct));
// --------------------------------------------------------------------
// Shared propagator for all write operations
// --------------------------------------------------------------------
private async Task<OperationResult> PropagateAsync(
Func<IUserManagementFacade<IdentityUser<string>>, Task<OperationResult>> call)
{
OperationResult? aggregate = null;
foreach (var mgr in userManagers)
{
var res = await call(mgr);
aggregate = Pick(aggregate, res);
if (!_cfg.UpdateEveryWhere) // stop after first try
break;
if (!res.Succeeded) // stop propagation on first failure
break;
}
return aggregate
?? OperationResult.Fail(AuthenticationCode.NoAuthBackend.ToScreamingSnakeCase());
}
// Probe each backend in order and return the first non-null user found.
private async Task<IdentityUser<string>?> FindUserAsync(
Func<IUserManagementFacade<IdentityUser<string>>,
CancellationToken,
Task<IdentityUser<string>?>> finder,
CancellationToken ct)
{
foreach (var mgr in userManagers)
{
var user = await finder(mgr, ct);
if (user is not null)
{
if (string.IsNullOrEmpty(user.Id)) // some backends may not populate the Id; restore it from the DB
{
if (string.IsNullOrEmpty(user.Email) == false)
{
user.Id = dbContext.UserAccounts.Where(x => x.UserEmail == user.Email).Select(x => x.UserGuid).FirstOrDefault();
}
else if (string.IsNullOrEmpty(user.UserName) == false)
{
user.Id = dbContext.UserAccounts.Where(x => x.UserName == user.UserName).Select(x => x.UserGuid).FirstOrDefault();
}
}
if (string.IsNullOrWhiteSpace(user.Id) == false)
{
return user; // found user with ID, return it
}
}
if (!_cfg.LoginAnywhere) // stop after first backend → “not found”
return null;
}
// searched all backends
return null;
}
// ====================================================================
// 3. READ operations
// ====================================================================
public Task<IdentityUser<string>?> FindByEmailAsync(string email, CancellationToken ct = default)
=> FindUserAsync((mgr, token) => mgr.FindByEmailAsync(email, token), ct);
public Task<IdentityUser<string>?> FindByNameAsync(string username, CancellationToken ct = default)
=> FindUserAsync((mgr, token) => mgr.FindByNameAsync(username, token), ct);
public Task<IdentityUser<string>?> FindByIdAsync(string id, CancellationToken ct = default)
=> FindUserAsync((mgr, token) => mgr.FindByIdAsync(id, token), ct);
// --------------------------------------------------------------------
// Claims aggregation remove duplicates afterwards
// --------------------------------------------------------------------
public async Task<IReadOnlyCollection<Claim>> GetBaseClaimsAsync(
IdentityUser<string> user,
CancellationToken ct = default)
{
var bag = new List<Claim>();
foreach (var mgr in userManagers)
{
var c = await mgr.GetBaseClaimsAsync(user, ct);
if (c.Count > 0) bag.AddRange(c);
if (!_cfg.LoginAnywhere) break;
}
return bag.Distinct(new ClaimComparer()).ToList().AsReadOnly();
}
public async Task<IReadOnlyCollection<Claim>?> GetRolesClaimsAsync(
IdentityUser<string> user,
CancellationToken ct = default)
{
var all = new List<Claim>();
foreach (var mgr in userManagers)
{
var c = await mgr.GetRolesClaimsAsync(user, ct);
if (c != null) all.AddRange(c);
if (!_cfg.LoginAnywhere) break;
}
return all.Distinct(new ClaimComparer()).ToList().AsReadOnly();
}
// --------------------------------------------------------------------
// Claim structural equality helper
// --------------------------------------------------------------------
private sealed class ClaimComparer : IEqualityComparer<Claim>
{
public bool Equals(Claim? x, Claim? y)
=> x?.Type == y?.Type &&
x?.Value == y?.Value &&
x?.ValueType == y?.ValueType;
public int GetHashCode(Claim obj)
=> HashCode.Combine(obj.Type, obj.Value, obj.ValueType);
}
}
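A self-contained sketch of the rank-based aggregation, with the screaming-snake codes assumed to be what ToScreamingSnakeCase produces for the enum values above:
using System;
using System.Collections.Generic;
var rank = new Dictionary<string, int>(StringComparer.OrdinalIgnoreCase)
{
    ["ACCOUNT_IS_LOCKED"]     = 400,
    ["ACCOUNT_IS_NOT_ACTIVE"] = 300,
    ["INVALID_PASSWORD"]      = 200,
    ["ACCOUNT_IS_NOT_FOUND"]  = 100,
};
string Pick(string a, string b) =>
    rank.GetValueOrDefault(b, -1) > rank.GetValueOrDefault(a, -1) ? b : a;
// A lockout reported by one backend outranks "not found" from another,
// so the caller surfaces the lockout.
Console.WriteLine(Pick("ACCOUNT_IS_NOT_FOUND", "ACCOUNT_IS_LOCKED")); // ACCOUNT_IS_LOCKED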

View File

@@ -0,0 +1,89 @@
{
"Serilog": {
"Using": [
"Serilog.Sinks.Async",
"Serilog.Sinks.Console"
],
"MinimumLevel": {
"Default": "Information",
"Override": {
"Microsoft": "Information",
"Microsoft.Hosting.Lifetime": "Debug",
"System": "Information"
}
},
"WriteTo": [
{
"Name": "Async",
"Args": {
"configure": [
{
"Name": "Console",
"Args": {}
}
]
}
}
]
},
"RabbitConfig": {
"HostName": "serdica.ablera.dev",
"UserName": "ablera",
"Password": "AblerA2022",
"Port": 5672,
"ParallelConsumersCount": 2,
"ConsumerPrefetchCount": 1,
"Exchange": "authority",
"RequestQueueName": "authority.request"
},
"MicroserviceConfig": {
"SectionName": "Authority",
"ExchangeName": "authority",
"DefaultAllowedRoles": [ "DBA" ],
"DefaultTimeout": "00:00:15"
},
"RedisConfig": {
"ServerUrl": "serdica.ablera.dev:6379",
"Password": "AblerA2022"
},
"ConnectionStrings": {
"DefaultConnection": "DATA SOURCE=(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=db.serdica.ablera.dev)(PORT=1521))(CONNECT_DATA=(SID=orcl1)));USER ID=srd_sys;PASSWORD=srd_sys"
},
"ConnectionSettings": {
"Oracle": {
"KeepAlive": true,
"KeepAliveInterval": 60,
"KeepAliveTime": 10,
"MaxCachedQueries": 200
}
},
"UsersConfigurationSettings": {
"AuthorizationTokenDurationInMinutes": 6,
"CacheKey": "users-configuration",
"MinimumAutoLogoutMinutes": 5,
"MaximumAutoLogoutMinutes": 43000,
"DefaultAutoLoginInSeconds": null,
"IsAutoLogoutEnabled": true,
"DefaultMainOfficeCode": "0200",
"DefaultCountry": "BG",
"DefaultLanguage": "BG"
},
"UserManagingDirectorConfig": {
"LoginAnywhere": true,
"UpdateEveryWhere": false
},
"SerdicaConfig": {
"TrustedNetworks": [
"127.0.0.1/8",
"10.0.0.0/8",
"172.16.0.0/12"
]
},
"PluginsConfig": {
"PluginsDirectory": "PluginBinaries",
"PluginsOrder": [ "Ablera.Serdica.Authority.Plugin.Ldap", "Ablera.Serdica.Authority.Plugin.Bulstrad", "Ablera.Serdica.Authority.Plugin.Standard" ]
},
"FileServerConfig": {
"RootPathPrefixForWWW": ""
}
}

Some files were not shown because too many files have changed in this diff