feat: Initialize Zastava Webhook service with TLS and Authority authentication
- Added Program.cs to set up the web application with Serilog for logging, health check endpoints, and a placeholder admission endpoint (sketched below).
- Configured the Kestrel server to use TLS 1.3 and handle client certificates appropriately.
- Created StellaOps.Zastava.Webhook.csproj with the necessary dependencies, including Serilog and Polly.
- Documented tasks in TASKS.md for the Zastava Webhook project, outlining current work and exit criteria for each task.
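For context, a minimal sketch of the kind of Program.cs wiring described above, assuming the standard ASP.NET Core minimal-hosting model. The `/healthz` and `/admission` paths, the `AllowCertificate` mode, and the configuration-driven Serilog setup are illustrative assumptions, not the committed implementation; the real Authority authentication and admission-review handling live in the Zastava Webhook project itself.

```csharp
// Hypothetical sketch of a Zastava Webhook Program.cs. Paths and option choices
// are assumptions; only TLS 1.3, client-certificate handling, Serilog, health
// checks, and a placeholder admission endpoint come from the commit message.
using System.Security.Authentication;
using Microsoft.AspNetCore.Server.Kestrel.Https;
using Serilog;

var builder = WebApplication.CreateBuilder(args);

// Serilog as the logging provider, reading sink configuration from appsettings.
builder.Host.UseSerilog((context, loggerConfiguration) =>
    loggerConfiguration.ReadFrom.Configuration(context.Configuration));

// Kestrel: restrict the listener to TLS 1.3 and surface client certificates
// so the webhook can later validate them against Authority.
builder.WebHost.ConfigureKestrel(kestrel =>
{
    kestrel.ConfigureHttpsDefaults(https =>
    {
        https.SslProtocols = SslProtocols.Tls13;
        https.ClientCertificateMode = ClientCertificateMode.AllowCertificate; // assumption
    });
});

builder.Services.AddHealthChecks();

var app = builder.Build();

app.MapHealthChecks("/healthz");   // health check endpoint (path assumed)
app.MapPost("/admission", () =>    // placeholder admission endpoint (path assumed)
    Results.Ok(new { allowed = true }));

app.Run();
```

Kestrel's `ClientCertificateMode` determines whether the TLS handshake merely surfaces or strictly requires a client certificate; the commit only says certificates are handled appropriately, so the sketch uses the permissive mode and leaves enforcement to the admission pipeline.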
@@ -85,6 +85,29 @@ jobs:
            --logger "trx;LogFileName=stellaops-feedser-tests.trx" \
            --results-directory "$TEST_RESULTS_DIR"

      - name: Build scanner language analyzer projects
        run: |
          dotnet restore src/StellaOps.sln
          for project in \
            src/StellaOps.Scanner.Analyzers.Lang/StellaOps.Scanner.Analyzers.Lang.csproj \
            src/StellaOps.Scanner.Analyzers.Lang.Java/StellaOps.Scanner.Analyzers.Lang.Java.csproj \
            src/StellaOps.Scanner.Analyzers.Lang.Node/StellaOps.Scanner.Analyzers.Lang.Node.csproj \
            src/StellaOps.Scanner.Analyzers.Lang.Python/StellaOps.Scanner.Analyzers.Lang.Python.csproj \
            src/StellaOps.Scanner.Analyzers.Lang.Go/StellaOps.Scanner.Analyzers.Lang.Go.csproj \
            src/StellaOps.Scanner.Analyzers.Lang.DotNet/StellaOps.Scanner.Analyzers.Lang.DotNet.csproj \
            src/StellaOps.Scanner.Analyzers.Lang.Rust/StellaOps.Scanner.Analyzers.Lang.Rust.csproj
          do
            dotnet build "$project" --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
          done

      - name: Run scanner language analyzer tests
        run: |
          dotnet test src/StellaOps.Scanner.Analyzers.Lang.Tests/StellaOps.Scanner.Analyzers.Lang.Tests.csproj \
            --configuration $BUILD_CONFIGURATION \
            --no-build \
            --logger "trx;LogFileName=stellaops-scanner-lang-tests.trx" \
            --results-directory "$TEST_RESULTS_DIR"

      - name: Publish BuildX SBOM generator
        run: |
          dotnet publish src/StellaOps.Scanner.Sbomer.BuildXPlugin/StellaOps.Scanner.Sbomer.BuildXPlugin.csproj \
@@ -152,6 +175,25 @@ PY
          if-no-files-found: error
          retention-days: 7

      - name: Package OS analyzer plug-ins
        run: |
          if [ ! -d "plugins/scanner/analyzers/os" ]; then
            echo "OS analyzer plug-in directory not found" >&2
            exit 1
          fi

          mkdir -p artifacts/plugins/os
          tar -czf artifacts/plugins/os/stellaops-scanner-os-analyzers.tar.gz -C plugins/scanner/analyzers/os .
          sha256sum artifacts/plugins/os/stellaops-scanner-os-analyzers.tar.gz > artifacts/plugins/os/stellaops-scanner-os-analyzers.tar.gz.sha256

      - name: Upload OS analyzer plug-ins
        uses: actions/upload-artifact@v4
        with:
          name: scanner-os-analyzers
          path: artifacts/plugins/os
          if-no-files-found: error
          retention-days: 7

      - name: Publish Feedser web service
        run: |
          mkdir -p "$PUBLISH_DIR"
@@ -224,7 +266,7 @@ PY
    runs-on: ubuntu-22.04
    env:
      DOCS_OUTPUT_DIR: ${{ github.workspace }}/artifacts/docs-site
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
@@ -246,18 +288,100 @@ PY
        uses: actions/upload-artifact@v4
        with:
          name: feedser-docs-site
          path: ${{ env.DOCS_OUTPUT_DIR }}
          if-no-files-found: error
          retention-days: 7

  deploy:
    runs-on: ubuntu-22.04
    needs: [build-test, docs]
    if: >-
      needs.build-test.result == 'success' &&
      needs.docs.result == 'success' &&
      (
        (github.event_name == 'push' && github.ref == 'refs/heads/main') ||

  scanner-perf:
    runs-on: ubuntu-22.04
    needs: build-test
    env:
      BENCH_DIR: bench/Scanner.Analyzers
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Run analyzer microbench
        working-directory: ${{ env.BENCH_DIR }}
        run: |
          node run-bench.js \
            --repo-root "${{ github.workspace }}" \
            --out latest.csv \
            --threshold-ms 5000

      - name: Compare against baseline
        working-directory: ${{ env.BENCH_DIR }}
        run: |
          node - <<'NODE'
          const fs = require('fs');
          const path = require('path');

          function parseCsv(file) {
            const rows = fs.readFileSync(file, 'utf8').trim().split(/\r?\n/);
            rows.shift();
            const data = {};
            for (const row of rows) {
              const [id, iterations, sampleCount, mean, p95, max] = row.split(',');
              data[id] = {
                iterations: Number(iterations),
                sampleCount: Number(sampleCount),
                mean: Number(mean),
                p95: Number(p95),
                max: Number(max),
              };
            }
            return data;
          }

          const baseline = parseCsv('baseline.csv');
          const latest = parseCsv('latest.csv');
          const allowedMultiplier = 1.20;
          const regressions = [];

          for (const [id, baseMetrics] of Object.entries(baseline)) {
            const current = latest[id];
            if (!current) {
              regressions.push(`Scenario ${id} missing from latest run`);
              continue;
            }
            if (current.mean > baseMetrics.mean * allowedMultiplier) {
              regressions.push(`Scenario ${id} mean ${current.mean.toFixed(2)}ms exceeded baseline ${baseMetrics.mean.toFixed(2)}ms by >20%`);
            }
            if (current.max > baseMetrics.max * allowedMultiplier) {
              regressions.push(`Scenario ${id} max ${current.max.toFixed(2)}ms exceeded baseline ${baseMetrics.max.toFixed(2)}ms by >20%`);
            }
          }

          if (regressions.length > 0) {
            console.error('Performance regression detected:');
            for (const msg of regressions) {
              console.error(` - ${msg}`);
            }
            process.exit(1);
          }
          NODE

      - name: Upload bench report
        uses: actions/upload-artifact@v4
        with:
          name: scanner-analyzers-bench
          path: ${{ env.BENCH_DIR }}/latest.csv
          retention-days: 7

  deploy:
    runs-on: ubuntu-22.04
    needs: [build-test, docs, scanner-perf]
    if: >-
      needs.build-test.result == 'success' &&
      needs.docs.result == 'success' &&
      needs.scanner-perf.result == 'success' &&
      (
        (github.event_name == 'push' && github.ref == 'refs/heads/main') ||
        github.event_name == 'workflow_dispatch'
      )
    environment: staging
@@ -49,9 +49,19 @@ jobs:

      - name: Validate event schemas
        run: |
          set -euo pipefail
          for schema in docs/events/*.json; do
            npx ajv compile -c ajv-formats -s "$schema"
          done
          for sample in docs/events/samples/*.json; do
            schema_name=$(basename "$sample" .sample.json)
            schema_path="docs/events/${schema_name}.json"
            if [ ! -f "$schema_path" ]; then
              echo "Missing schema for sample ${sample}" >&2
              exit 1
            fi
            npx ajv validate -c ajv-formats -s "$schema_path" -d "$sample"
          done

      - name: Setup Python
        uses: actions/setup-python@v5