CI/CD consolidation

This commit is contained in:
StellaOps Bot
2025-12-26 17:32:23 +02:00
parent a866eb6277
commit c786faae84
638 changed files with 3821 additions and 181 deletions

@@ -1,30 +0,0 @@
openapi: 3.1.0
info:
title: Demo API
version: 1.1.0
paths:
/foo:
get:
parameters:
- in: query
name: tenant
required: true
responses:
"201":
description: created
/bar:
get:
responses:
"200":
description: ok
/baz:
post:
requestBody:
required: true
content:
application/json:
schema:
type: object
responses:
"201":
description: created

@@ -1,29 +0,0 @@
openapi: 3.1.0
info:
title: Demo API
version: 1.0.0
paths:
/foo:
get:
parameters:
- in: query
name: filter
required: false
responses:
"200":
description: ok
content:
application/json:
schema:
type: string
/baz:
post:
requestBody:
required: false
content:
application/json:
schema:
type: object
responses:
"201":
description: created

@@ -1,110 +0,0 @@
#!/usr/bin/env python3
"""
Add BLOCKED dependency tree reference to all sprint files.
"""
import os
import re
from pathlib import Path
DOCS_DIR = Path(__file__).parent.parent / "docs"
IMPLPLAN_DIR = DOCS_DIR / "implplan"
ROUTER_DIR = DOCS_DIR / "router"
# Reference lines with correct relative paths
REFERENCE_LINE_IMPLPLAN = "\n> **BLOCKED Tasks:** Before working on BLOCKED tasks, review [BLOCKED_DEPENDENCY_TREE.md](./BLOCKED_DEPENDENCY_TREE.md) for root blockers and dependencies.\n"
REFERENCE_LINE_ROUTER = "\n> **BLOCKED Tasks:** Before working on BLOCKED tasks, review [../implplan/BLOCKED_DEPENDENCY_TREE.md](../implplan/BLOCKED_DEPENDENCY_TREE.md) for root blockers and dependencies.\n"
def add_reference_to_sprint(filepath: Path, reference_line: str) -> bool:
"""Add BLOCKED reference to a sprint file. Returns True if modified."""
content = filepath.read_text(encoding="utf-8")
# Skip if reference already exists
if "BLOCKED_DEPENDENCY_TREE.md" in content:
return False
# Find the best insertion point
# Priority 1: After "## Documentation Prerequisites" section (before next ##)
# Priority 2: After "## Dependencies & Concurrency" section
# Priority 3: After the first line (title)
lines = content.split("\n")
insert_index = None
# Look for Documentation Prerequisites section
for i, line in enumerate(lines):
if line.strip().startswith("## Documentation Prerequisites"):
# Find the next section header or end of list
for j in range(i + 1, len(lines)):
if lines[j].strip().startswith("## "):
insert_index = j
break
elif lines[j].strip() == "" and j + 1 < len(lines) and lines[j + 1].strip().startswith("## "):
insert_index = j + 1
break
if insert_index is None:
# No next section found, insert after last non-empty line in prerequisites
for j in range(i + 1, len(lines)):
if lines[j].strip().startswith("## "):
insert_index = j
break
break
# Fallback: after Dependencies & Concurrency
if insert_index is None:
for i, line in enumerate(lines):
if line.strip().startswith("## Dependencies"):
for j in range(i + 1, len(lines)):
if lines[j].strip().startswith("## "):
insert_index = j
break
break
# Fallback: after first heading
if insert_index is None:
for i, line in enumerate(lines):
if line.strip().startswith("# "):
insert_index = i + 2 # After title and blank line
break
# Final fallback: beginning of file
if insert_index is None:
insert_index = 1
# Insert the reference
new_lines = lines[:insert_index] + [reference_line.strip(), ""] + lines[insert_index:]
new_content = "\n".join(new_lines)
filepath.write_text(new_content, encoding="utf-8")
return True
def main():
modified = 0
skipped = 0
# Process implplan directory
print("Processing docs/implplan...")
for filepath in sorted(IMPLPLAN_DIR.glob("SPRINT_*.md")):
if add_reference_to_sprint(filepath, REFERENCE_LINE_IMPLPLAN):
print(f"Modified: {filepath.name}")
modified += 1
else:
print(f"Skipped: {filepath.name}")
skipped += 1
# Process router directory
print("\nProcessing docs/router...")
for filepath in sorted(ROUTER_DIR.glob("SPRINT_*.md")):
if add_reference_to_sprint(filepath, REFERENCE_LINE_ROUTER):
print(f"Modified: {filepath.name}")
modified += 1
else:
print(f"Skipped: {filepath.name}")
skipped += 1
print(f"\nSummary: {modified} files modified, {skipped} files skipped")
if __name__ == "__main__":
main()

@@ -1,32 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Minimal verifier sample for AIRGAP-VERIFY-510-014. Adjust paths to your kit.
KIT_ROOT=${1:-./offline}
MANIFEST="$KIT_ROOT/manifest.json"
SIG="$KIT_ROOT/manifest.dsse"
echo "[*] Verifying manifest signature..."
cosign verify-blob --key trust-roots/manifest.pub --signature "$SIG" "$MANIFEST"
echo "[*] Checking chunk hashes..."
python - "$MANIFEST" <<'PY'
import json, hashlib, sys, os
manifest_path=os.environ.get('MANIFEST') or sys.argv[1]
with open(manifest_path) as f:
data=json.load(f)
ok=True
for entry in data.get('chunks', []):
path=os.path.join(os.path.dirname(manifest_path), entry['path'])
h=hashlib.sha256()
with open(path,'rb') as fh:
h.update(fh.read())
if h.hexdigest()!=entry['sha256']:
ok=False
print(f"HASH MISMATCH {entry['path']}")
if not ok:
sys.exit(4)
PY
echo "[*] Done."

@@ -1,129 +0,0 @@
#!/usr/bin/env node
import fs from 'node:fs';
import path from 'node:path';
import crypto from 'node:crypto';
import yaml from 'yaml';
const ROOT = path.resolve('src/Api/StellaOps.Api.OpenApi');
const BASELINE = path.join(ROOT, 'baselines', 'stella-baseline.yaml');
const CURRENT = path.join(ROOT, 'stella.yaml');
const OUTPUT = path.join(ROOT, 'CHANGELOG.md');
const RELEASE_OUT = path.resolve('src/Sdk/StellaOps.Sdk.Release/out/api-changelog');
function panic(message) {
console.error(`[api:changelog] ${message}`);
process.exit(1);
}
function loadSpec(file) {
if (!fs.existsSync(file)) {
panic(`Spec not found: ${file}`);
}
return yaml.parse(fs.readFileSync(file, 'utf8'));
}
function enumerateOps(spec) {
const ops = new Map();
for (const [route, methods] of Object.entries(spec.paths || {})) {
for (const [method, operation] of Object.entries(methods || {})) {
const lower = method.toLowerCase();
if (!['get','post','put','delete','patch','head','options','trace'].includes(lower)) continue;
const id = `${lower.toUpperCase()} ${route}`;
ops.set(id, operation || {});
}
}
return ops;
}
function diffSpecs(oldSpec, newSpec) {
const oldOps = enumerateOps(oldSpec);
const newOps = enumerateOps(newSpec);
const additive = [];
const breaking = [];
for (const id of newOps.keys()) {
if (!oldOps.has(id)) {
additive.push(id);
}
}
for (const id of oldOps.keys()) {
if (!newOps.has(id)) {
breaking.push(id);
}
}
return { additive: additive.sort(), breaking: breaking.sort() };
}
function renderMarkdown(diff) {
const lines = [];
lines.push('# API Changelog');
lines.push('');
const date = new Date().toISOString();
lines.push(`Generated: ${date}`);
lines.push('');
lines.push('## Additive Operations');
if (diff.additive.length === 0) {
lines.push('- None');
} else {
diff.additive.forEach((op) => lines.push(`- ${op}`));
}
lines.push('');
lines.push('## Breaking Operations');
if (diff.breaking.length === 0) {
lines.push('- None');
} else {
diff.breaking.forEach((op) => lines.push(`- ${op}`));
}
lines.push('');
return lines.join('\n');
}
function ensureReleaseDir() {
fs.mkdirSync(RELEASE_OUT, { recursive: true });
}
function sha256(content) {
return crypto.createHash('sha256').update(content).digest('hex');
}
function signDigest(digest) {
const key = process.env.API_CHANGELOG_SIGNING_KEY;
if (!key) {
return null;
}
const hmac = crypto.createHmac('sha256', Buffer.from(key, 'utf8'));
hmac.update(digest);
return hmac.digest('hex');
}
function main() {
if (!fs.existsSync(BASELINE)) {
console.log('[api:changelog] baseline missing; skipping');
return;
}
const diff = diffSpecs(loadSpec(BASELINE), loadSpec(CURRENT));
const markdown = renderMarkdown(diff);
fs.writeFileSync(OUTPUT, markdown, 'utf8');
console.log(`[api:changelog] wrote changelog to ${OUTPUT}`);
ensureReleaseDir();
const releaseChangelog = path.join(RELEASE_OUT, 'CHANGELOG.md');
fs.writeFileSync(releaseChangelog, markdown, 'utf8');
const digest = sha256(markdown);
const digestFile = path.join(RELEASE_OUT, 'CHANGELOG.sha256');
fs.writeFileSync(digestFile, `${digest}  CHANGELOG.md\n`, 'utf8');
const signature = signDigest(digest);
if (signature) {
fs.writeFileSync(path.join(RELEASE_OUT, 'CHANGELOG.sig'), signature, 'utf8');
console.log('[api:changelog] wrote signature for release artifact');
} else {
console.log('[api:changelog] signature skipped (API_CHANGELOG_SIGNING_KEY not set)');
}
console.log(`[api:changelog] copied changelog + digest to ${RELEASE_OUT}`);
}
main();
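Given the HMAC scheme above, the signature can be re-derived offline from the digest file with openssl — a sketch assuming access to the same `API_CHANGELOG_SIGNING_KEY`, not a committed verification flow:

```sh
# Recompute the HMAC-SHA256 over the digest hex string and compare with CHANGELOG.sig.
cd src/Sdk/StellaOps.Sdk.Release/out/api-changelog
sha256sum -c CHANGELOG.sha256
digest=$(cut -d' ' -f1 CHANGELOG.sha256)
calc=$(printf '%s' "$digest" | openssl dgst -sha256 -hmac "$API_CHANGELOG_SIGNING_KEY" -r | cut -d' ' -f1)
[ "$calc" = "$(cat CHANGELOG.sig)" ] && echo "signature OK" || echo "signature MISMATCH"
```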

@@ -1,104 +0,0 @@
#!/usr/bin/env node
/**
* Generate a Markdown changelog from two OpenAPI specs using the api-compat-diff tool.
*
* Usage:
* node scripts/api-compat-changelog.mjs <oldSpec> <newSpec> [--title "Release X"] [--fail-on-breaking]
*
* Output is written to stdout.
*/
import { execFileSync } from 'child_process';
import process from 'process';
import path from 'path';
function panic(message) {
console.error(`[api-compat-changelog] ${message}`);
process.exit(1);
}
function parseArgs(argv) {
const args = argv.slice(2);
if (args.length < 2) {
panic('Usage: node scripts/api-compat-changelog.mjs <oldSpec> <newSpec> [--title "Release X"] [--fail-on-breaking]');
}
const opts = {
oldSpec: args[0],
newSpec: args[1],
title: 'API Compatibility Report',
failOnBreaking: false,
};
for (let i = 2; i < args.length; i += 1) {
const arg = args[i];
if (arg === '--title' && args[i + 1]) {
opts.title = args[i + 1];
i += 1;
} else if (arg === '--fail-on-breaking') {
opts.failOnBreaking = true;
}
}
return opts;
}
function runCompatDiff(oldSpec, newSpec) {
const output = execFileSync(
'node',
['scripts/api-compat-diff.mjs', oldSpec, newSpec, '--output', 'json'],
{ encoding: 'utf8' }
);
return JSON.parse(output);
}
function formatList(items, symbol) {
if (!items || items.length === 0) {
return `${symbol} None`;
}
return items.map((item) => `${symbol} ${item}`).join('\n');
}
function renderMarkdown(title, diff, oldSpec, newSpec) {
return [
`# ${title}`,
'',
`- Old spec: \`${path.relative(process.cwd(), oldSpec)}\``,
`- New spec: \`${path.relative(process.cwd(), newSpec)}\``,
'',
'## Summary',
`- Additive operations: ${diff.additive.operations.length}`,
`- Breaking operations: ${diff.breaking.operations.length}`,
`- Additive responses: ${diff.additive.responses.length}`,
`- Breaking responses: ${diff.breaking.responses.length}`,
'',
'## Additive',
'### Operations',
formatList(diff.additive.operations, '-'),
'',
'### Responses',
formatList(diff.additive.responses, '-'),
'',
'## Breaking',
'### Operations',
formatList(diff.breaking.operations, '-'),
'',
'### Responses',
formatList(diff.breaking.responses, '-'),
'',
].join('\n');
}
function main() {
const opts = parseArgs(process.argv);
const diff = runCompatDiff(opts.oldSpec, opts.newSpec);
const markdown = renderMarkdown(opts.title, diff, opts.oldSpec, opts.newSpec);
console.log(markdown);
if (opts.failOnBreaking && (diff.breaking.operations.length > 0 || diff.breaking.responses.length > 0)) {
process.exit(2);
}
}
if (import.meta.url === `file://${process.argv[1]}`) {
main();
}

@@ -1,26 +0,0 @@
import assert from 'assert';
import { fileURLToPath } from 'url';
import path from 'path';
import { execFileSync } from 'child_process';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const root = path.join(__dirname, '..');
const fixturesDir = path.join(root, 'scripts', '__fixtures__', 'api-compat');
const oldSpec = path.join(fixturesDir, 'old.yaml');
const newSpec = path.join(fixturesDir, 'new.yaml');
const output = execFileSync('node', ['scripts/api-compat-changelog.mjs', oldSpec, newSpec, '--title', 'Test Report'], {
cwd: root,
encoding: 'utf8',
});
assert(output.includes('# Test Report'));
assert(output.includes('Additive operations: 1'));
assert(output.includes('Breaking operations: 0'));
assert(output.includes('- get /bar'));
assert(output.includes('- get /foo -> 201'));
assert(output.includes('- get /foo -> 200'));
console.log('api-compat-changelog test passed');

@@ -1,359 +0,0 @@
#!/usr/bin/env node
/**
* API compatibility diff tool
* Compares two OpenAPI 3.x specs (YAML or JSON) and reports additive vs breaking changes.
*
* Usage:
* node scripts/api-compat-diff.mjs <oldSpec> <newSpec> [--output json|text] [--fail-on-breaking]
*
* Output (text):
* - Added/removed operations
* - Added/removed responses
* - Parameter additions/removals/requiredness changes
* - Response content-type additions/removals
* - Request body additions/removals/requiredness and content-type changes
*
* Output (json):
* {
* additive: { operations, responses, parameters, responseContentTypes, requestBodies },
* breaking: { operations, responses, parameters, responseContentTypes, requestBodies }
* }
*
* Exit codes:
* 0 => success
* 1 => invalid/missing args or IO/parsing error
* 2 => breaking changes detected with --fail-on-breaking
*/
import fs from 'fs';
import path from 'path';
import process from 'process';
import yaml from 'yaml';
function panic(message) {
console.error(`[api-compat-diff] ${message}`);
process.exit(1);
}
function parseArgs(argv) {
const args = argv.slice(2);
const opts = { output: 'text', failOnBreaking: false };
if (args.length < 2) {
panic('Usage: node scripts/api-compat-diff.mjs <oldSpec> <newSpec> [--output json|text] [--fail-on-breaking]');
}
[opts.oldSpec, opts.newSpec] = args.slice(0, 2);
for (let i = 2; i < args.length; i += 1) {
const arg = args[i];
if (arg === '--output' && args[i + 1]) {
opts.output = args[i + 1].toLowerCase();
i += 1;
} else if (arg === '--fail-on-breaking') {
opts.failOnBreaking = true;
}
}
if (!['text', 'json'].includes(opts.output)) {
panic(`Unsupported output mode: ${opts.output}`);
}
return opts;
}
function loadSpec(specPath) {
if (!fs.existsSync(specPath)) {
panic(`Spec not found: ${specPath}`);
}
const raw = fs.readFileSync(specPath, 'utf8');
const ext = path.extname(specPath).toLowerCase();
try {
if (ext === '.json') {
return JSON.parse(raw);
}
return yaml.parse(raw);
} catch (err) {
panic(`Failed to parse ${specPath}: ${err.message}`);
}
}
function normalizeParams(params) {
const map = new Map();
if (!Array.isArray(params)) return map;
for (const param of params) {
if (!param || typeof param !== 'object') continue;
if (param.$ref) {
map.set(`ref:${param.$ref}`, { required: param.required === true, isRef: true });
continue;
}
const name = param.name;
const loc = param.in;
if (!name || !loc) continue;
const key = `${name}:${loc}`;
map.set(key, { required: param.required === true, isRef: false });
}
return map;
}
function describeParam(key, requiredFlag) {
if (key.startsWith('ref:')) {
return key.replace(/^ref:/, '');
}
const [name, loc] = key.split(':');
const requiredLabel = requiredFlag ? ' (required)' : '';
return `${name} in ${loc}${requiredLabel}`;
}
function enumerateOperations(spec) {
const ops = new Map();
if (!spec?.paths || typeof spec.paths !== 'object') {
return ops;
}
for (const [pathKey, pathItem] of Object.entries(spec.paths)) {
if (!pathItem || typeof pathItem !== 'object') {
continue;
}
const pathParams = normalizeParams(pathItem.parameters ?? []);
for (const method of Object.keys(pathItem)) {
const lowerMethod = method.toLowerCase();
if (!['get', 'put', 'post', 'delete', 'patch', 'head', 'options', 'trace'].includes(lowerMethod)) {
continue;
}
const op = pathItem[method];
if (!op || typeof op !== 'object') {
continue;
}
const opId = `${lowerMethod} ${pathKey}`;
const opParams = normalizeParams(op.parameters ?? []);
const parameters = new Map(pathParams);
for (const [key, val] of opParams.entries()) {
parameters.set(key, val);
}
const responseContentTypes = new Map();
const responses = new Set();
const responseEntries = Object.entries(op.responses ?? {});
for (const [code, resp] of responseEntries) {
responses.add(code);
const contentTypes = new Set(Object.keys(resp?.content ?? {}));
responseContentTypes.set(code, contentTypes);
}
const requestBody = op.requestBody
? {
present: true,
required: op.requestBody.required === true,
contentTypes: new Set(Object.keys(op.requestBody.content ?? {})),
}
: { present: false, required: false, contentTypes: new Set() };
ops.set(opId, {
method: lowerMethod,
path: pathKey,
responses,
responseContentTypes,
parameters,
requestBody,
});
}
}
return ops;
}
function diffOperations(oldOps, newOps) {
const additiveOps = [];
const breakingOps = [];
const additiveResponses = [];
const breakingResponses = [];
const additiveParams = [];
const breakingParams = [];
const additiveResponseContentTypes = [];
const breakingResponseContentTypes = [];
const additiveRequestBodies = [];
const breakingRequestBodies = [];
// Operations added or removed
for (const [id] of newOps.entries()) {
if (!oldOps.has(id)) {
additiveOps.push(id);
}
}
for (const [id] of oldOps.entries()) {
if (!newOps.has(id)) {
breakingOps.push(id);
}
}
// Response- and parameter-level diffs for shared operations
for (const [id, newOp] of newOps.entries()) {
if (!oldOps.has(id)) continue;
const oldOp = oldOps.get(id);
for (const code of newOp.responses) {
if (!oldOp.responses.has(code)) {
additiveResponses.push(`${id} -> ${code}`);
}
}
for (const code of oldOp.responses) {
if (!newOp.responses.has(code)) {
breakingResponses.push(`${id} -> ${code}`);
}
}
for (const code of newOp.responses) {
if (!oldOp.responses.has(code)) continue;
const oldTypes = oldOp.responseContentTypes.get(code) ?? new Set();
const newTypes = newOp.responseContentTypes.get(code) ?? new Set();
for (const ct of newTypes) {
if (!oldTypes.has(ct)) {
additiveResponseContentTypes.push(`${id} -> ${code} (${ct})`);
}
}
for (const ct of oldTypes) {
if (!newTypes.has(ct)) {
breakingResponseContentTypes.push(`${id} -> ${code} (${ct})`);
}
}
}
for (const [key, oldParam] of oldOp.parameters.entries()) {
if (!newOp.parameters.has(key)) {
breakingParams.push(`${id} -> - parameter ${describeParam(key, oldParam.required)}`);
}
}
for (const [key, newParam] of newOp.parameters.entries()) {
if (!oldOp.parameters.has(key)) {
const target = newParam.required ? breakingParams : additiveParams;
target.push(`${id} -> + parameter ${describeParam(key, newParam.required)}`);
continue;
}
const oldParam = oldOp.parameters.get(key);
if (oldParam.required !== newParam.required) {
if (newParam.required) {
breakingParams.push(`${id} -> parameter ${describeParam(key)} made required`);
} else {
additiveParams.push(`${id} -> parameter ${describeParam(key)} made optional`);
}
}
}
const { requestBody: oldBody } = oldOp;
const { requestBody: newBody } = newOp;
if (oldBody.present && !newBody.present) {
breakingRequestBodies.push(`${id} -> - requestBody`);
} else if (!oldBody.present && newBody.present) {
const target = newBody.required ? breakingRequestBodies : additiveRequestBodies;
const label = newBody.required ? 'required' : 'optional';
target.push(`${id} -> + requestBody (${label})`);
} else if (oldBody.present && newBody.present) {
if (oldBody.required !== newBody.required) {
if (newBody.required) {
breakingRequestBodies.push(`${id} -> requestBody made required`);
} else {
additiveRequestBodies.push(`${id} -> requestBody made optional`);
}
}
for (const ct of newBody.contentTypes) {
if (!oldBody.contentTypes.has(ct)) {
additiveRequestBodies.push(`${id} -> requestBody content-type added: ${ct}`);
}
}
for (const ct of oldBody.contentTypes) {
if (!newBody.contentTypes.has(ct)) {
breakingRequestBodies.push(`${id} -> requestBody content-type removed: ${ct}`);
}
}
}
}
return {
additive: {
operations: additiveOps.sort(),
responses: additiveResponses.sort(),
parameters: additiveParams.sort(),
responseContentTypes: additiveResponseContentTypes.sort(),
requestBodies: additiveRequestBodies.sort(),
},
breaking: {
operations: breakingOps.sort(),
responses: breakingResponses.sort(),
parameters: breakingParams.sort(),
responseContentTypes: breakingResponseContentTypes.sort(),
requestBodies: breakingRequestBodies.sort(),
},
};
}
function renderText(diff) {
const lines = [];
lines.push('Additive:');
lines.push(` Operations: ${diff.additive.operations.length}`);
diff.additive.operations.forEach((op) => lines.push(` + ${op}`));
lines.push(` Responses: ${diff.additive.responses.length}`);
diff.additive.responses.forEach((resp) => lines.push(` + ${resp}`));
lines.push(` Parameters: ${diff.additive.parameters.length}`);
diff.additive.parameters.forEach((param) => lines.push(` + ${param}`));
lines.push(` Response content-types: ${diff.additive.responseContentTypes.length}`);
diff.additive.responseContentTypes.forEach((ct) => lines.push(` + ${ct}`));
lines.push(` Request bodies: ${diff.additive.requestBodies.length}`);
diff.additive.requestBodies.forEach((rb) => lines.push(` + ${rb}`));
lines.push('Breaking:');
lines.push(` Operations: ${diff.breaking.operations.length}`);
diff.breaking.operations.forEach((op) => lines.push(` - ${op}`));
lines.push(` Responses: ${diff.breaking.responses.length}`);
diff.breaking.responses.forEach((resp) => lines.push(` - ${resp}`));
lines.push(` Parameters: ${diff.breaking.parameters.length}`);
diff.breaking.parameters.forEach((param) => lines.push(` - ${param}`));
lines.push(` Response content-types: ${diff.breaking.responseContentTypes.length}`);
diff.breaking.responseContentTypes.forEach((ct) => lines.push(` - ${ct}`));
lines.push(` Request bodies: ${diff.breaking.requestBodies.length}`);
diff.breaking.requestBodies.forEach((rb) => lines.push(` - ${rb}`));
return lines.join('\n');
}
function main() {
const opts = parseArgs(process.argv);
const oldSpec = loadSpec(opts.oldSpec);
const newSpec = loadSpec(opts.newSpec);
const diff = diffOperations(enumerateOperations(oldSpec), enumerateOperations(newSpec));
if (opts.output === 'json') {
console.log(JSON.stringify(diff, null, 2));
} else {
console.log(renderText(diff));
}
if (opts.failOnBreaking && (
diff.breaking.operations.length > 0
|| diff.breaking.responses.length > 0
|| diff.breaking.parameters.length > 0
|| diff.breaking.responseContentTypes.length > 0
|| diff.breaking.requestBodies.length > 0
)) {
process.exit(2);
}
}
if (import.meta.url === `file://${process.argv[1]}`) {
main();
}
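Run against the fixtures committed alongside the tests; with those specs the expected result is one additive operation (`get /bar`) and several breaking changes, so `--fail-on-breaking` exits 2:

```sh
node scripts/api-compat-diff.mjs \
  scripts/__fixtures__/api-compat/old.yaml \
  scripts/__fixtures__/api-compat/new.yaml \
  --output text --fail-on-breaking
echo "exit code: $?"  # 2: required 'tenant' param added, 'filter' and the 200 response removed, requestBody made required
```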

@@ -1,34 +0,0 @@
import assert from 'assert';
import { fileURLToPath } from 'url';
import path from 'path';
import { execFileSync } from 'child_process';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const fixturesDir = path.join(__dirname, '__fixtures__', 'api-compat');
const oldSpec = path.join(fixturesDir, 'old.yaml');
const newSpec = path.join(fixturesDir, 'new.yaml');
const output = execFileSync('node', ['scripts/api-compat-diff.mjs', oldSpec, newSpec, '--output', 'json'], {
cwd: path.join(__dirname, '..'),
encoding: 'utf8',
});
const diff = JSON.parse(output);
assert.deepStrictEqual(diff.additive.operations, ['get /bar']);
assert.deepStrictEqual(diff.breaking.operations, []);
assert.deepStrictEqual(diff.additive.responses, ['get /foo -> 201']);
assert.deepStrictEqual(diff.breaking.responses, ['get /foo -> 200']);
assert.deepStrictEqual(diff.additive.parameters, []);
assert.deepStrictEqual(diff.breaking.parameters, [
'get /foo -> + parameter tenant in query (required)',
'get /foo -> - parameter filter in query',
]);
assert.deepStrictEqual(diff.additive.requestBodies, []);
assert.deepStrictEqual(diff.breaking.requestBodies, ['post /baz -> requestBody made required']);
assert.deepStrictEqual(diff.additive.responseContentTypes, []);
assert.deepStrictEqual(diff.breaking.responseContentTypes, []);
console.log('api-compat-diff test passed');

@@ -1,139 +0,0 @@
#!/usr/bin/env node
// Verifies every OpenAPI operation has at least one request example and one response example.
import fs from 'node:fs';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { parse } from 'yaml';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const ROOT = path.resolve(__dirname, '..');
const OAS_ROOT = path.join(ROOT, 'src', 'Api', 'StellaOps.Api.OpenApi');
async function main() {
if (!fs.existsSync(OAS_ROOT)) {
console.log('[api:examples] no OpenAPI directory found; skipping');
return;
}
const files = await findYamlFiles(OAS_ROOT);
if (files.length === 0) {
console.log('[api:examples] no OpenAPI files found; skipping');
return;
}
const failures = [];
for (const relative of files) {
const fullPath = path.join(OAS_ROOT, relative);
const content = fs.readFileSync(fullPath, 'utf8');
let doc;
try {
doc = parse(content, { prettyErrors: true });
} catch (err) {
failures.push({ file: relative, path: '', method: '', reason: `YAML parse error: ${err.message}` });
continue;
}
const paths = doc?.paths || {};
for (const [route, methods] of Object.entries(paths)) {
for (const [method, operation] of Object.entries(methods || {})) {
if (!isHttpMethod(method)) continue;
const hasRequestExample = operation?.requestBody ? hasExample(operation.requestBody) : true;
const hasResponseExample = Object.values(operation?.responses || {}).some(resp => hasExample(resp));
if (!hasRequestExample || !hasResponseExample) {
const missing = [];
if (!hasRequestExample) missing.push('request');
if (!hasResponseExample) missing.push('response');
failures.push({ file: relative, path: route, method, reason: `missing ${missing.join(' & ')} example` });
}
}
}
}
if (failures.length > 0) {
console.error('[api:examples] found operations without examples:');
for (const f of failures) {
const locus = [f.file, f.path, f.method.toUpperCase()].filter(Boolean).join(' ');
console.error(` - ${locus}: ${f.reason}`);
}
process.exit(1);
}
console.log('[api:examples] all operations contain request and response examples');
}
async function findYamlFiles(root) {
const results = [];
async function walk(dir) {
const entries = await fs.promises.readdir(dir, { withFileTypes: true });
for (const entry of entries) {
const full = path.join(dir, entry.name);
if (entry.isDirectory()) {
await walk(full);
} else if (entry.isFile() && entry.name.toLowerCase().endsWith('.yaml')) {
results.push(path.relative(root, full));
}
}
}
await walk(root);
return results;
}
function isHttpMethod(method) {
return ['get', 'post', 'put', 'patch', 'delete', 'options', 'head', 'trace'].includes(method.toLowerCase());
}
function hasExample(node) {
if (!node) return false;
// request/response objects may include content -> mediaType -> schema/example/examples
const content = node.content || {};
for (const media of Object.values(content)) {
if (!media) continue;
if (media.example !== undefined) return true;
if (media.examples && Object.keys(media.examples).length > 0) return true;
if (media.schema && hasSchemaExample(media.schema)) return true;
}
// response objects may have "examples" directly (non-standard but allowed by spectral rules)
if (node.examples && Object.keys(node.examples).length > 0) return true;
return false;
}
function hasSchemaExample(schema) {
if (!schema) return false;
if (schema.example !== undefined) return true;
if (schema.examples && Array.isArray(schema.examples) && schema.examples.length > 0) return true;
// Recurse into allOf/oneOf/anyOf
const composites = ['allOf', 'oneOf', 'anyOf'];
for (const key of composites) {
if (Array.isArray(schema[key])) {
if (schema[key].some(hasSchemaExample)) return true;
}
}
// For objects, check properties
if (schema.type === 'object' && schema.properties) {
for (const value of Object.values(schema.properties)) {
if (hasSchemaExample(value)) return true;
}
}
// For arrays, check items
if (schema.type === 'array' && schema.items) {
return hasSchemaExample(schema.items);
}
return false;
}
main().catch(err => {
console.error('[api:examples] fatal error', err);
process.exit(1);
});
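For reference, a minimal operation that satisfies this checker — a media-type-level `example` on one response is enough, and an operation without a `requestBody` passes the request side by default (YAML shown via heredoc, just as an illustration):

```sh
cat <<'YAML'
paths:
  /ping:
    get:
      responses:
        "200":
          description: ok
          content:
            application/json:
              example:
                status: ok
YAML
```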

@@ -1,63 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-ATTEST-74-002: package attestation outputs into an offline bundle with checksums.
if [[ $# -lt 1 ]]; then
echo "Usage: $0 <attest-dir> [bundle-out]" >&2
exit 64
fi
ATTEST_DIR=$1
BUNDLE_OUT=${2:-"out/attest-bundles"}
if [[ ! -d "$ATTEST_DIR" ]]; then
echo "[attest-bundle] attestation directory not found: $ATTEST_DIR" >&2
exit 66
fi
mkdir -p "$BUNDLE_OUT"
TS=$(date -u +"%Y%m%dT%H%M%SZ")
BUNDLE_NAME="attestation-bundle-${TS}"
WORK_DIR="${BUNDLE_OUT}/${BUNDLE_NAME}"
mkdir -p "$WORK_DIR"
copy_if_exists() {
local pattern="$1"
shopt -s nullglob
local files=("$ATTEST_DIR"/$pattern)
if (( ${#files[@]} > 0 )); then
cp "${files[@]}" "$WORK_DIR/"
fi
shopt -u nullglob
}
# Collect common attestation artefacts
copy_if_exists "*.dsse.json"
copy_if_exists "*.in-toto.jsonl"
copy_if_exists "*.sarif"
copy_if_exists "*.intoto.json"
copy_if_exists "*.rekor.txt"
copy_if_exists "*.sig"
copy_if_exists "*.crt"
copy_if_exists "*.pem"
copy_if_exists "*.json"
# Manifest
cat > "${WORK_DIR}/manifest.json" <<EOF
{
"created_at": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
"source_dir": "${ATTEST_DIR}",
"files": $(ls -1 "${WORK_DIR}" | jq -R . | jq -s .)
}
EOF
# Checksums
(
cd "$WORK_DIR"
sha256sum * > SHA256SUMS
)
tar -C "$BUNDLE_OUT" -czf "${WORK_DIR}.tgz" "${BUNDLE_NAME}"
echo "[attest-bundle] bundle created at ${WORK_DIR}.tgz"

@@ -1,163 +0,0 @@
#!/usr/bin/env pwsh
<#
.SYNOPSIS
Audits the codebase for direct usage of System.Security.Cryptography in production code.
.DESCRIPTION
This script scans the codebase for direct usage of System.Security.Cryptography namespace,
which should only be used within crypto provider plugin implementations, not in production code.
All cryptographic operations in production code should use the ICryptoProvider abstraction.
.PARAMETER RootPath
The root path of the StellaOps repository. Defaults to parent directory of this script.
.PARAMETER FailOnViolations
If set, the script will exit with code 1 when violations are found. Default: true.
.PARAMETER VerboseOutput
Enable verbose output showing all scanned files. (Named VerboseOutput to avoid colliding with the built-in -Verbose common parameter.)
.EXAMPLE
.\audit-crypto-usage.ps1
.EXAMPLE
.\audit-crypto-usage.ps1 -RootPath "C:\dev\git.stella-ops.org" -FailOnViolations $true
#>
param(
[Parameter(Mandatory=$false)]
[string]$RootPath = (Split-Path -Parent (Split-Path -Parent $PSScriptRoot)),
[Parameter(Mandatory=$false)]
[bool]$FailOnViolations = $true,
[Parameter(Mandatory=$false)]
[switch]$VerboseOutput
)
Set-StrictMode -Version Latest
$ErrorActionPreference = "Stop"
# ANSI color codes for output
$Red = "`e[31m"
$Green = "`e[32m"
$Yellow = "`e[33m"
$Blue = "`e[34m"
$Reset = "`e[0m"
Write-Host "${Blue}==================================================================${Reset}"
Write-Host "${Blue}StellaOps Cryptography Usage Audit${Reset}"
Write-Host "${Blue}==================================================================${Reset}"
Write-Host ""
# Patterns to search for
$directCryptoPattern = "using System\.Security\.Cryptography"
# Allowed paths where direct crypto usage is permitted
$allowedPathPatterns = @(
"\\__Libraries\\StellaOps\.Cryptography\.Plugin\.", # All crypto plugins
"\\__Tests\\", # Test code
"\\third_party\\", # Third-party code
"\\bench\\", # Benchmark code
"\\.git\\" # Git metadata
)
# Compile regex for performance
$allowedRegex = ($allowedPathPatterns | ForEach-Object { [regex]::Escape($_) }) -join "|"
Write-Host "Scanning for direct crypto usage in production code..."
Write-Host "Root path: ${Blue}$RootPath${Reset}"
Write-Host ""
# Find all C# files
$allCsFiles = Get-ChildItem -Path $RootPath -Recurse -Filter "*.cs" -ErrorAction SilentlyContinue
$scannedCount = 0
$violations = @()
foreach ($file in $allCsFiles) {
$scannedCount++
# Check if file is in an allowed path
$relativePath = $file.FullName.Substring($RootPath.Length)
$isAllowed = $relativePath -match $allowedRegex
if ($isAllowed) {
if ($VerboseOutput) {
Write-Host "${Green}[SKIP]${Reset} $relativePath (allowed path)"
}
continue
}
# Search for direct crypto usage
$fileMatches = Select-String -Path $file.FullName -Pattern $directCryptoPattern -ErrorAction SilentlyContinue
if ($fileMatches) {
foreach ($match in $fileMatches) {
$violations += [PSCustomObject]@{
File = $relativePath
Line = $match.LineNumber
Content = $match.Line.Trim()
}
}
}
if ($VerboseOutput) {
Write-Host "${Green}[OK]${Reset} $relativePath"
}
}
Write-Host ""
Write-Host "${Blue}==================================================================${Reset}"
Write-Host "Scan Results"
Write-Host "${Blue}==================================================================${Reset}"
Write-Host "Total C# files scanned: ${Blue}$scannedCount${Reset}"
Write-Host "Violations found: $(if ($violations.Count -gt 0) { "${Red}$($violations.Count)${Reset}" } else { "${Green}0${Reset}" })"
Write-Host ""
if ($violations.Count -gt 0) {
Write-Host "${Red}FAILED: Direct crypto usage detected in production code!${Reset}"
Write-Host ""
Write-Host "The following files use ${Yellow}System.Security.Cryptography${Reset} directly:"
Write-Host "Production code must use ${Green}ICryptoProvider${Reset} abstraction instead."
Write-Host ""
$groupedViolations = $violations | Group-Object -Property File
foreach ($group in $groupedViolations) {
Write-Host "${Red}${Reset} $($group.Name)"
foreach ($violation in $group.Group) {
Write-Host " Line $($violation.Line): $($violation.Content)"
}
Write-Host ""
}
Write-Host "${Yellow}How to fix:${Reset}"
Write-Host "1. Use ${Green}ICryptoProviderRegistry.ResolveSigner()${Reset} or ${Green}.ResolveHasher()${Reset}"
Write-Host "2. Inject ${Green}ICryptoProviderRegistry${Reset} via dependency injection"
Write-Host "3. For offline/airgap scenarios, use ${Green}OfflineVerificationCryptoProvider${Reset}"
Write-Host ""
Write-Host "Example refactoring:"
Write-Host "${Red}// BEFORE (❌ Not allowed)${Reset}"
Write-Host "using System.Security.Cryptography;"
Write-Host "var hash = SHA256.HashData(data);"
Write-Host ""
Write-Host "${Green}// AFTER (✅ Correct)${Reset}"
Write-Host "using StellaOps.Cryptography;"
Write-Host "var hasher = _cryptoRegistry.ResolveHasher(\"SHA-256\");"
Write-Host "var hash = hasher.Hasher.ComputeHash(data);"
Write-Host ""
if ($FailOnViolations) {
Write-Host "${Red}Audit failed. Exiting with code 1.${Reset}"
exit 1
} else {
Write-Host "${Yellow}Audit failed but FailOnViolations is false. Continuing...${Reset}"
}
} else {
Write-Host "${Green}✓ SUCCESS: No direct crypto usage found in production code!${Reset}"
Write-Host ""
Write-Host "All cryptographic operations correctly use the ${Green}ICryptoProvider${Reset} abstraction."
exit 0
}
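Typical invocations, assuming the script lives under `scripts/` (path hypothetical):

```sh
# Report-only run with per-file progress; does not fail the build.
pwsh scripts/audit-crypto-usage.ps1 -FailOnViolations $false -VerboseOutput
# CI gate: default behaviour, exits 1 on any violation.
pwsh scripts/audit-crypto-usage.ps1
```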

@@ -1,13 +0,0 @@
# Bench scripts
- `determinism-run.sh`: runs the BENCH-DETERMINISM-401-057 harness (`src/Bench/StellaOps.Bench/Determinism`), writes artifacts to `out/bench-determinism`, and enforces the threshold via `BENCH_DETERMINISM_THRESHOLD` (default 0.95). Defaults to 10 runs per scanner/SBOM pair. Environment knobs:
  - `DET_EXTRA_INPUTS` (space-separated globs): include frozen feeds in `inputs.sha256`.
  - `DET_RUN_EXTRA_ARGS`: forward extra arguments to the harness.
  - `DET_REACH_GRAPHS` / `DET_REACH_RUNTIME`: hash reachability datasets and emit `dataset.sha256` + `results-reach.*`.
- `offline_run.sh` (in `Determinism/`): air-gapped runner that reads inputs from `offline/inputs`, writes to `offline/results`, defaults to runs=10 and threshold=0.95, and invokes reachability hashing when graph/runtime inputs exist.
Usage:
```sh
BENCH_DETERMINISM_THRESHOLD=0.97 \
DET_EXTRA_INPUTS="offline/feeds/*.tar.gz" \
DET_REACH_GRAPHS="offline/reachability/graphs/*.json" \
DET_REACH_RUNTIME="offline/reachability/runtime/*.ndjson" \
scripts/bench/determinism-run.sh
```
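For air-gapped hosts, a sketch of the equivalent offline flow (the staging source below is hypothetical):

```sh
cd src/Bench/StellaOps.Bench/Determinism
mkdir -p offline/inputs offline/results
cp /media/offline-kit/sboms/*.json offline/inputs/   # hypothetical staging source
./offline_run.sh
```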

@@ -1,353 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: AGPL-3.0-or-later
# BENCH-AUTO-401-019: Compute FP/MTTD/repro metrics from bench findings
"""
Computes benchmark metrics from src/__Tests/__Benchmarks/findings/** and outputs to results/summary.csv.
Metrics:
- True Positives (TP): Reachable vulns correctly identified
- False Positives (FP): Unreachable vulns incorrectly marked affected
- True Negatives (TN): Unreachable vulns correctly marked not_affected
- False Negatives (FN): Reachable vulns missed
- MTTD: Mean Time To Detect (simulated)
- Reproducibility: Determinism score
Usage:
python scripts/bench/compute-metrics.py [--findings PATH] [--output PATH] [--baseline PATH]
"""
import argparse
import csv
import json
import os
import sys
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
@dataclass
class FindingMetrics:
"""Metrics for a single finding."""
finding_id: str
cve_id: str
variant: str # reachable or unreachable
vex_status: str # affected or not_affected
is_correct: bool
detection_time_ms: float = 0.0
evidence_hash: str = ""
@dataclass
class AggregateMetrics:
"""Aggregated benchmark metrics."""
total_findings: int = 0
true_positives: int = 0 # reachable + affected
false_positives: int = 0 # unreachable + affected
true_negatives: int = 0 # unreachable + not_affected
false_negatives: int = 0 # reachable + not_affected
mttd_ms: float = 0.0
reproducibility: float = 1.0
findings: list = field(default_factory=list)
@property
def precision(self) -> float:
"""TP / (TP + FP)"""
denom = self.true_positives + self.false_positives
return self.true_positives / denom if denom > 0 else 0.0
@property
def recall(self) -> float:
"""TP / (TP + FN)"""
denom = self.true_positives + self.false_negatives
return self.true_positives / denom if denom > 0 else 0.0
@property
def f1_score(self) -> float:
"""2 * (precision * recall) / (precision + recall)"""
p, r = self.precision, self.recall
return 2 * p * r / (p + r) if (p + r) > 0 else 0.0
@property
def accuracy(self) -> float:
"""(TP + TN) / total"""
correct = self.true_positives + self.true_negatives
return correct / self.total_findings if self.total_findings > 0 else 0.0
def load_finding(finding_dir: Path) -> FindingMetrics | None:
"""Load a finding from its directory."""
metadata_path = finding_dir / "metadata.json"
openvex_path = finding_dir / "decision.openvex.json"
if not metadata_path.exists() or not openvex_path.exists():
return None
with open(metadata_path, 'r', encoding='utf-8') as f:
metadata = json.load(f)
with open(openvex_path, 'r', encoding='utf-8') as f:
openvex = json.load(f)
# Extract VEX status
statements = openvex.get("statements", [])
vex_status = statements[0].get("status", "unknown") if statements else "unknown"
# Determine correctness
variant = metadata.get("variant", "unknown")
is_correct = (
(variant == "reachable" and vex_status == "affected") or
(variant == "unreachable" and vex_status == "not_affected")
)
# Extract evidence hash from impact_statement
evidence_hash = ""
if statements:
impact = statements[0].get("impact_statement", "")
if "Evidence hash:" in impact:
evidence_hash = impact.split("Evidence hash:")[1].strip()
return FindingMetrics(
finding_id=finding_dir.name,
cve_id=metadata.get("cve_id", "UNKNOWN"),
variant=variant,
vex_status=vex_status,
is_correct=is_correct,
evidence_hash=evidence_hash
)
def compute_metrics(findings_dir: Path) -> AggregateMetrics:
"""Compute aggregate metrics from all findings."""
metrics = AggregateMetrics()
if not findings_dir.exists():
return metrics
for finding_path in sorted(findings_dir.iterdir()):
if not finding_path.is_dir():
continue
finding = load_finding(finding_path)
if finding is None:
continue
metrics.total_findings += 1
metrics.findings.append(finding)
# Classify finding
if finding.variant == "reachable":
if finding.vex_status == "affected":
metrics.true_positives += 1
else:
metrics.false_negatives += 1
else: # unreachable
if finding.vex_status == "not_affected":
metrics.true_negatives += 1
else:
metrics.false_positives += 1
# Compute MTTD (simulated - based on evidence availability)
# In real scenarios, this would be the time from CVE publication to detection
metrics.mttd_ms = sum(f.detection_time_ms for f in metrics.findings)
if metrics.total_findings > 0:
metrics.mttd_ms /= metrics.total_findings
return metrics
def load_baseline(baseline_path: Path) -> dict:
"""Load baseline scanner results for comparison."""
if not baseline_path.exists():
return {}
with open(baseline_path, 'r', encoding='utf-8') as f:
return json.load(f)
def compare_with_baseline(metrics: AggregateMetrics, baseline: dict) -> dict:
"""Compare StellaOps metrics with baseline scanner."""
comparison = {
"stellaops": {
"precision": metrics.precision,
"recall": metrics.recall,
"f1_score": metrics.f1_score,
"accuracy": metrics.accuracy,
"false_positive_rate": metrics.false_positives / metrics.total_findings if metrics.total_findings > 0 else 0
}
}
if baseline:
# Extract baseline metrics
baseline_metrics = baseline.get("metrics", {})
comparison["baseline"] = {
"precision": baseline_metrics.get("precision", 0),
"recall": baseline_metrics.get("recall", 0),
"f1_score": baseline_metrics.get("f1_score", 0),
"accuracy": baseline_metrics.get("accuracy", 0),
"false_positive_rate": baseline_metrics.get("false_positive_rate", 0)
}
# Compute deltas
comparison["delta"] = {
k: comparison["stellaops"][k] - comparison["baseline"].get(k, 0)
for k in comparison["stellaops"]
}
return comparison
def write_summary_csv(metrics: AggregateMetrics, comparison: dict, output_path: Path):
"""Write summary.csv with all metrics."""
output_path.parent.mkdir(parents=True, exist_ok=True)
with open(output_path, 'w', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
# Header
writer.writerow([
"timestamp",
"total_findings",
"true_positives",
"false_positives",
"true_negatives",
"false_negatives",
"precision",
"recall",
"f1_score",
"accuracy",
"mttd_ms",
"reproducibility"
])
# Data row
writer.writerow([
datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
metrics.total_findings,
metrics.true_positives,
metrics.false_positives,
metrics.true_negatives,
metrics.false_negatives,
f"{metrics.precision:.4f}",
f"{metrics.recall:.4f}",
f"{metrics.f1_score:.4f}",
f"{metrics.accuracy:.4f}",
f"{metrics.mttd_ms:.2f}",
f"{metrics.reproducibility:.4f}"
])
def write_detailed_json(metrics: AggregateMetrics, comparison: dict, output_path: Path):
"""Write detailed JSON report."""
output_path.parent.mkdir(parents=True, exist_ok=True)
report = {
"generated_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
"summary": {
"total_findings": metrics.total_findings,
"true_positives": metrics.true_positives,
"false_positives": metrics.false_positives,
"true_negatives": metrics.true_negatives,
"false_negatives": metrics.false_negatives,
"precision": metrics.precision,
"recall": metrics.recall,
"f1_score": metrics.f1_score,
"accuracy": metrics.accuracy,
"mttd_ms": metrics.mttd_ms,
"reproducibility": metrics.reproducibility
},
"comparison": comparison,
"findings": [
{
"finding_id": f.finding_id,
"cve_id": f.cve_id,
"variant": f.variant,
"vex_status": f.vex_status,
"is_correct": f.is_correct,
"evidence_hash": f.evidence_hash
}
for f in metrics.findings
]
}
with open(output_path, 'w', encoding='utf-8') as f:
json.dump(report, f, indent=2, sort_keys=True)
def main():
parser = argparse.ArgumentParser(
description="Compute FP/MTTD/repro metrics from bench findings"
)
parser.add_argument(
"--findings",
type=Path,
default=Path("src/__Tests/__Benchmarks/findings"),
help="Path to findings directory"
)
parser.add_argument(
"--output",
type=Path,
default=Path("src/__Tests/__Benchmarks/results"),
help="Output directory for metrics"
)
parser.add_argument(
"--baseline",
type=Path,
default=None,
help="Path to baseline scanner results JSON"
)
parser.add_argument(
"--json",
action="store_true",
help="Also output detailed JSON report"
)
args = parser.parse_args()
# Resolve paths relative to repo root
repo_root = Path(__file__).parent.parent.parent
findings_path = repo_root / args.findings if not args.findings.is_absolute() else args.findings
output_path = repo_root / args.output if not args.output.is_absolute() else args.output
print(f"Findings path: {findings_path}")
print(f"Output path: {output_path}")
# Compute metrics
metrics = compute_metrics(findings_path)
print(f"\nMetrics Summary:")
print(f" Total findings: {metrics.total_findings}")
print(f" True Positives: {metrics.true_positives}")
print(f" False Positives: {metrics.false_positives}")
print(f" True Negatives: {metrics.true_negatives}")
print(f" False Negatives: {metrics.false_negatives}")
print(f" Precision: {metrics.precision:.4f}")
print(f" Recall: {metrics.recall:.4f}")
print(f" F1 Score: {metrics.f1_score:.4f}")
print(f" Accuracy: {metrics.accuracy:.4f}")
# Load baseline if provided
baseline = {}
if args.baseline:
baseline_path = repo_root / args.baseline if not args.baseline.is_absolute() else args.baseline
baseline = load_baseline(baseline_path)
if baseline:
print(f"\nBaseline comparison loaded from: {baseline_path}")
comparison = compare_with_baseline(metrics, baseline)
# Write outputs
write_summary_csv(metrics, comparison, output_path / "summary.csv")
print(f"\nWrote summary to: {output_path / 'summary.csv'}")
if args.json:
write_detailed_json(metrics, comparison, output_path / "metrics.json")
print(f"Wrote detailed report to: {output_path / 'metrics.json'}")
return 0
if __name__ == "__main__":
sys.exit(main())
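The `--baseline` file is expected to carry a top-level `metrics` object mirroring the keys compared above; a sketch with made-up values:

```sh
cat > /tmp/baseline.json <<'JSON'
{
  "metrics": {
    "precision": 0.91,
    "recall": 0.88,
    "f1_score": 0.894,
    "accuracy": 0.90,
    "false_positive_rate": 0.06
  }
}
JSON
python3 scripts/bench/compute-metrics.py --baseline /tmp/baseline.json --json
```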

@@ -1,55 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# BENCH-DETERMINISM-401-057: run determinism harness and collect artifacts
ROOT="$(git rev-parse --show-toplevel)"
HARNESS="${ROOT}/src/Bench/StellaOps.Bench/Determinism"
OUT="${ROOT}/out/bench-determinism"
THRESHOLD="${BENCH_DETERMINISM_THRESHOLD:-0.95}"
mkdir -p "$OUT"
cd "$HARNESS"
python run_bench.py \
--sboms inputs/sboms/*.json \
--vex inputs/vex/*.json \
--config configs/scanners.json \
--runs 10 \
--shuffle \
--output results \
--manifest-extra "${DET_EXTRA_INPUTS:-}" \
${DET_RUN_EXTRA_ARGS:-}
cp -a results "$OUT"/
det_rate=$(python -c "import json;print(json.load(open('results/summary.json'))['determinism_rate'])")
printf "determinism_rate=%s\n" "$det_rate" > "$OUT/summary.txt"
printf "timestamp=%s\n" "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" >> "$OUT/summary.txt"
awk -v rate="$det_rate" -v th="$THRESHOLD" 'BEGIN {if (rate+0 < th+0) {printf("determinism_rate %s is below threshold %s\n", rate, th); exit 1}}'
if [ -n "${DET_REACH_GRAPHS:-}" ]; then
echo "[bench-determinism] running reachability dataset hash"
reach_graphs=${DET_REACH_GRAPHS}
reach_runtime=${DET_REACH_RUNTIME:-}
# prefix relative globs with repo root for consistency
case "$reach_graphs" in
/*) ;;
*) reach_graphs="${ROOT}/${reach_graphs}" ;;
esac
case "$reach_runtime" in
/*|"") ;;
*) reach_runtime="${ROOT}/${reach_runtime}" ;;
esac
python run_reachability.py \
--graphs ${reach_graphs} \
--runtime ${reach_runtime} \
--output results
# copy reachability outputs
cp results/results-reach.csv "$OUT"/ || true
cp results/results-reach.json "$OUT"/ || true
cp results/dataset.sha256 "$OUT"/ || true
fi
tar -C "$OUT" -czf "$OUT/bench-determinism-artifacts.tgz" .
echo "[bench-determinism] artifacts at $OUT"

@@ -1,417 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: AGPL-3.0-or-later
# BENCH-AUTO-401-019: Automate population of src/__Tests/__Benchmarks/findings/** from reachbench fixtures
"""
Populates src/__Tests/__Benchmarks/findings/** with per-CVE VEX decision bundles derived from
reachbench fixtures, including reachability evidence, SBOM excerpts, and
DSSE envelope stubs.
Usage:
python scripts/bench/populate-findings.py [--fixtures PATH] [--output PATH] [--dry-run]
"""
import argparse
import base64
import hashlib
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
def blake3_hex(data: bytes) -> str:
"""Compute BLAKE3-256 hash (fallback to SHA-256 if blake3 not installed)."""
try:
import blake3
return blake3.blake3(data).hexdigest()
except ImportError:
return "sha256:" + hashlib.sha256(data).hexdigest()
def sha256_hex(data: bytes) -> str:
"""Compute SHA-256 hash."""
return hashlib.sha256(data).hexdigest()
def canonical_json(obj: Any) -> str:
"""Serialize object to canonical JSON (sorted keys, no extra whitespace for hashes)."""
return json.dumps(obj, sort_keys=True, separators=(',', ':'))
def canonical_json_pretty(obj: Any) -> str:
"""Serialize object to canonical JSON with indentation for readability."""
return json.dumps(obj, sort_keys=True, indent=2)
def load_reachbench_index(fixtures_path: Path) -> dict:
"""Load the reachbench INDEX.json."""
index_path = fixtures_path / "INDEX.json"
if not index_path.exists():
raise FileNotFoundError(f"Reachbench INDEX not found: {index_path}")
with open(index_path, 'r', encoding='utf-8') as f:
return json.load(f)
def load_ground_truth(case_path: Path, variant: str) -> dict | None:
"""Load ground-truth.json for a variant."""
truth_path = case_path / "images" / variant / "reachgraph.truth.json"
if not truth_path.exists():
return None
with open(truth_path, 'r', encoding='utf-8') as f:
return json.load(f)
def create_openvex_decision(
cve_id: str,
purl: str,
status: str, # "not_affected" or "affected"
justification: str | None,
evidence_hash: str,
timestamp: str
) -> dict:
"""Create an OpenVEX decision document."""
statement = {
"@context": "https://openvex.dev/ns/v0.2.0",
"@type": "VEX",
"author": "StellaOps Bench Automation",
"role": "security_team",
"timestamp": timestamp,
"version": 1,
"tooling": "StellaOps/bench-auto@1.0.0",
"statements": [
{
"vulnerability": {
"@id": f"https://nvd.nist.gov/vuln/detail/{cve_id}",
"name": cve_id,
},
"products": [
{"@id": purl}
],
"status": status,
}
]
}
if justification and status == "not_affected":
statement["statements"][0]["justification"] = justification
# Add action_statement for affected
if status == "affected":
statement["statements"][0]["action_statement"] = "Upgrade to patched version or apply mitigation."
# Add evidence reference
statement["statements"][0]["impact_statement"] = f"Evidence hash: {evidence_hash}"
return statement
def create_dsse_envelope_stub(payload: dict, payload_type: str = "application/vnd.openvex+json") -> dict:
"""Create a DSSE envelope stub (signature placeholder for actual signing)."""
payload_json = canonical_json(payload)
payload_b64 = base64.b64encode(payload_json.encode()).decode()
return {
"payloadType": payload_type,
"payload": payload_b64,
"signatures": [
{
"keyid": "stella.ops/bench-automation@v1",
"sig": "PLACEHOLDER_SIGNATURE_REQUIRES_ACTUAL_SIGNING"
}
]
}
def create_metadata(
cve_id: str,
purl: str,
variant: str,
case_id: str,
ground_truth: dict | None,
timestamp: str
) -> dict:
"""Create metadata.json for a finding."""
return {
"cve_id": cve_id,
"purl": purl,
"case_id": case_id,
"variant": variant,
"reachability_status": "reachable" if variant == "reachable" else "unreachable",
"ground_truth_schema": ground_truth.get("schema_version") if ground_truth else None,
"generated_at": timestamp,
"generator": "scripts/bench/populate-findings.py",
"generator_version": "1.0.0"
}
def extract_cve_id(case_id: str) -> str:
"""Extract CVE ID from case_id, or generate a placeholder."""
# Common patterns: log4j -> CVE-2021-44228, curl -> CVE-2023-38545, etc.
cve_mapping = {
"log4j": "CVE-2021-44228",
"curl": "CVE-2023-38545",
"kestrel": "CVE-2023-44487",
"spring": "CVE-2022-22965",
"openssl": "CVE-2022-3602",
"glibc": "CVE-2015-7547",
}
for key, cve in cve_mapping.items():
if key in case_id.lower():
return cve
# Generate placeholder CVE for unknown cases
return f"CVE-BENCH-{case_id.upper()[:8]}"
def extract_purl(case_id: str, case_data: dict) -> str:
"""Extract or generate a purl from case data."""
# Use case metadata if available
if "purl" in case_data:
return case_data["purl"]
# Generate based on case_id patterns
lang = case_data.get("language", "unknown")
version = case_data.get("version", "1.0.0")
pkg_type_map = {
"java": "maven",
"dotnet": "nuget",
"go": "golang",
"python": "pypi",
"rust": "cargo",
"native": "generic",
}
pkg_type = pkg_type_map.get(lang, "generic")
return f"pkg:{pkg_type}/{case_id}@{version}"
def populate_finding(
case_id: str,
case_data: dict,
case_path: Path,
output_dir: Path,
timestamp: str,
dry_run: bool
) -> dict:
"""Populate a single CVE finding bundle."""
cve_id = extract_cve_id(case_id)
purl = extract_purl(case_id, case_data)
results = {
"case_id": case_id,
"cve_id": cve_id,
"variants_processed": [],
"errors": []
}
for variant in ["reachable", "unreachable"]:
variant_path = case_path / "images" / variant
if not variant_path.exists():
continue
ground_truth = load_ground_truth(case_path, variant)
# Determine VEX status based on variant
if variant == "reachable":
vex_status = "affected"
justification = None
else:
vex_status = "not_affected"
justification = "vulnerable_code_not_present"
# Create finding directory
finding_id = f"{cve_id}-{variant}"
finding_dir = output_dir / finding_id
evidence_dir = finding_dir / "evidence"
if not dry_run:
finding_dir.mkdir(parents=True, exist_ok=True)
evidence_dir.mkdir(parents=True, exist_ok=True)
# Create reachability evidence excerpt
evidence = {
"schema_version": "richgraph-excerpt/v1",
"case_id": case_id,
"variant": variant,
"ground_truth": ground_truth,
"paths": ground_truth.get("paths", []) if ground_truth else [],
"generated_at": timestamp
}
evidence_json = canonical_json_pretty(evidence)
evidence_hash = blake3_hex(evidence_json.encode())
if not dry_run:
with open(evidence_dir / "reachability.json", 'w', encoding='utf-8') as f:
f.write(evidence_json)
# Create SBOM excerpt
sbom = {
"bomFormat": "CycloneDX",
"specVersion": "1.6",
"version": 1,
"metadata": {
"timestamp": timestamp,
"tools": [{"vendor": "StellaOps", "name": "bench-auto", "version": "1.0.0"}]
},
"components": [
{
"type": "library",
"purl": purl,
"name": case_id,
"version": case_data.get("version", "1.0.0")
}
]
}
if not dry_run:
with open(evidence_dir / "sbom.cdx.json", 'w', encoding='utf-8') as f:
json.dump(sbom, f, indent=2, sort_keys=True)
# Create OpenVEX decision
openvex = create_openvex_decision(
cve_id=cve_id,
purl=purl,
status=vex_status,
justification=justification,
evidence_hash=evidence_hash,
timestamp=timestamp
)
if not dry_run:
with open(finding_dir / "decision.openvex.json", 'w', encoding='utf-8') as f:
json.dump(openvex, f, indent=2, sort_keys=True)
# Create DSSE envelope stub
dsse = create_dsse_envelope_stub(openvex)
if not dry_run:
with open(finding_dir / "decision.dsse.json", 'w', encoding='utf-8') as f:
json.dump(dsse, f, indent=2, sort_keys=True)
# Create Rekor placeholder
if not dry_run:
with open(finding_dir / "rekor.txt", 'w', encoding='utf-8') as f:
f.write(f"# Rekor log entry placeholder\n")
f.write(f"# Submit DSSE envelope to Rekor to populate this file\n")
f.write(f"log_index: PENDING\n")
f.write(f"uuid: PENDING\n")
f.write(f"timestamp: {timestamp}\n")
# Create metadata
metadata = create_metadata(
cve_id=cve_id,
purl=purl,
variant=variant,
case_id=case_id,
ground_truth=ground_truth,
timestamp=timestamp
)
if not dry_run:
with open(finding_dir / "metadata.json", 'w', encoding='utf-8') as f:
json.dump(metadata, f, indent=2, sort_keys=True)
results["variants_processed"].append({
"variant": variant,
"finding_id": finding_id,
"vex_status": vex_status,
"evidence_hash": evidence_hash
})
return results
def main():
parser = argparse.ArgumentParser(
description="Populate src/__Tests/__Benchmarks/findings/** from reachbench fixtures"
)
parser.add_argument(
"--fixtures",
type=Path,
default=Path("src/__Tests/reachability/fixtures/reachbench-2025-expanded"),
help="Path to reachbench fixtures directory"
)
parser.add_argument(
"--output",
type=Path,
default=Path("src/__Tests/__Benchmarks/findings"),
help="Output directory for findings"
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Print what would be created without writing files"
)
parser.add_argument(
"--limit",
type=int,
default=0,
help="Limit number of cases to process (0 = all)"
)
args = parser.parse_args()
# Resolve paths relative to repo root
repo_root = Path(__file__).parent.parent.parent
fixtures_path = repo_root / args.fixtures if not args.fixtures.is_absolute() else args.fixtures
output_path = repo_root / args.output if not args.output.is_absolute() else args.output
print(f"Fixtures path: {fixtures_path}")
print(f"Output path: {output_path}")
print(f"Dry run: {args.dry_run}")
# Load reachbench index
try:
index = load_reachbench_index(fixtures_path)
except FileNotFoundError as e:
print(f"Error: {e}", file=sys.stderr)
return 1
timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
cases = index.get("cases", [])
if args.limit > 0:
cases = cases[:args.limit]
print(f"Processing {len(cases)} cases...")
all_results = []
for case in cases:
case_id = case["id"]
case_path_rel = case.get("path", f"cases/{case_id}")
case_path = fixtures_path / case_path_rel
if not case_path.exists():
print(f" Warning: Case path not found: {case_path}")
continue
print(f" Processing: {case_id}")
result = populate_finding(
case_id=case_id,
case_data=case,
case_path=case_path,
output_dir=output_path,
timestamp=timestamp,
dry_run=args.dry_run
)
all_results.append(result)
for v in result["variants_processed"]:
print(f" - {v['finding_id']}: {v['vex_status']}")
# Summary
total_findings = sum(len(r["variants_processed"]) for r in all_results)
print(f"\nGenerated {total_findings} findings from {len(all_results)} cases")
if args.dry_run:
print("(dry-run mode - no files written)")
return 0
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,107 +0,0 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: AGPL-3.0-or-later
# BENCH-AUTO-401-019: Run baseline benchmark automation
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
log_info() { echo -e "${GREEN}[INFO]${NC} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
log_error() { echo -e "${RED}[ERROR]${NC} $*"; }
usage() {
echo "Usage: $0 [--populate] [--compute] [--compare BASELINE] [--all]"
echo ""
echo "Run benchmark automation pipeline."
echo ""
echo "Options:"
echo " --populate Populate src/__Tests/__Benchmarks/findings from reachbench fixtures"
echo " --compute Compute metrics from findings"
echo " --compare BASELINE Compare with baseline scanner results"
echo " --all Run all steps (populate + compute)"
echo " --dry-run Don't write files (populate only)"
echo " --limit N Limit cases processed (populate only)"
echo " --help, -h Show this help"
exit 1
}
DO_POPULATE=false
DO_COMPUTE=false
BASELINE_PATH=""
DRY_RUN=""
LIMIT=""
while [[ $# -gt 0 ]]; do
case $1 in
--populate)
DO_POPULATE=true
shift
;;
--compute)
DO_COMPUTE=true
shift
;;
--compare)
BASELINE_PATH="$2"
shift 2
;;
--all)
DO_POPULATE=true
DO_COMPUTE=true
shift
;;
--dry-run)
DRY_RUN="--dry-run"
shift
;;
--limit)
LIMIT="--limit $2"
shift 2
;;
--help|-h)
usage
;;
*)
log_error "Unknown option: $1"
usage
;;
esac
done
if [[ "$DO_POPULATE" == false && "$DO_COMPUTE" == false && -z "$BASELINE_PATH" ]]; then
log_error "No action specified"
usage
fi
cd "$REPO_ROOT"
# Step 1: Populate findings
if [[ "$DO_POPULATE" == true ]]; then
log_info "Step 1: Populating findings from reachbench fixtures..."
python3 scripts/bench/populate-findings.py $DRY_RUN $LIMIT
echo ""
fi
# Step 2: Compute metrics
if [[ "$DO_COMPUTE" == true ]]; then
log_info "Step 2: Computing metrics..."
python3 scripts/bench/compute-metrics.py --json
echo ""
fi
# Step 3: Compare with baseline
if [[ -n "$BASELINE_PATH" ]]; then
log_info "Step 3: Comparing with baseline..."
python3 src/__Tests/__Benchmarks/tools/compare.py --baseline "$BASELINE_PATH" --json
echo ""
fi
log_info "Benchmark automation complete!"
log_info "Results available in src/__Tests/__Benchmarks/results/"

View File

@@ -1,43 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-CONTAINERS-46-001: build air-gap bundle from existing buildx OCI archive
if [[ $# -lt 1 ]]; then
echo "Usage: $0 <image-tag> [bundle-dir]" >&2
exit 64
fi
IMAGE_TAG=$1
BUNDLE_DIR=${2:-"out/bundles/$(echo "$IMAGE_TAG" | tr '/:' '__')"}
SRC_DIR="out/buildx/$(echo "$IMAGE_TAG" | tr '/:' '__')"
OCI_ARCHIVE="${SRC_DIR}/image.oci"
if [[ ! -f "$OCI_ARCHIVE" ]]; then
echo "[airgap] OCI archive not found at $OCI_ARCHIVE. Run build-multiarch first." >&2
exit 66
fi
mkdir -p "$BUNDLE_DIR"
SBOM_FILE=""
if [[ -f "${SRC_DIR}/sbom.syft.json" ]]; then
SBOM_FILE="${SRC_DIR}/sbom.syft.json"
fi
cat > "${BUNDLE_DIR}/bundle-manifest.json" <<EOF
{
"image": "${IMAGE_TAG}",
"oci_archive": "image.oci",
"sbom": "$( [[ -n "$SBOM_FILE" ]] && echo sbom.syft.json || echo null )",
"created_at": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
}
EOF
cp "$OCI_ARCHIVE" "${BUNDLE_DIR}/image.oci"
[[ -n "$SBOM_FILE" ]] && cp "$SBOM_FILE" "${BUNDLE_DIR}/sbom.syft.json"
[[ -f "${SRC_DIR}/image.sha256" ]] && cp "${SRC_DIR}/image.sha256" "${BUNDLE_DIR}/image.sha256"
[[ -f "${SRC_DIR}/image.sig" ]] && cp "${SRC_DIR}/image.sig" "${BUNDLE_DIR}/image.sig"
tar -C "$BUNDLE_DIR" -czf "${BUNDLE_DIR}.tgz" .
echo "[airgap] bundle created at ${BUNDLE_DIR}.tgz"

View File

@@ -1,93 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Multi-arch buildx helper for DEVOPS-CONTAINERS-44-001
# Requirements: docker CLI with buildx, optional syft (for SBOM) and cosign (for signing).
usage() {
echo "Usage: $0 <image-tag> <context-dir> [--platform linux/amd64,linux/arm64] [--push] [--sbom syft|none] [--sign <cosign-key>]" >&2
exit 64
}
if [[ $# -lt 2 ]]; then
usage
fi
IMAGE_TAG=$1; shift
CONTEXT_DIR=$1; shift
PLATFORMS="linux/amd64,linux/arm64"
PUSH=false
SBOM_TOOL="syft"
COSIGN_KEY=""
while [[ $# -gt 0 ]]; do
case "$1" in
--platform) PLATFORMS="$2"; shift 2;;
--push) PUSH=true; shift;;
--sbom) SBOM_TOOL="$2"; shift 2;;
--sign) COSIGN_KEY="$2"; shift 2;;
*) echo "Unknown option: $1" >&2; usage;;
esac
done
if ! command -v docker >/dev/null 2>&1; then
echo "[buildx] docker CLI not found" >&2
exit 69
fi
OUT_ROOT="out/buildx/$(echo "$IMAGE_TAG" | tr '/:' '__')"
mkdir -p "$OUT_ROOT"
BUILDER_NAME="stellaops-multiarch"
if ! docker buildx inspect "$BUILDER_NAME" >/dev/null 2>&1; then
docker buildx create --name "$BUILDER_NAME" --driver docker-container --use >/dev/null
else
docker buildx use "$BUILDER_NAME" >/dev/null
fi
BUILD_OPTS=(
--platform "$PLATFORMS"
-t "$IMAGE_TAG"
--provenance=false
--sbom=false
--output "type=oci,dest=${OUT_ROOT}/image.oci"
)
if $PUSH; then
BUILD_OPTS+=("--push")
fi
echo "[buildx] building $IMAGE_TAG for $PLATFORMS"
docker buildx build "${BUILD_OPTS[@]}" "$CONTEXT_DIR"
echo "[buildx] computing digest"
IMAGE_DIGEST=$(sha256sum "${OUT_ROOT}/image.oci" | awk '{print $1}')
echo "$IMAGE_DIGEST image.oci" > "${OUT_ROOT}/image.sha256"
if [[ "$SBOM_TOOL" == "syft" ]] && command -v syft >/dev/null 2>&1; then
echo "[buildx] generating SBOM via syft"
syft "oci-archive:${OUT_ROOT}/image.oci" -o json > "${OUT_ROOT}/sbom.syft.json"
else
echo "[buildx] skipping SBOM (tool=$SBOM_TOOL, syft available? $(command -v syft >/dev/null && echo yes || echo no))"
fi
if [[ -n "$COSIGN_KEY" ]] && command -v cosign >/dev/null 2>&1; then
echo "[buildx] signing digest with cosign key"
COSIGN_EXPERIMENTAL=1 cosign sign-blob --key "$COSIGN_KEY" --output-signature "${OUT_ROOT}/image.sig" --output-certificate "${OUT_ROOT}/image.cert" "${OUT_ROOT}/image.oci"
else
echo "[buildx] signature skipped (no key provided or cosign missing)"
fi
cat > "${OUT_ROOT}/build-metadata.json" <<EOF
{
"image": "${IMAGE_TAG}",
"platforms": "${PLATFORMS}",
"pushed": ${PUSH},
"digest_sha256": "${IMAGE_DIGEST}",
"generated_at": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
"sbom": "$( [[ -f ${OUT_ROOT}/sbom.syft.json ]] && echo sbom.syft.json || echo null )"
}
EOF
echo "[buildx] artifacts written to ${OUT_ROOT}"

View File

@@ -1,287 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# compute-reachability-metrics.sh
# Computes reachability metrics against ground-truth corpus
#
# Usage: ./compute-reachability-metrics.sh [options]
# --corpus-path PATH Path to ground-truth corpus (default: src/__Tests/reachability/corpus)
# --output FILE Output JSON file (default: stdout)
# --dry-run Show what would be computed without running scanner
# --strict Exit non-zero if any threshold is violated
# --verbose Enable verbose output
#
# Output: JSON with recall, precision, accuracy metrics per vulnerability class
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Default paths
CORPUS_PATH="${REPO_ROOT}/src/__Tests/reachability/corpus"
OUTPUT_FILE=""
DRY_RUN=false
STRICT=false
VERBOSE=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--corpus-path)
CORPUS_PATH="$2"
shift 2
;;
--output)
OUTPUT_FILE="$2"
shift 2
;;
--dry-run)
DRY_RUN=true
shift
;;
--strict)
STRICT=true
shift
;;
--verbose)
VERBOSE=true
shift
;;
-h|--help)
head -20 "$0" | tail -15
exit 0
;;
*)
echo "Unknown option: $1" >&2
exit 1
;;
esac
done
log() {
if [[ "${VERBOSE}" == "true" ]]; then
echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] $*" >&2
fi
}
error() {
echo "[ERROR] $*" >&2
}
# Validate corpus exists
if [[ ! -d "${CORPUS_PATH}" ]]; then
error "Corpus directory not found: ${CORPUS_PATH}"
exit 1
fi
MANIFEST_FILE="${CORPUS_PATH}/manifest.json"
if [[ ! -f "${MANIFEST_FILE}" ]]; then
error "Corpus manifest not found: ${MANIFEST_FILE}"
exit 1
fi
log "Loading corpus from ${CORPUS_PATH}"
log "Manifest: ${MANIFEST_FILE}"
# Initialize counters for each vulnerability class
declare -A true_positives
declare -A false_positives
declare -A false_negatives
declare -A total_expected
CLASSES=("runtime_dep" "os_pkg" "code" "config")
for class in "${CLASSES[@]}"; do
true_positives[$class]=0
false_positives[$class]=0
false_negatives[$class]=0
total_expected[$class]=0
done
if [[ "${DRY_RUN}" == "true" ]]; then
log "[DRY RUN] Would process corpus fixtures..."
# Generate mock metrics for dry-run
cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"corpus_path": "${CORPUS_PATH}",
"dry_run": true,
"metrics": {
"runtime_dep": {
"recall": 0.96,
"precision": 0.94,
"f1_score": 0.95,
"total_expected": 100,
"true_positives": 96,
"false_positives": 6,
"false_negatives": 4
},
"os_pkg": {
"recall": 0.98,
"precision": 0.97,
"f1_score": 0.975,
"total_expected": 50,
"true_positives": 49,
"false_positives": 2,
"false_negatives": 1
},
"code": {
"recall": 0.92,
"precision": 0.90,
"f1_score": 0.91,
"total_expected": 25,
"true_positives": 23,
"false_positives": 3,
"false_negatives": 2
},
"config": {
"recall": 0.88,
"precision": 0.85,
"f1_score": 0.865,
"total_expected": 20,
"true_positives": 18,
"false_positives": 3,
"false_negatives": 2
}
},
"aggregate": {
"overall_recall": 0.9538,
"overall_precision": 0.9302,
"reachability_accuracy": 0.9268
}
}
EOF
exit 0
fi
# Process each fixture in the corpus
log "Processing corpus fixtures..."
# Read manifest and iterate fixtures
FIXTURE_COUNT=$(jq -r '.fixtures | length' "${MANIFEST_FILE}")
log "Found ${FIXTURE_COUNT} fixtures"
for i in $(seq 0 $((FIXTURE_COUNT - 1))); do
FIXTURE_ID=$(jq -r ".fixtures[$i].id" "${MANIFEST_FILE}")
FIXTURE_PATH="${CORPUS_PATH}/$(jq -r ".fixtures[$i].path" "${MANIFEST_FILE}")"
FIXTURE_CLASS=$(jq -r ".fixtures[$i].class" "${MANIFEST_FILE}")
EXPECTED_REACHABLE=$(jq -r ".fixtures[$i].expected_reachable // 0" "${MANIFEST_FILE}")
EXPECTED_UNREACHABLE=$(jq -r ".fixtures[$i].expected_unreachable // 0" "${MANIFEST_FILE}")
log "Processing fixture: ${FIXTURE_ID} (class: ${FIXTURE_CLASS})"
if [[ ! -d "${FIXTURE_PATH}" ]] && [[ ! -f "${FIXTURE_PATH}" ]]; then
error "Fixture not found: ${FIXTURE_PATH}"
continue
fi
# Update expected counts
total_expected[$FIXTURE_CLASS]=$((${total_expected[$FIXTURE_CLASS]} + EXPECTED_REACHABLE))
# Run scanner on fixture (deterministic mode, offline)
# One temp file per fixture; removed at the end of each iteration. (Setting a
# trap here would be overwritten on every pass, leaking all but the last file.)
SCAN_RESULT_FILE=$(mktemp)
if dotnet run --project "${REPO_ROOT}/src/Scanner/StellaOps.Scanner.Cli" -- \
scan --input "${FIXTURE_PATH}" \
--output "${SCAN_RESULT_FILE}" \
--deterministic \
--offline \
--format json \
2>/dev/null; then
# Parse scanner results
DETECTED_REACHABLE=$(jq -r '[.findings[] | select(.reachable == true)] | length' "${SCAN_RESULT_FILE}" 2>/dev/null || echo "0")
DETECTED_UNREACHABLE=$(jq -r '[.findings[] | select(.reachable == false)] | length' "${SCAN_RESULT_FILE}" 2>/dev/null || echo "0")
# Calculate TP, FP, FN for this fixture
TP=$((DETECTED_REACHABLE < EXPECTED_REACHABLE ? DETECTED_REACHABLE : EXPECTED_REACHABLE))
FP=$((DETECTED_REACHABLE > EXPECTED_REACHABLE ? DETECTED_REACHABLE - EXPECTED_REACHABLE : 0))
FN=$((EXPECTED_REACHABLE - TP))
true_positives[$FIXTURE_CLASS]=$((${true_positives[$FIXTURE_CLASS]} + TP))
false_positives[$FIXTURE_CLASS]=$((${false_positives[$FIXTURE_CLASS]} + FP))
false_negatives[$FIXTURE_CLASS]=$((${false_negatives[$FIXTURE_CLASS]} + FN))
else
error "Scanner failed for fixture: ${FIXTURE_ID}"
false_negatives[$FIXTURE_CLASS]=$((${false_negatives[$FIXTURE_CLASS]} + EXPECTED_REACHABLE))
fi
rm -f "${SCAN_RESULT_FILE}"
done
# Calculate metrics per class
calculate_metrics() {
local class=$1
local tp=${true_positives[$class]}
local fp=${false_positives[$class]}
local fn=${false_negatives[$class]}
local total=${total_expected[$class]}
local recall=0
local precision=0
local f1=0
if [[ $((tp + fn)) -gt 0 ]]; then
recall=$(echo "scale=4; $tp / ($tp + $fn)" | bc)
fi
if [[ $((tp + fp)) -gt 0 ]]; then
precision=$(echo "scale=4; $tp / ($tp + $fp)" | bc)
fi
if (( $(echo "$recall + $precision > 0" | bc -l) )); then
f1=$(echo "scale=4; 2 * $recall * $precision / ($recall + $precision)" | bc)
fi
echo "{\"recall\": $recall, \"precision\": $precision, \"f1_score\": $f1, \"total_expected\": $total, \"true_positives\": $tp, \"false_positives\": $fp, \"false_negatives\": $fn}"
}
# Generate output JSON
OUTPUT=$(cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"corpus_path": "${CORPUS_PATH}",
"dry_run": false,
"metrics": {
"runtime_dep": $(calculate_metrics "runtime_dep"),
"os_pkg": $(calculate_metrics "os_pkg"),
"code": $(calculate_metrics "code"),
"config": $(calculate_metrics "config")
},
"aggregate": {
"overall_recall": $(echo "scale=4; (${true_positives[runtime_dep]} + ${true_positives[os_pkg]} + ${true_positives[code]} + ${true_positives[config]}) / (${total_expected[runtime_dep]} + ${total_expected[os_pkg]} + ${total_expected[code]} + ${total_expected[config]} + 0.0001)" | bc),
"overall_precision": $(echo "scale=4; (${true_positives[runtime_dep]} + ${true_positives[os_pkg]} + ${true_positives[code]} + ${true_positives[config]}) / (${true_positives[runtime_dep]} + ${true_positives[os_pkg]} + ${true_positives[code]} + ${true_positives[config]} + ${false_positives[runtime_dep]} + ${false_positives[os_pkg]} + ${false_positives[code]} + ${false_positives[config]} + 0.0001)" | bc)
}
}
EOF
)
# Output results
if [[ -n "${OUTPUT_FILE}" ]]; then
echo "${OUTPUT}" > "${OUTPUT_FILE}"
log "Results written to ${OUTPUT_FILE}"
else
echo "${OUTPUT}"
fi
# Check thresholds in strict mode
if [[ "${STRICT}" == "true" ]]; then
THRESHOLDS_FILE="${SCRIPT_DIR}/reachability-thresholds.yaml"
if [[ -f "${THRESHOLDS_FILE}" ]]; then
log "Checking thresholds from ${THRESHOLDS_FILE}"
# Extract thresholds and check
MIN_RECALL=$(yq -r '.thresholds.runtime_dependency_recall.min // 0.95' "${THRESHOLDS_FILE}")
ACTUAL_RECALL=$(echo "${OUTPUT}" | jq -r '.metrics.runtime_dep.recall')
if (( $(echo "$ACTUAL_RECALL < $MIN_RECALL" | bc -l) )); then
error "Runtime dependency recall ${ACTUAL_RECALL} below threshold ${MIN_RECALL}"
exit 1
fi
log "All thresholds passed"
fi
fi
exit 0

View File

@@ -1,313 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# compute-ttfs-metrics.sh
# Computes Time-to-First-Signal (TTFS) metrics from test runs
#
# Usage: ./compute-ttfs-metrics.sh [options]
# --results-path PATH Path to test results directory
# --output FILE Output JSON file (default: stdout)
# --baseline FILE Baseline TTFS file for comparison
# --dry-run Show what would be computed
# --strict Exit non-zero if thresholds are violated
# --verbose Enable verbose output
#
# Output: JSON with TTFS p50, p95, p99 metrics and regression status
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Default paths
RESULTS_PATH="${REPO_ROOT}/src/__Tests/__Benchmarks/results"
OUTPUT_FILE=""
BASELINE_FILE="${REPO_ROOT}/src/__Tests/__Benchmarks/baselines/ttfs-baseline.json"
DRY_RUN=false
STRICT=false
VERBOSE=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--results-path)
RESULTS_PATH="$2"
shift 2
;;
--output)
OUTPUT_FILE="$2"
shift 2
;;
--baseline)
BASELINE_FILE="$2"
shift 2
;;
--dry-run)
DRY_RUN=true
shift
;;
--strict)
STRICT=true
shift
;;
--verbose)
VERBOSE=true
shift
;;
-h|--help)
head -20 "$0" | tail -15
exit 0
;;
*)
echo "Unknown option: $1" >&2
exit 1
;;
esac
done
log() {
if [[ "${VERBOSE}" == "true" ]]; then
echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] $*" >&2
fi
}
error() {
echo "[ERROR] $*" >&2
}
warn() {
echo "[WARN] $*" >&2
}
# Calculate percentiles from sorted array
percentile() {
local -n arr=$1
local p=$2
local n=${#arr[@]}
if [[ $n -eq 0 ]]; then
echo "0"
return
fi
local idx=$(echo "scale=0; ($n - 1) * $p / 100" | bc)
echo "${arr[$idx]}"
}
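# Nearest-rank percentile over a pre-sorted array, e.g.:
#   sorted=(100 200 300 400); percentile sorted 50   # -> 200 (idx = (4-1)*50/100 = 1)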
if [[ "${DRY_RUN}" == "true" ]]; then
log "[DRY RUN] Would process TTFS metrics..."
cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"dry_run": true,
"results_path": "${RESULTS_PATH}",
"metrics": {
"ttfs_ms": {
"p50": 1250,
"p95": 3500,
"p99": 5200,
"min": 450,
"max": 8500,
"mean": 1850,
"sample_count": 100
},
"by_scan_type": {
"image_scan": {
"p50": 2100,
"p95": 4500,
"p99": 6800
},
"filesystem_scan": {
"p50": 850,
"p95": 1800,
"p99": 2500
},
"sbom_scan": {
"p50": 320,
"p95": 650,
"p99": 950
}
}
},
"baseline_comparison": {
"baseline_path": "${BASELINE_FILE}",
"p50_regression_pct": -2.5,
"p95_regression_pct": 1.2,
"regression_detected": false
}
}
EOF
exit 0
fi
# Validate results directory
if [[ ! -d "${RESULTS_PATH}" ]]; then
error "Results directory not found: ${RESULTS_PATH}"
exit 1
fi
log "Processing TTFS results from ${RESULTS_PATH}"
# Collect all TTFS values from result files
declare -a ttfs_values=()
declare -a image_ttfs=()
declare -a fs_ttfs=()
declare -a sbom_ttfs=()
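# Each result file is expected to expose a shape like (illustrative):
#   { "ttfs_ms": 1250, "scan_type": "image_scan", ... }
# scan_type values are bucketed below: image*/container, filesystem/fs*, sbom*.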
# Find and process all result files; globstar is required for the recursive
# "**/*.json" pattern (without it the glob only matches one directory level)
shopt -s globstar nullglob
for result_file in "${RESULTS_PATH}"/*.json "${RESULTS_PATH}"/**/*.json; do
[[ -f "${result_file}" ]] || continue
log "Processing: ${result_file}"
# Extract TTFS value if present
TTFS=$(jq -r '.ttfs_ms // .time_to_first_signal_ms // empty' "${result_file}" 2>/dev/null || true)
SCAN_TYPE=$(jq -r '.scan_type // "unknown"' "${result_file}" 2>/dev/null || echo "unknown")
if [[ -n "${TTFS}" ]] && [[ "${TTFS}" != "null" ]]; then
ttfs_values+=("${TTFS}")
case "${SCAN_TYPE}" in
image|image_scan|container)
image_ttfs+=("${TTFS}")
;;
filesystem|fs|fs_scan)
fs_ttfs+=("${TTFS}")
;;
sbom|sbom_scan)
sbom_ttfs+=("${TTFS}")
;;
esac
fi
done
# Sort arrays for percentile calculation
IFS=$'\n' ttfs_sorted=($(sort -n <<<"${ttfs_values[*]}")); unset IFS
IFS=$'\n' image_sorted=($(sort -n <<<"${image_ttfs[*]}")); unset IFS
IFS=$'\n' fs_sorted=($(sort -n <<<"${fs_ttfs[*]}")); unset IFS
IFS=$'\n' sbom_sorted=($(sort -n <<<"${sbom_ttfs[*]}")); unset IFS
# Calculate overall metrics
SAMPLE_COUNT=${#ttfs_values[@]}
if [[ $SAMPLE_COUNT -eq 0 ]]; then
warn "No TTFS samples found"
P50=0
P95=0
P99=0
MIN=0
MAX=0
MEAN=0
else
P50=$(percentile ttfs_sorted 50)
P95=$(percentile ttfs_sorted 95)
P99=$(percentile ttfs_sorted 99)
MIN=${ttfs_sorted[0]}
MAX=${ttfs_sorted[-1]}
# Calculate mean
SUM=0
for v in "${ttfs_values[@]}"; do
SUM=$((SUM + v))
done
MEAN=$((SUM / SAMPLE_COUNT))
fi
# Calculate per-type metrics
IMAGE_P50=$(percentile image_sorted 50)
IMAGE_P95=$(percentile image_sorted 95)
IMAGE_P99=$(percentile image_sorted 99)
FS_P50=$(percentile fs_sorted 50)
FS_P95=$(percentile fs_sorted 95)
FS_P99=$(percentile fs_sorted 99)
SBOM_P50=$(percentile sbom_sorted 50)
SBOM_P95=$(percentile sbom_sorted 95)
SBOM_P99=$(percentile sbom_sorted 99)
# Compare against baseline if available
REGRESSION_DETECTED=false
P50_REGRESSION_PCT=0
P95_REGRESSION_PCT=0
if [[ -f "${BASELINE_FILE}" ]]; then
log "Comparing against baseline: ${BASELINE_FILE}"
BASELINE_P50=$(jq -r '.metrics.ttfs_ms.p50 // 0' "${BASELINE_FILE}")
BASELINE_P95=$(jq -r '.metrics.ttfs_ms.p95 // 0' "${BASELINE_FILE}")
if (( $(echo "${BASELINE_P50} > 0" | bc -l) )); then
P50_REGRESSION_PCT=$(echo "scale=2; (${P50} - ${BASELINE_P50}) * 100 / ${BASELINE_P50}" | bc)
fi
if (( $(echo "${BASELINE_P95} > 0" | bc -l) )); then
P95_REGRESSION_PCT=$(echo "scale=2; (${P95} - ${BASELINE_P95}) * 100 / ${BASELINE_P95}" | bc)
fi
# Check for regression (>10% increase)
if (( $(echo "${P50_REGRESSION_PCT} > 10" | bc -l) )) || (( $(echo "${P95_REGRESSION_PCT} > 10" | bc -l) )); then
REGRESSION_DETECTED=true
warn "TTFS regression detected: p50=${P50_REGRESSION_PCT}%, p95=${P95_REGRESSION_PCT}%"
fi
fi
# Generate output
OUTPUT=$(cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"dry_run": false,
"results_path": "${RESULTS_PATH}",
"metrics": {
"ttfs_ms": {
"p50": ${P50},
"p95": ${P95},
"p99": ${P99},
"min": ${MIN},
"max": ${MAX},
"mean": ${MEAN},
"sample_count": ${SAMPLE_COUNT}
},
"by_scan_type": {
"image_scan": {
"p50": ${IMAGE_P50:-0},
"p95": ${IMAGE_P95:-0},
"p99": ${IMAGE_P99:-0}
},
"filesystem_scan": {
"p50": ${FS_P50:-0},
"p95": ${FS_P95:-0},
"p99": ${FS_P99:-0}
},
"sbom_scan": {
"p50": ${SBOM_P50:-0},
"p95": ${SBOM_P95:-0},
"p99": ${SBOM_P99:-0}
}
}
},
"baseline_comparison": {
"baseline_path": "${BASELINE_FILE}",
"p50_regression_pct": ${P50_REGRESSION_PCT},
"p95_regression_pct": ${P95_REGRESSION_PCT},
"regression_detected": ${REGRESSION_DETECTED}
}
}
EOF
)
# Output results
if [[ -n "${OUTPUT_FILE}" ]]; then
echo "${OUTPUT}" > "${OUTPUT_FILE}"
log "Results written to ${OUTPUT_FILE}"
else
echo "${OUTPUT}"
fi
# Strict mode: fail on regression
if [[ "${STRICT}" == "true" ]] && [[ "${REGRESSION_DETECTED}" == "true" ]]; then
error "TTFS regression exceeds threshold"
exit 1
fi
exit 0

View File

@@ -1,326 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# enforce-performance-slos.sh
# Enforces scan time and compute budget SLOs in CI
#
# Usage: ./enforce-performance-slos.sh [options]
# --results-path PATH Path to benchmark results directory
# --slos-file FILE Path to SLO definitions (default: scripts/ci/performance-slos.yaml)
# --output FILE Output JSON file (default: stdout)
# --dry-run Show what would be enforced
# --strict Exit non-zero if any SLO is violated
# --verbose Enable verbose output
#
# Output: JSON with SLO evaluation results and violations
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Default paths
RESULTS_PATH="${REPO_ROOT}/src/__Tests/__Benchmarks/results"
SLOS_FILE="${SCRIPT_DIR}/performance-slos.yaml"
OUTPUT_FILE=""
DRY_RUN=false
STRICT=false
VERBOSE=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--results-path)
RESULTS_PATH="$2"
shift 2
;;
--slos-file)
SLOS_FILE="$2"
shift 2
;;
--output)
OUTPUT_FILE="$2"
shift 2
;;
--dry-run)
DRY_RUN=true
shift
;;
--strict)
STRICT=true
shift
;;
--verbose)
VERBOSE=true
shift
;;
-h|--help)
head -20 "$0" | tail -15
exit 0
;;
*)
echo "Unknown option: $1" >&2
exit 1
;;
esac
done
log() {
if [[ "${VERBOSE}" == "true" ]]; then
echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] $*" >&2
fi
}
error() {
echo "[ERROR] $*" >&2
}
warn() {
echo "[WARN] $*" >&2
}
if [[ "${DRY_RUN}" == "true" ]]; then
log "[DRY RUN] Would enforce performance SLOs..."
cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"dry_run": true,
"results_path": "${RESULTS_PATH}",
"slos_file": "${SLOS_FILE}",
"slo_evaluations": {
"scan_time_p95": {
"slo_name": "Scan Time P95",
"threshold_ms": 30000,
"actual_ms": 25000,
"passed": true,
"margin_pct": 16.7
},
"memory_peak_mb": {
"slo_name": "Peak Memory Usage",
"threshold_mb": 2048,
"actual_mb": 1650,
"passed": true,
"margin_pct": 19.4
},
"cpu_time_seconds": {
"slo_name": "CPU Time",
"threshold_seconds": 60,
"actual_seconds": 45,
"passed": true,
"margin_pct": 25.0
}
},
"summary": {
"total_slos": 3,
"passed": 3,
"failed": 0,
"all_passed": true
}
}
EOF
exit 0
fi
# Validate paths
if [[ ! -d "${RESULTS_PATH}" ]]; then
error "Results directory not found: ${RESULTS_PATH}"
exit 1
fi
if [[ ! -f "${SLOS_FILE}" ]]; then
warn "SLOs file not found: ${SLOS_FILE}, using defaults"
fi
log "Enforcing SLOs from ${SLOS_FILE}"
log "Results path: ${RESULTS_PATH}"
# Initialize evaluation results
VIOLATIONS=()
TOTAL_SLOS=0
PASSED_SLOS=0
# Define default SLOs
declare -A SLOS
SLOS["scan_time_p95_ms"]=30000
SLOS["scan_time_p99_ms"]=60000
SLOS["memory_peak_mb"]=2048
SLOS["cpu_time_seconds"]=120
SLOS["sbom_gen_time_ms"]=10000
SLOS["policy_eval_time_ms"]=5000
# Load SLOs from file if exists
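# NOTE: this loader only understands flat top-level entries, e.g. (illustrative):
#   scan_time_p95_ms: 30000
#   memory_peak_mb: {threshold: 2048}
# Nested documents such as scripts/ci/performance-slos.yaml would need to be
# flattened to these keys first.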
if [[ -f "${SLOS_FILE}" ]]; then
while IFS=: read -r key value; do
key=$(echo "$key" | tr -d ' ')
value=$(echo "$value" | tr -d ' ')
if [[ -n "$key" ]] && [[ -n "$value" ]] && [[ "$key" != "#"* ]]; then
SLOS["$key"]=$value
log "Loaded SLO: ${key}=${value}"
fi
done < <(yq -r 'to_entries | .[] | "\(.key):\(.value.threshold // .value)"' "${SLOS_FILE}" 2>/dev/null || true)
fi
# Collect metrics from results
SCAN_TIMES=()
MEMORY_VALUES=()
CPU_TIMES=()
SBOM_TIMES=()
POLICY_TIMES=()
shopt -s globstar nullglob   # globstar needed for the recursive **/*.json pattern below
for result_file in "${RESULTS_PATH}"/*.json "${RESULTS_PATH}"/**/*.json; do
[[ -f "${result_file}" ]] || continue
log "Processing: ${result_file}"
# Extract metrics
SCAN_TIME=$(jq -r '.duration_ms // .scan_time_ms // empty' "${result_file}" 2>/dev/null || true)
MEMORY=$(jq -r '.peak_memory_mb // .memory_mb // empty' "${result_file}" 2>/dev/null || true)
CPU_TIME=$(jq -r '.cpu_time_seconds // .cpu_seconds // empty' "${result_file}" 2>/dev/null || true)
SBOM_TIME=$(jq -r '.sbom_generation_ms // empty' "${result_file}" 2>/dev/null || true)
POLICY_TIME=$(jq -r '.policy_evaluation_ms // empty' "${result_file}" 2>/dev/null || true)
[[ -n "${SCAN_TIME}" ]] && SCAN_TIMES+=("${SCAN_TIME}")
[[ -n "${MEMORY}" ]] && MEMORY_VALUES+=("${MEMORY}")
[[ -n "${CPU_TIME}" ]] && CPU_TIMES+=("${CPU_TIME}")
[[ -n "${SBOM_TIME}" ]] && SBOM_TIMES+=("${SBOM_TIME}")
[[ -n "${POLICY_TIME}" ]] && POLICY_TIMES+=("${POLICY_TIME}")
done
# Helper: calculate percentile from array
calc_percentile() {
local -n values=$1
local pct=$2
if [[ ${#values[@]} -eq 0 ]]; then
echo "0"
return
fi
IFS=$'\n' sorted=($(sort -n <<<"${values[*]}")); unset IFS
local n=${#sorted[@]}
local idx=$(echo "scale=0; ($n - 1) * $pct / 100" | bc)
echo "${sorted[$idx]}"
}
# Helper: calculate max from array
calc_max() {
local -n values=$1
if [[ ${#values[@]} -eq 0 ]]; then
echo "0"
return
fi
local max=0
for v in "${values[@]}"; do
if (( $(echo "$v > $max" | bc -l) )); then
max=$v
fi
done
echo "$max"
}
# Evaluate each SLO
# NOTE: evaluate_slo mutates TOTAL_SLOS/PASSED_SLOS/VIOLATIONS, so it must run in
# the current shell. Calling it inside $(...) would run it in a subshell and
# silently discard every counter update; its JSON fragment is therefore returned
# via the EVAL_JSON global instead of stdout.
evaluate_slo() {
  local name=$1
  local threshold=$2
  local actual=$3
  local unit=$4
  TOTAL_SLOS=$((TOTAL_SLOS + 1))   # ((TOTAL_SLOS++)) would trip set -e when the counter is 0
  local passed=true
  local margin_pct=0
  if (( $(echo "$actual > $threshold" | bc -l) )); then
    passed=false
    margin_pct=$(echo "scale=2; ($actual - $threshold) * 100 / $threshold" | bc)
    VIOLATIONS+=("${name}: ${actual}${unit} exceeds threshold ${threshold}${unit} (+${margin_pct}%)")
    warn "SLO VIOLATION: ${name} = ${actual}${unit} (threshold: ${threshold}${unit})"
  else
    PASSED_SLOS=$((PASSED_SLOS + 1))
    margin_pct=$(echo "scale=2; ($threshold - $actual) * 100 / $threshold" | bc)
    log "SLO PASSED: ${name} = ${actual}${unit} (threshold: ${threshold}${unit}, margin: ${margin_pct}%)"
  fi
  EVAL_JSON="{\"slo_name\": \"${name}\", \"threshold\": ${threshold}, \"actual\": ${actual}, \"unit\": \"${unit}\", \"passed\": ${passed}, \"margin_pct\": ${margin_pct}}"
}
# Calculate actuals
SCAN_P95=$(calc_percentile SCAN_TIMES 95)
SCAN_P99=$(calc_percentile SCAN_TIMES 99)
MEMORY_MAX=$(calc_max MEMORY_VALUES)
CPU_MAX=$(calc_max CPU_TIMES)
SBOM_P95=$(calc_percentile SBOM_TIMES 95)
POLICY_P95=$(calc_percentile POLICY_TIMES 95)
# Run evaluations (plain calls so counter updates survive; JSON comes back in EVAL_JSON)
evaluate_slo "Scan Time P95" "${SLOS[scan_time_p95_ms]}" "${SCAN_P95}" "ms";        SLO_SCAN_P95="$EVAL_JSON"
evaluate_slo "Scan Time P99" "${SLOS[scan_time_p99_ms]}" "${SCAN_P99}" "ms";        SLO_SCAN_P99="$EVAL_JSON"
evaluate_slo "Peak Memory" "${SLOS[memory_peak_mb]}" "${MEMORY_MAX}" "MB";          SLO_MEMORY="$EVAL_JSON"
evaluate_slo "CPU Time" "${SLOS[cpu_time_seconds]}" "${CPU_MAX}" "s";               SLO_CPU="$EVAL_JSON"
evaluate_slo "SBOM Generation P95" "${SLOS[sbom_gen_time_ms]}" "${SBOM_P95}" "ms";  SLO_SBOM="$EVAL_JSON"
evaluate_slo "Policy Evaluation P95" "${SLOS[policy_eval_time_ms]}" "${POLICY_P95}" "ms"; SLO_POLICY="$EVAL_JSON"
# Generate output
ALL_PASSED=true
if [[ ${#VIOLATIONS[@]} -gt 0 ]]; then
ALL_PASSED=false
fi
# Build violations JSON array
VIOLATIONS_JSON="[]"
if [[ ${#VIOLATIONS[@]} -gt 0 ]]; then
VIOLATIONS_JSON="["
for i in "${!VIOLATIONS[@]}"; do
[[ $i -gt 0 ]] && VIOLATIONS_JSON+=","
VIOLATIONS_JSON+="\"${VIOLATIONS[$i]}\""
done
VIOLATIONS_JSON+="]"
fi
OUTPUT=$(cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"dry_run": false,
"results_path": "${RESULTS_PATH}",
"slos_file": "${SLOS_FILE}",
"slo_evaluations": {
"scan_time_p95": ${SLO_SCAN_P95},
"scan_time_p99": ${SLO_SCAN_P99},
"memory_peak_mb": ${SLO_MEMORY},
"cpu_time_seconds": ${SLO_CPU},
"sbom_gen_time_ms": ${SLO_SBOM},
"policy_eval_time_ms": ${SLO_POLICY}
},
"summary": {
"total_slos": ${TOTAL_SLOS},
"passed": ${PASSED_SLOS},
"failed": $((TOTAL_SLOS - PASSED_SLOS)),
"all_passed": ${ALL_PASSED},
"violations": ${VIOLATIONS_JSON}
}
}
EOF
)
# Output results
if [[ -n "${OUTPUT_FILE}" ]]; then
echo "${OUTPUT}" > "${OUTPUT_FILE}"
log "Results written to ${OUTPUT_FILE}"
else
echo "${OUTPUT}"
fi
# Strict mode: fail on violations
if [[ "${STRICT}" == "true" ]] && [[ "${ALL_PASSED}" == "false" ]]; then
error "Performance SLO violations detected"
for v in "${VIOLATIONS[@]}"; do
error " - ${v}"
done
exit 1
fi
exit 0

View File

@@ -1,94 +0,0 @@
# =============================================================================
# Performance SLOs (Service Level Objectives)
# Reference: Testing and Quality Guardrails Technical Reference
#
# These SLOs define the performance budgets for CI quality gates.
# Violations will be flagged and may block releases.
# =============================================================================
# Scan Time SLOs (milliseconds)
scan_time:
p50:
threshold: 15000
description: "50th percentile scan time"
severity: "info"
p95:
threshold: 30000
description: "95th percentile scan time - primary SLO"
severity: "warning"
p99:
threshold: 60000
description: "99th percentile scan time - tail latency"
severity: "critical"
# Memory Usage SLOs (megabytes)
memory:
peak_mb:
threshold: 2048
description: "Peak memory usage during scan"
severity: "warning"
average_mb:
threshold: 1024
description: "Average memory usage"
severity: "info"
# CPU Time SLOs (seconds)
cpu:
max_seconds:
threshold: 120
description: "Maximum CPU time per scan"
severity: "warning"
average_seconds:
threshold: 60
description: "Average CPU time per scan"
severity: "info"
# Component-Specific SLOs (milliseconds)
components:
sbom_generation:
p95:
threshold: 10000
description: "SBOM generation time P95"
severity: "warning"
policy_evaluation:
p95:
threshold: 5000
description: "Policy evaluation time P95"
severity: "warning"
reachability_analysis:
p95:
threshold: 20000
description: "Reachability analysis time P95"
severity: "warning"
vulnerability_matching:
p95:
threshold: 8000
description: "Vulnerability matching time P95"
severity: "warning"
# Resource Budget SLOs
resource_budgets:
disk_io_mb:
threshold: 500
description: "Maximum disk I/O per scan"
network_calls:
threshold: 0
description: "Network calls (should be zero for offline scans)"
temp_storage_mb:
threshold: 1024
description: "Maximum temporary storage usage"
# Regression Thresholds
regression:
max_degradation_pct: 10
warning_threshold_pct: 5
baseline_window_days: 30
# Override Configuration
overrides:
allowed_labels:
- "performance-override"
- "large-scan"
required_approvers:
- "platform"
- "performance"

View File

@@ -1,102 +0,0 @@
# =============================================================================
# Reachability Quality Gate Thresholds
# Reference: Testing and Quality Guardrails Technical Reference
#
# These thresholds are enforced by CI quality gates. Violations will block PRs
# unless an override is explicitly approved.
# =============================================================================
thresholds:
# Runtime dependency recall: percentage of runtime dependency vulns detected
runtime_dependency_recall:
min: 0.95
description: "Percentage of runtime dependency vulnerabilities detected"
severity: "critical"
# OS package recall: percentage of OS package vulns detected
os_package_recall:
min: 0.97
description: "Percentage of OS package vulnerabilities detected"
severity: "critical"
# Code vulnerability recall: percentage of code-level vulns detected
code_vulnerability_recall:
min: 0.90
description: "Percentage of code vulnerabilities detected"
severity: "high"
# Configuration vulnerability recall
config_vulnerability_recall:
min: 0.85
description: "Percentage of configuration vulnerabilities detected"
severity: "medium"
# False positive rate for unreachable findings
unreachable_false_positives:
max: 0.05
description: "Rate of false positives for unreachable findings"
severity: "high"
# Reachability underreport rate: missed reachable findings
reachability_underreport:
max: 0.10
description: "Rate of reachable findings incorrectly marked unreachable"
severity: "critical"
# Overall precision across all classes
overall_precision:
min: 0.90
description: "Overall precision across all vulnerability classes"
severity: "high"
# F1 score threshold
f1_score_min:
min: 0.90
description: "Minimum F1 score across vulnerability classes"
severity: "high"
# Class-specific thresholds
class_thresholds:
runtime_dep:
recall_min: 0.95
precision_min: 0.92
f1_min: 0.93
os_pkg:
recall_min: 0.97
precision_min: 0.95
f1_min: 0.96
code:
recall_min: 0.90
precision_min: 0.88
f1_min: 0.89
config:
recall_min: 0.85
precision_min: 0.80
f1_min: 0.82
# Regression detection settings
regression:
# Maximum allowed regression from baseline (percentage points)
max_recall_regression: 0.02
max_precision_regression: 0.03
# Path to baseline metrics file
baseline_path: "bench/baselines/reachability-baseline.json"
# How many consecutive failures before blocking
failure_threshold: 2
# Override configuration
overrides:
# Allow temporary bypass for specific PR labels
bypass_labels:
- "quality-gate-override"
- "wip"
# Require explicit approval from these teams
required_approvers:
- "platform"
- "reachability"

View File

@@ -1,16 +0,0 @@
#!/usr/bin/env bash
# Safe-ish workspace cleanup when the runner hits “No space left on device”.
# Deletes build/test outputs that are regenerated; preserves offline caches and sources.
set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
echo "Cleaning workspace outputs under: ${ROOT}"
rm -rf "${ROOT}/TestResults" || true
rm -rf "${ROOT}/out" || true
rm -rf "${ROOT}/artifacts" || true
# Trim common temp locations if they exist in repo workspace
[ -d "${ROOT}/tmp" ] && find "${ROOT}/tmp" -mindepth 1 -maxdepth 1 -exec rm -rf {} +
echo "Done. Consider also clearing any runner-level /tmp outside the workspace if safe."

View File

@@ -1,131 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-CLI-41-001: Build multi-platform CLI binaries with SBOM and checksums.
# Updated: SPRINT_5100_0001_0001 - CLI Consolidation: includes Aoc and Symbols plugins
RIDS="${RIDS:-linux-x64,win-x64,osx-arm64}"
CONFIG="${CONFIG:-Release}"
PROJECT="src/Cli/StellaOps.Cli/StellaOps.Cli.csproj"
OUT_ROOT="out/cli"
SBOM_TOOL="${SBOM_TOOL:-syft}" # syft|none
SIGN="${SIGN:-false}"
COSIGN_KEY="${COSIGN_KEY:-}"
# CLI Plugins to include in the distribution
# SPRINT_5100_0001_0001: CLI Consolidation - stella aoc and stella symbols
PLUGIN_PROJECTS=(
"src/Cli/__Libraries/StellaOps.Cli.Plugins.Aoc/StellaOps.Cli.Plugins.Aoc.csproj"
"src/Cli/__Libraries/StellaOps.Cli.Plugins.Symbols/StellaOps.Cli.Plugins.Symbols.csproj"
)
PLUGIN_MANIFESTS=(
"src/Cli/plugins/cli/StellaOps.Cli.Plugins.Aoc/stellaops.cli.plugins.aoc.manifest.json"
"src/Cli/plugins/cli/StellaOps.Cli.Plugins.Symbols/stellaops.cli.plugins.symbols.manifest.json"
)
IFS=',' read -ra TARGETS <<< "$RIDS"
mkdir -p "$OUT_ROOT"
if ! command -v dotnet >/dev/null 2>&1; then
echo "[cli-build] dotnet CLI not found" >&2
exit 69
fi
generate_sbom() {
local dir="$1"
local sbom="$2"
if [[ "$SBOM_TOOL" == "syft" ]] && command -v syft >/dev/null 2>&1; then
syft "dir:${dir}" -o json > "$sbom"
fi
}
sign_file() {
local file="$1"
if [[ "$SIGN" == "true" && -n "$COSIGN_KEY" && -x "$(command -v cosign || true)" ]]; then
COSIGN_EXPERIMENTAL=1 cosign sign-blob --key "$COSIGN_KEY" --output-signature "${file}.sig" "$file"
fi
}
for rid in "${TARGETS[@]}"; do
echo "[cli-build] publishing for $rid"
out_dir="${OUT_ROOT}/${rid}"
publish_dir="${out_dir}/publish"
plugins_dir="${publish_dir}/plugins/cli"
mkdir -p "$publish_dir"
mkdir -p "$plugins_dir"
# Build main CLI
dotnet publish "$PROJECT" -c "$CONFIG" -r "$rid" \
-o "$publish_dir" \
--self-contained true \
-p:PublishSingleFile=true \
-p:PublishTrimmed=false \
-p:DebugType=None \
>/dev/null
# Build and copy plugins
# SPRINT_5100_0001_0001: CLI Consolidation
for i in "${!PLUGIN_PROJECTS[@]}"; do
plugin_project="${PLUGIN_PROJECTS[$i]}"
manifest_path="${PLUGIN_MANIFESTS[$i]}"
if [[ ! -f "$plugin_project" ]]; then
echo "[cli-build] WARNING: Plugin project not found: $plugin_project"
continue
fi
# Get plugin name from project path
plugin_name=$(basename "$(dirname "$plugin_project")")
plugin_out="${plugins_dir}/${plugin_name}"
mkdir -p "$plugin_out"
echo "[cli-build] building plugin: $plugin_name"
dotnet publish "$plugin_project" -c "$CONFIG" -r "$rid" \
-o "$plugin_out" \
--self-contained false \
-p:DebugType=None \
>/dev/null 2>&1 || echo "[cli-build] WARNING: Plugin build failed for $plugin_name (may have pre-existing errors)"
# Copy manifest file
if [[ -f "$manifest_path" ]]; then
cp "$manifest_path" "$plugin_out/"
else
echo "[cli-build] WARNING: Manifest not found: $manifest_path"
fi
done
# Package the publish directory. tar archives relative to $publish_dir so the
# tarball neither embeds the out/cli/<rid>/publish prefix nor fails to resolve
# the path after -C; zip runs from inside the directory (the previous `zip -jr`
# junked paths, which would flatten the plugins/cli/... layout).
archive_ext="tar.gz"
if [[ "$rid" == win-* ]]; then
archive_ext="zip"
fi
archive_name="stella-cli-${rid}.${archive_ext}"
archive_path="${out_dir}/${archive_name}"
if [[ "$archive_ext" == "zip" ]]; then
archive_abs="$(cd "$out_dir" && pwd)/${archive_name}"
(cd "$publish_dir" && zip -qr "$archive_abs" .)
else
tar -C "$publish_dir" -czf "$archive_path" .
fi
sha256sum "$archive_path" > "${archive_path}.sha256"
sign_file "$archive_path"
# SBOM
generate_sbom "$publish_dir" "${archive_path}.sbom.json"
done
# Build manifest
manifest="${OUT_ROOT}/manifest.json"
# (plugin ids are listed explicitly in the manifest below)
cat > "$manifest" <<EOF
{
"generated_at": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
"config": "$CONFIG",
"rids": [$(printf '"%s",' "${TARGETS[@]}" | sed 's/,$//')],
"plugins": ["stellaops.cli.plugins.aoc", "stellaops.cli.plugins.symbols"],
"artifacts_root": "$OUT_ROOT",
"notes": "CLI Consolidation (SPRINT_5100_0001_0001) - includes aoc and symbols plugins"
}
EOF
echo "[cli-build] artifacts in $OUT_ROOT"

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Helper to stage and commit the prep/doc updates once disk/PTY issues are resolved.
# Usage: ./scripts/commit-prep-artifacts.sh "Your commit message"
root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$root"
git add \
docs/modules/policy/prep/2025-11-20-policy-airgap-prep.md \
docs/modules/policy/prep/2025-11-20-policy-aoc-prep.md \
docs/modules/policy/prep/2025-11-20-policy-attest-prep.md \
docs/modules/policy/prep/2025-11-21-policy-metrics-29-004-prep.md \
docs/modules/policy/prep/2025-11-21-policy-path-scope-29-002-prep.md \
docs/modules/scanner/prep/2025-11-21-scanner-records-prep.md \
docs/samples/prep/2025-11-20-lnm-22-001-prep.md \
docs/implplan/SPRINT_0123_0001_0001_policy_reasoning.md \
docs/implplan/SPRINT_0125_0001_0001_policy_reasoning.md \
docs/implplan/SPRINT_0131_0001_0001_scanner_surface.md
git status --short
msg="${1:-Start prep on policy path/scope, metrics/logging, and scanner record payloads}"
git commit -m "$msg"

View File

@@ -1,87 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Postgres backfill runner for STORE-AOC-19-005-DEV (Link-Not-Merge raw linksets/chunks)
# Usage:
# PGURI=postgres://.../concelier ./scripts/concelier/backfill-store-aoc-19-005.sh /path/to/linksets-stage-backfill.tar.zst
# Optional:
# PGSCHEMA=lnm_raw (default), DRY_RUN=1 to stop after extraction
#
# Assumptions:
# - Dataset contains ndjson files: linksets.ndjson, advisory_chunks.ndjson, manifest.json
# - Target staging tables are created by this script if absent:
# <schema>.linksets_raw(id text primary key, raw jsonb)
# <schema>.advisory_chunks_raw(id text primary key, raw jsonb)
DATASET_PATH="${1:-}"
if [[ -z "${DATASET_PATH}" || ! -f "${DATASET_PATH}" ]]; then
echo "Dataset tarball not found. Provide path to linksets-stage-backfill.tar.zst" >&2
exit 1
fi
PGURI="${PGURI:-${CONCELIER_PG_URI:-}}"
PGSCHEMA="${PGSCHEMA:-lnm_raw}"
DRY_RUN="${DRY_RUN:-0}"
if [[ -z "${PGURI}" ]]; then
echo "PGURI (or CONCELIER_PG_URI) must be set" >&2
exit 1
fi
WORKDIR="$(mktemp -d)"
cleanup() { rm -rf "${WORKDIR}"; }
trap cleanup EXIT
echo "==> Dataset: ${DATASET_PATH}"
sha256sum "${DATASET_PATH}"
echo "==> Extracting to ${WORKDIR}"
tar -xf "${DATASET_PATH}" -C "${WORKDIR}"
for required in linksets.ndjson advisory_chunks.ndjson manifest.json; do
if [[ ! -f "${WORKDIR}/${required}" ]]; then
echo "Missing required file in dataset: ${required}" >&2
exit 1
fi
done
echo "==> Ensuring staging schema/tables exist in Postgres"
psql "${PGURI}" <<SQL
create schema if not exists ${PGSCHEMA};
create table if not exists ${PGSCHEMA}.linksets_raw (
id text primary key,
raw jsonb not null
);
create table if not exists ${PGSCHEMA}.advisory_chunks_raw (
id text primary key,
raw jsonb not null
);
SQL
if [[ "${DRY_RUN}" != "0" ]]; then
echo "DRY_RUN=1 set; extraction and schema verification completed, skipping import."
exit 0
fi
echo "==> Importing linksets into ${PGSCHEMA}.linksets_raw"
cat >"${WORKDIR}/linksets.tsv" <(jq -rc '[._id, .] | @tsv' "${WORKDIR}/linksets.ndjson")
psql "${PGURI}" <<SQL
TRUNCATE TABLE ${PGSCHEMA}.linksets_raw;
\copy ${PGSCHEMA}.linksets_raw (id, raw) FROM '${WORKDIR}/linksets.tsv' WITH (FORMAT csv, DELIMITER E'\t', QUOTE '"', ESCAPE '"');
SQL
echo "==> Importing advisory_chunks into ${PGSCHEMA}.advisory_chunks_raw"
cat >"${WORKDIR}/advisory_chunks.tsv" <(jq -rc '[._id, .] | @tsv' "${WORKDIR}/advisory_chunks.ndjson")
psql "${PGURI}" <<SQL
TRUNCATE TABLE ${PGSCHEMA}.advisory_chunks_raw;
\copy ${PGSCHEMA}.advisory_chunks_raw (id, raw) FROM '${WORKDIR}/advisory_chunks.tsv' WITH (FORMAT csv, DELIMITER E'\t', QUOTE '"', ESCAPE '"');
SQL
echo "==> Post-import counts"
psql -tA "${PGURI}" -c "select 'linksets_raw='||count(*) from ${PGSCHEMA}.linksets_raw;"
psql -tA "${PGURI}" -c "select 'advisory_chunks_raw='||count(*) from ${PGSCHEMA}.advisory_chunks_raw;"
echo "==> Manifest summary"
cat "${WORKDIR}/manifest.json"
echo "Backfill complete."

View File

@@ -1,74 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Deterministic dataset builder for STORE-AOC-19-005-DEV.
# Generates linksets-stage-backfill.tar.zst from repo seed data.
# Usage:
# ./scripts/concelier/build-store-aoc-19-005-dataset.sh [output_tarball]
# Default output: out/linksets/linksets-stage-backfill.tar.zst
command -v tar >/dev/null || { echo "tar is required" >&2; exit 1; }
command -v sha256sum >/dev/null || { echo "sha256sum is required" >&2; exit 1; }
TAR_COMPRESS=()
if command -v zstd >/dev/null 2>&1; then
TAR_COMPRESS=(--zstd)
else
echo "zstd not found; building uncompressed tarball (extension kept for compatibility)" >&2
fi
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
SEED_DIR="${ROOT_DIR}/seed-data/concelier/store-aoc-19-005"
OUT_DIR="${ROOT_DIR}/out/linksets"
OUT_PATH="${1:-${OUT_DIR}/linksets-stage-backfill.tar.zst}"
GEN_TIME="2025-12-07T00:00:00Z"
for seed in linksets.ndjson advisory_chunks.ndjson; do
if [[ ! -f "${SEED_DIR}/${seed}" ]]; then
echo "Missing seed file: ${SEED_DIR}/${seed}" >&2
exit 1
fi
done
WORKDIR="$(mktemp -d)"
cleanup() { rm -rf "${WORKDIR}"; }
trap cleanup EXIT
cp "${SEED_DIR}/linksets.ndjson" "${WORKDIR}/linksets.ndjson"
cp "${SEED_DIR}/advisory_chunks.ndjson" "${WORKDIR}/advisory_chunks.ndjson"
linksets_sha=$(sha256sum "${WORKDIR}/linksets.ndjson" | awk '{print $1}')
advisory_sha=$(sha256sum "${WORKDIR}/advisory_chunks.ndjson" | awk '{print $1}')
linksets_count=$(wc -l < "${WORKDIR}/linksets.ndjson" | tr -d '[:space:]')
advisory_count=$(wc -l < "${WORKDIR}/advisory_chunks.ndjson" | tr -d '[:space:]')
cat >"${WORKDIR}/manifest.json" <<EOF
{
"datasetId": "store-aoc-19-005-dev",
"generatedAt": "${GEN_TIME}",
"source": "seed-data/concelier/store-aoc-19-005",
"records": {
"linksets": ${linksets_count},
"advisory_chunks": ${advisory_count}
},
"sha256": {
"linksets.ndjson": "${linksets_sha}",
"advisory_chunks.ndjson": "${advisory_sha}"
}
}
EOF
mkdir -p "${OUT_DIR}"
tar "${TAR_COMPRESS[@]}" \
--format=ustar \
--mtime='1970-01-01 00:00:00Z' \
--owner=0 --group=0 --numeric-owner \
-cf "${OUT_PATH}" \
-C "${WORKDIR}" \
linksets.ndjson advisory_chunks.ndjson manifest.json
sha256sum "${OUT_PATH}" > "${OUT_PATH}.sha256"
echo "Wrote ${OUT_PATH}"
cat "${OUT_PATH}.sha256"

View File

@@ -1,55 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Export Concelier linksets/advisory_chunks from Postgres to a tar.zst bundle.
# Usage:
# PGURI=postgres://user:pass@host:5432/db \
# ./scripts/concelier/export-linksets-tarball.sh out/linksets/linksets-stage-backfill.tar.zst
#
# Optional env:
# PGSCHEMA=public # schema that owns linksets/advisory_chunks
# LINKSETS_TABLE=linksets # table name for linksets
# CHUNKS_TABLE=advisory_chunks # table name for advisory chunks
# TMPDIR=/tmp/export-linksets # working directory (defaults to mktemp)
TARGET="${1:-}"
if [[ -z "${TARGET}" ]]; then
echo "Usage: PGURI=... $0 out/linksets/linksets-stage-backfill.tar.zst" >&2
exit 1
fi
if [[ -z "${PGURI:-}" ]]; then
echo "PGURI environment variable is required (postgres://...)" >&2
exit 1
fi
PGSCHEMA="${PGSCHEMA:-public}"
LINKSETS_TABLE="${LINKSETS_TABLE:-linksets}"
CHUNKS_TABLE="${CHUNKS_TABLE:-advisory_chunks}"
WORKDIR="${TMPDIR:-$(mktemp -d)}"
mkdir -p "${WORKDIR}"
OUTDIR="$(dirname "${TARGET}")"
mkdir -p "${OUTDIR}"
echo "==> Exporting linksets from ${PGSCHEMA}.${LINKSETS_TABLE}"
psql "${PGURI}" -c "\copy (select row_to_json(t) from ${PGSCHEMA}.${LINKSETS_TABLE} t) to '${WORKDIR}/linksets.ndjson'"
echo "==> Exporting advisory_chunks from ${PGSCHEMA}.${CHUNKS_TABLE}"
psql "${PGURI}" -c "\copy (select row_to_json(t) from ${PGSCHEMA}.${CHUNKS_TABLE} t) to '${WORKDIR}/advisory_chunks.ndjson'"
LINKSETS_COUNT="$(wc -l < "${WORKDIR}/linksets.ndjson")"
CHUNKS_COUNT="$(wc -l < "${WORKDIR}/advisory_chunks.ndjson")"
echo "==> Writing manifest.json"
jq -n --argjson linksets "${LINKSETS_COUNT}" --argjson advisory_chunks "${CHUNKS_COUNT}" \
'{linksets: $linksets, advisory_chunks: $advisory_chunks}' \
> "${WORKDIR}/manifest.json"
echo "==> Building tarball ${TARGET}"
tar -I "zstd -19" -cf "${TARGET}" -C "${WORKDIR}" linksets.ndjson advisory_chunks.ndjson manifest.json
echo "==> SHA-256"
sha256sum "${TARGET}"
echo "Done. Workdir: ${WORKDIR}"

View File

@@ -1,90 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Validates the store-aoc-19-005 dataset tarball.
# Usage: ./scripts/concelier/test-store-aoc-19-005-dataset.sh [tarball]
command -v tar >/dev/null || { echo "tar is required" >&2; exit 1; }
command -v sha256sum >/dev/null || { echo "sha256sum is required" >&2; exit 1; }
command -v python3 >/dev/null || { echo "python3 is required" >&2; exit 1; }
DATASET="${1:-out/linksets/linksets-stage-backfill.tar.zst}"
if [[ ! -f "${DATASET}" ]]; then
echo "Dataset not found: ${DATASET}" >&2
exit 1
fi
WORKDIR="$(mktemp -d)"
cleanup() { rm -rf "${WORKDIR}"; }
trap cleanup EXIT
tar -xf "${DATASET}" -C "${WORKDIR}"
for required in linksets.ndjson advisory_chunks.ndjson manifest.json; do
if [[ ! -f "${WORKDIR}/${required}" ]]; then
echo "Missing ${required} in dataset" >&2
exit 1
fi
done
manifest="${WORKDIR}/manifest.json"
expected_linksets=$(python - <<'PY' "${manifest}"
import json, sys
with open(sys.argv[1], "r", encoding="utf-8") as f:
data = json.load(f)
print(data["records"]["linksets"])
PY
)
expected_chunks=$(python - <<'PY' "${manifest}"
import json, sys
with open(sys.argv[1], "r", encoding="utf-8") as f:
data = json.load(f)
print(data["records"]["advisory_chunks"])
PY
)
expected_linksets_sha=$(python - <<'PY' "${manifest}"
import json, sys
with open(sys.argv[1], "r", encoding="utf-8") as f:
data = json.load(f)
print(data["sha256"]["linksets.ndjson"])
PY
)
expected_chunks_sha=$(python - <<'PY' "${manifest}"
import json, sys
with open(sys.argv[1], "r", encoding="utf-8") as f:
data = json.load(f)
print(data["sha256"]["advisory_chunks.ndjson"])
PY
)
actual_linksets=$(wc -l < "${WORKDIR}/linksets.ndjson" | tr -d '[:space:]')
actual_chunks=$(wc -l < "${WORKDIR}/advisory_chunks.ndjson" | tr -d '[:space:]')
actual_linksets_sha=$(sha256sum "${WORKDIR}/linksets.ndjson" | awk '{print $1}')
actual_chunks_sha=$(sha256sum "${WORKDIR}/advisory_chunks.ndjson" | awk '{print $1}')
if [[ "${expected_linksets}" != "${actual_linksets}" ]]; then
echo "linksets count mismatch: expected ${expected_linksets}, got ${actual_linksets}" >&2
exit 1
fi
if [[ "${expected_chunks}" != "${actual_chunks}" ]]; then
echo "advisory_chunks count mismatch: expected ${expected_chunks}, got ${actual_chunks}" >&2
exit 1
fi
if [[ "${expected_linksets_sha}" != "${actual_linksets_sha}" ]]; then
echo "linksets sha mismatch: expected ${expected_linksets_sha}, got ${actual_linksets_sha}" >&2
exit 1
fi
if [[ "${expected_chunks_sha}" != "${actual_chunks_sha}" ]]; then
echo "advisory_chunks sha mismatch: expected ${expected_chunks_sha}, got ${actual_chunks_sha}" >&2
exit 1
fi
echo "Dataset validation succeeded:"
echo " linksets: ${actual_linksets}"
echo " advisory_chunks: ${actual_chunks}"
echo " linksets.sha256=${actual_linksets_sha}"
echo " advisory_chunks.sha256=${actual_chunks_sha}"

View File

@@ -1,57 +0,0 @@
#!/usr/bin/env python3
"""Add a new corpus case from a template."""
from __future__ import annotations
import argparse
from datetime import datetime, timezone
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
CORPUS = ROOT / "bench" / "golden-corpus" / "categories"
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument("--category", required=True)
parser.add_argument("--name", required=True)
args = parser.parse_args()
case_dir = CORPUS / args.category / args.name
(case_dir / "input").mkdir(parents=True, exist_ok=True)
(case_dir / "expected").mkdir(parents=True, exist_ok=True)
created_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
(case_dir / "case-manifest.json").write_text(
'{\n'
f' "id": "{args.name}",\n'
f' "category": "{args.category}",\n'
' "description": "New corpus case",\n'
f' "createdAt": "{created_at}",\n'
' "inputs": ["sbom-cyclonedx.json", "sbom-spdx.json", "image.tar.gz"],\n'
' "expected": ["verdict.json", "evidence-index.json", "unknowns.json", "delta-verdict.json"]\n'
'}\n',
encoding="utf-8",
)
for rel in [
"input/sbom-cyclonedx.json",
"input/sbom-spdx.json",
"input/image.tar.gz",
"expected/verdict.json",
"expected/evidence-index.json",
"expected/unknowns.json",
"expected/delta-verdict.json",
"run-manifest.json",
]:
target = case_dir / rel
if target.suffix == ".gz":
target.touch()
else:
target.write_text("{}\n", encoding="utf-8")
print(f"Created case at {case_dir}")
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,48 +0,0 @@
#!/usr/bin/env python3
"""Check determinism by verifying manifest digests match stored values."""
from __future__ import annotations
import hashlib
import json
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
MANIFEST = ROOT / "bench" / "golden-corpus" / "corpus-manifest.json"
def sha256(path: Path) -> str:
h = hashlib.sha256()
with path.open("rb") as fh:
while True:
chunk = fh.read(8192)
if not chunk:
break
h.update(chunk)
return h.hexdigest()
def main() -> int:
if not MANIFEST.exists():
print(f"Manifest not found: {MANIFEST}")
return 1
data = json.loads(MANIFEST.read_text(encoding="utf-8"))
mismatches = []
for case in data.get("cases", []):
path = ROOT / case["path"]
manifest_path = path / "case-manifest.json"
digest = f"sha256:{sha256(manifest_path)}"
if digest != case.get("manifestDigest"):
mismatches.append({"id": case.get("id"), "expected": case.get("manifestDigest"), "actual": digest})
if mismatches:
print(json.dumps({"status": "fail", "mismatches": mismatches}, indent=2))
return 1
print(json.dumps({"status": "ok", "checked": len(data.get("cases", []))}, indent=2))
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env python3
"""Generate corpus-manifest.json from case directories."""
from __future__ import annotations
import hashlib
import json
from datetime import datetime, timezone
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
CORPUS = ROOT / "bench" / "golden-corpus" / "categories"
OUTPUT = ROOT / "bench" / "golden-corpus" / "corpus-manifest.json"
def sha256(path: Path) -> str:
h = hashlib.sha256()
with path.open("rb") as fh:
while True:
chunk = fh.read(8192)
if not chunk:
break
h.update(chunk)
return h.hexdigest()
def main() -> int:
cases = []
for case_dir in sorted([p for p in CORPUS.rglob("*") if p.is_dir() and (p / "case-manifest.json").exists()]):
manifest_path = case_dir / "case-manifest.json"
cases.append({
"id": case_dir.name,
"path": str(case_dir.relative_to(ROOT)).replace("\\", "/"),
"manifestDigest": f"sha256:{sha256(manifest_path)}",
})
payload = {
"generatedAt": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
"caseCount": len(cases),
"cases": cases,
}
OUTPUT.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8")
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,54 +0,0 @@
#!/usr/bin/env python3
"""Validate golden corpus case structure."""
from __future__ import annotations
import json
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
CORPUS = ROOT / "bench" / "golden-corpus" / "categories"
REQUIRED = [
"case-manifest.json",
"run-manifest.json",
"input/sbom-cyclonedx.json",
"input/sbom-spdx.json",
"input/image.tar.gz",
"expected/verdict.json",
"expected/evidence-index.json",
"expected/unknowns.json",
"expected/delta-verdict.json",
]
def validate_case(case_dir: Path) -> list[str]:
missing = []
for rel in REQUIRED:
if not (case_dir / rel).exists():
missing.append(rel)
return missing
def main() -> int:
if not CORPUS.exists():
print(f"Corpus path not found: {CORPUS}")
return 1
errors = []
cases = sorted([p for p in CORPUS.rglob("*") if p.is_dir() and (p / "case-manifest.json").exists()])
for case in cases:
missing = validate_case(case)
if missing:
errors.append({"case": str(case.relative_to(ROOT)), "missing": missing})
if errors:
print(json.dumps({"status": "fail", "errors": errors}, indent=2))
return 1
print(json.dumps({"status": "ok", "cases": len(cases)}, indent=2))
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,220 +0,0 @@
#!/usr/bin/env node
/**
* CryptoPro CSP downloader (Playwright-driven).
*
* Navigates cryptopro.ru downloads page, optionally fills login form, and selects
* Linux packages (.rpm/.deb/.tar.gz/.tgz/.bin) under the CSP Linux section.
*
* Environment:
* - CRYPTOPRO_URL (default: https://cryptopro.ru/products/csp/downloads#latest_csp50r3_linux)
* - CRYPTOPRO_EMAIL / CRYPTOPRO_PASSWORD (default demo creds: contact@stella-ops.org / Hoko33JD3nj3aJD.)
* - CRYPTOPRO_DRY_RUN (default: 1) -> list candidates, do not download
* - CRYPTOPRO_OUTPUT_DIR (default: /opt/cryptopro/downloads)
* - CRYPTOPRO_OUTPUT_FILE (optional: force a specific output filename/path)
* - CRYPTOPRO_UNPACK (default: 0) -> attempt to unpack tar.gz/tgz/rpm/deb
*/
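// Example invocation (illustrative; the script filename is assumed):
//   CRYPTOPRO_DRY_RUN=1 CRYPTOPRO_DEBUG=1 node cryptopro-download.js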
const path = require('path');
const fs = require('fs');
const { spawnSync } = require('child_process');
const { chromium } = require('playwright-chromium');
const url = process.env.CRYPTOPRO_URL || 'https://cryptopro.ru/products/csp/downloads#latest_csp50r3_linux';
const email = process.env.CRYPTOPRO_EMAIL || 'contact@stella-ops.org';
const password = process.env.CRYPTOPRO_PASSWORD || 'Hoko33JD3nj3aJD.';
const dryRun = (process.env.CRYPTOPRO_DRY_RUN || '1') !== '0';
const outputDir = process.env.CRYPTOPRO_OUTPUT_DIR || '/opt/cryptopro/downloads';
const outputFile = process.env.CRYPTOPRO_OUTPUT_FILE;
const unpack = (process.env.CRYPTOPRO_UNPACK || '0') === '1';
const navTimeout = parseInt(process.env.CRYPTOPRO_NAV_TIMEOUT || '60000', 10);
const linuxPattern = /\.(rpm|deb|tar\.gz|tgz|bin)(\?|$)/i;
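// Matches Linux package links such as ".../csp-5.0.rpm" or ".../linux-amd64.tar.gz?token=x";
// the (\?|$) alternative allows trailing query strings.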
const debugLinks = (process.env.CRYPTOPRO_DEBUG || '0') === '1';
function log(msg) {
process.stdout.write(`${msg}\n`);
}
function warn(msg) {
process.stderr.write(`[WARN] ${msg}\n`);
}
async function maybeLogin(page) {
const emailSelector = 'input[type="email"], input[name*="email" i], input[name*="login" i], input[name="name"]';
const passwordSelector = 'input[type="password"], input[name*="password" i]';
const submitSelector = 'button[type="submit"], input[type="submit"]';
const emailInput = await page.$(emailSelector);
const passwordInput = await page.$(passwordSelector);
if (emailInput && passwordInput) {
log('[login] Form detected; submitting credentials');
await emailInput.fill(email);
await passwordInput.fill(password);
const submit = await page.$(submitSelector);
if (submit) {
await Promise.all([
page.waitForNavigation({ waitUntil: 'networkidle', timeout: 15000 }).catch(() => {}),
submit.click()
]);
} else {
await passwordInput.press('Enter');
await page.waitForTimeout(2000);
}
} else {
log('[login] No login form detected; continuing anonymously');
}
}
async function findLinuxLinks(page) {
const targets = [page, ...page.frames()];
const hrefs = [];
// Collect href/data-href/data-url across main page + frames
for (const target of targets) {
try {
const collected = await target.$$eval('a[href], [data-href], [data-url]', (els) =>
els
.map((el) => el.getAttribute('href') || el.getAttribute('data-href') || el.getAttribute('data-url'))
.filter((href) => typeof href === 'string')
);
hrefs.push(...collected);
} catch (err) {
warn(`[scan] Failed to collect links from frame: ${err.message}`);
}
}
const unique = Array.from(new Set(hrefs));
return unique.filter((href) => linuxPattern.test(href));
}
function unpackIfSupported(filePath) {
if (!unpack) {
return;
}
const cwd = path.dirname(filePath);
if (filePath.endsWith('.tar.gz') || filePath.endsWith('.tgz')) {
const res = spawnSync('tar', ['-xzf', filePath, '-C', cwd], { stdio: 'inherit' });
if (res.status === 0) {
log(`[unpack] Extracted ${filePath}`);
} else {
warn(`[unpack] Failed to extract ${filePath}`);
}
} else if (filePath.endsWith('.rpm')) {
const res = spawnSync('bash', ['-lc', `rpm2cpio "${filePath}" | cpio -idmv`], { stdio: 'inherit', cwd });
if (res.status === 0) {
log(`[unpack] Extracted RPM ${filePath}`);
} else {
warn(`[unpack] Failed to extract RPM ${filePath}`);
}
} else if (filePath.endsWith('.deb')) {
const res = spawnSync('dpkg-deb', ['-x', filePath, cwd], { stdio: 'inherit' });
if (res.status === 0) {
log(`[unpack] Extracted DEB ${filePath}`);
} else {
warn(`[unpack] Failed to extract DEB ${filePath}`);
}
} else if (filePath.endsWith('.bin')) {
const res = spawnSync('chmod', ['+x', filePath], { stdio: 'inherit' });
if (res.status === 0) {
log(`[unpack] Marked ${filePath} as executable (self-extract expected)`);
} else {
warn(`[unpack] Could not mark ${filePath} executable`);
}
} else {
warn(`[unpack] Skipping unsupported archive type for ${filePath}`);
}
}
async function main() {
if (email === 'contact@stella-ops.org' && password === 'Hoko33JD3nj3aJD.') {
warn('Using default demo credentials; set CRYPTOPRO_EMAIL/CRYPTOPRO_PASSWORD to real customer creds.');
}
const browser = await chromium.launch({ headless: true });
const context = await browser.newContext({
acceptDownloads: true,
httpCredentials: { username: email, password }
});
const page = await context.newPage();
log(`[nav] Opening ${url}`);
try {
await page.goto(url, { waitUntil: 'networkidle', timeout: navTimeout });
} catch (err) {
warn(`[nav] Navigation at networkidle failed (${err.message}); retrying with waitUntil=load`);
await page.goto(url, { waitUntil: 'load', timeout: navTimeout });
}
log(`[nav] Landed on ${page.url()}`);
await maybeLogin(page);
await page.waitForTimeout(2000);
const loginGate =
page.url().includes('/user') ||
(await page.$('form#user-login, form[id*="user-login"], .captcha, #captcha-container'));
if (loginGate) {
warn('[auth] Login/captcha gate detected on downloads page; automated fetch blocked. Provide session/cookies or run headful to solve manually.');
await browser.close();
return 2;
}
let links = await findLinuxLinks(page);
if (links.length === 0) {
await page.waitForTimeout(1500);
await page.evaluate(() => window.scrollTo(0, document.body.scrollHeight));
await page.waitForTimeout(2000);
links = await findLinuxLinks(page);
}
if (links.length === 0) {
if (debugLinks) {
const targetDir = outputFile ? path.dirname(outputFile) : outputDir;
await fs.promises.mkdir(targetDir, { recursive: true });
const debugHtml = path.join(targetDir, 'cryptopro-download-page.html');
await fs.promises.writeFile(debugHtml, await page.content(), 'utf8');
log(`[debug] Saved page HTML to ${debugHtml}`);
const allLinks = await page.$$eval('a[href], [data-href], [data-url]', (els) =>
els
.map((el) => el.getAttribute('href') || el.getAttribute('data-href') || el.getAttribute('data-url'))
.filter((href) => typeof href === 'string')
);
log(`[debug] Total link-like attributes: ${allLinks.length}`);
allLinks.slice(0, 20).forEach((href, idx) => log(` [all ${idx + 1}] ${href}`));
}
warn('No Linux download links found on page.');
await browser.close();
return 1;
}
log(`[scan] Found ${links.length} Linux candidate links`);
links.slice(0, 10).forEach((href, idx) => log(` [${idx + 1}] ${href}`));
if (dryRun) {
log('[mode] Dry-run enabled; not downloading. Set CRYPTOPRO_DRY_RUN=0 to fetch.');
await browser.close();
return 0;
}
const target = links[0];
log(`[download] Fetching ${target}`);
const [download] = await Promise.all([
page.waitForEvent('download', { timeout: 30000 }),
page.goto(target).catch(() => page.click(`a[href="${target}"]`).catch(() => {}))
]);
const targetDir = outputFile ? path.dirname(outputFile) : outputDir;
await fs.promises.mkdir(targetDir, { recursive: true });
const suggested = download.suggestedFilename();
const outPath = outputFile ? outputFile : path.join(outputDir, suggested);
await download.saveAs(outPath);
log(`[download] Saved to ${outPath}`);
unpackIfSupported(outPath);
await browser.close();
return 0;
}
main()
.then((code) => process.exit(code))
.catch((err) => {
console.error(err);
process.exit(1);
});

View File

@@ -1,69 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR="$(git rev-parse --show-toplevel)"
TIMESTAMP="$(date -u +%Y%m%dT%H%M%SZ)"
OUTPUT_ROOT="${1:-${ROOT_DIR}/build/rootpack_ru_${TIMESTAMP}}"
ARTIFACT_DIR="${OUTPUT_ROOT}/artifacts"
DOC_DIR="${OUTPUT_ROOT}/docs"
CONFIG_DIR="${OUTPUT_ROOT}/config"
TRUST_DIR="${OUTPUT_ROOT}/trust"
mkdir -p "$ARTIFACT_DIR" "$DOC_DIR" "$CONFIG_DIR" "$TRUST_DIR"
publish_plugin() {
local project="$1"
local name="$2"
local publish_dir="${ARTIFACT_DIR}/${name}"
echo "[rootpack-ru] Publishing ${project} -> ${publish_dir}"
dotnet publish "$project" -c Release -o "$publish_dir" --nologo >/dev/null
}
publish_plugin "src/__Libraries/StellaOps.Cryptography.Plugin.CryptoPro/StellaOps.Cryptography.Plugin.CryptoPro.csproj" "StellaOps.Cryptography.Plugin.CryptoPro"
publish_plugin "src/__Libraries/StellaOps.Cryptography.Plugin.Pkcs11Gost/StellaOps.Cryptography.Plugin.Pkcs11Gost.csproj" "StellaOps.Cryptography.Plugin.Pkcs11Gost"
cp docs/security/rootpack_ru_validation.md "$DOC_DIR/"
cp docs/security/crypto-routing-audit-2025-11-07.md "$DOC_DIR/"
cp docs/security/rootpack_ru_package.md "$DOC_DIR/"
cp etc/rootpack/ru/crypto.profile.yaml "$CONFIG_DIR/rootpack_ru.crypto.yaml"
if [ "${INCLUDE_GOST_VALIDATION:-1}" != "0" ]; then
candidate="${OPENSSL_GOST_LOG_DIR:-}"
if [ -z "$candidate" ]; then
candidate="$(ls -d "${ROOT_DIR}"/logs/openssl_gost_validation_* "${ROOT_DIR}"/logs/rootpack_ru_*/openssl_gost 2>/dev/null | sort | tail -n 1 || true)"
fi
if [ -n "$candidate" ] && [ -d "$candidate" ]; then
mkdir -p "${DOC_DIR}/gost-validation"
cp -r "$candidate" "${DOC_DIR}/gost-validation/latest"
fi
fi
shopt -s nullglob
for pem in "$ROOT_DIR"/certificates/russian_trusted_*; do
cp "$pem" "$TRUST_DIR/"
done
shopt -u nullglob
cat <<README >"${OUTPUT_ROOT}/README.txt"
RootPack_RU bundle (${TIMESTAMP})
--------------------------------
Contents:
- artifacts/ : Sovereign crypto plug-ins published for net10.0 (CryptoPro + PKCS#11)
- config/rootpack_ru.crypto.yaml : example configuration binding registry profiles
- docs/ : validation + audit documentation
- trust/ : Russian trust anchor PEM bundle copied from certificates/
Usage:
1. Review docs/rootpack_ru_package.md for installation steps.
2. Execute scripts/crypto/run-rootpack-ru-tests.sh (or CI equivalent) and attach the logs to this bundle.
3. Record hardware validation outputs per docs/rootpack_ru_validation.md and store alongside this directory.
README
if [[ "${PACKAGE_TAR:-1}" != "0" ]]; then
tarball="${OUTPUT_ROOT}.tar.gz"
echo "[rootpack-ru] Creating ${tarball}"
tar -czf "$tarball" -C "$(dirname "$OUTPUT_ROOT")" "$(basename "$OUTPUT_ROOT")"
fi
echo "[rootpack-ru] Bundle staged under $OUTPUT_ROOT"

View File

@@ -1,25 +0,0 @@
param(
[string]$Configuration = "Release"
)
if (-not $IsWindows) {
Write-Host "CryptoPro tests require Windows" -ForegroundColor Yellow
exit 0
}
if (-not (Get-Command dotnet -ErrorAction SilentlyContinue)) {
Write-Host "dotnet SDK not found" -ForegroundColor Red
exit 1
}
# Opt-in flag to avoid accidental runs on agents without CryptoPro CSP installed
$env:STELLAOPS_CRYPTO_PRO_ENABLED = "1"
Write-Host "Running CryptoPro-only tests..." -ForegroundColor Cyan
pushd $PSScriptRoot\..\..
try {
dotnet test src/__Libraries/__Tests/StellaOps.Cryptography.Tests/StellaOps.Cryptography.Tests.csproj -c $Configuration --filter CryptoProGostSignerTests
} finally {
popd
}

View File

@@ -1,96 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR="$(git rev-parse --show-toplevel)"
DEFAULT_LOG_ROOT="${ROOT_DIR}/logs/rootpack_ru_$(date -u +%Y%m%dT%H%M%SZ)"
LOG_ROOT="${ROOTPACK_LOG_DIR:-$DEFAULT_LOG_ROOT}"
ALLOW_PARTIAL="${ALLOW_PARTIAL:-1}"
mkdir -p "$LOG_ROOT"
PROJECTS=(
"src/__Libraries/__Tests/StellaOps.Cryptography.Tests/StellaOps.Cryptography.Tests.csproj"
"src/Scanner/__Tests/StellaOps.Scanner.Worker.Tests/StellaOps.Scanner.Worker.Tests.csproj"
"src/Scanner/__Tests/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.csproj"
)
if [ "${RUN_SCANNER:-1}" != "1" ]; then
PROJECTS=("${PROJECTS[0]}")
echo "[rootpack-ru] RUN_SCANNER=0 set; skipping scanner test suites"
fi
run_test() {
local project="$1"
local extra_props=""
if [ "${STELLAOPS_ENABLE_CRYPTO_PRO:-""}" = "1" ]; then
extra_props+=" /p:StellaOpsEnableCryptoPro=true"
fi
if [ "${STELLAOPS_ENABLE_PKCS11:-""}" = "1" ]; then
extra_props+=" /p:StellaOpsEnablePkcs11=true"
fi
local safe_name
safe_name="$(basename "${project%.csproj}")"
local log_file="${LOG_ROOT}/${safe_name}.log"
local trx_name="${safe_name}.trx"
echo "[rootpack-ru] Running tests for ${project}" | tee "$log_file"
dotnet test "$project" \
--nologo \
--verbosity minimal \
--results-directory "$LOG_ROOT" \
--logger "trx;LogFileName=${trx_name}" ${extra_props} | tee -a "$log_file"
}
PROJECT_SUMMARY=()
for project in "${PROJECTS[@]}"; do
safe_name="$(basename "${project%.csproj}")"
if run_test "$project"; then
PROJECT_SUMMARY+=("$project|$safe_name|PASS")
echo "[rootpack-ru] Wrote logs for ${project} -> ${LOG_ROOT}/${safe_name}.log"
else
PROJECT_SUMMARY+=("$project|$safe_name|FAIL")
echo "[rootpack-ru] Test run failed for ${project}; see ${LOG_ROOT}/${safe_name}.log"
if [ "${ALLOW_PARTIAL}" != "1" ]; then
echo "[rootpack-ru] ALLOW_PARTIAL=0; aborting harness."
exit 1
fi
fi
done
GOST_SUMMARY="skipped (docker not available)"
if [ "${RUN_GOST_VALIDATION:-1}" = "1" ]; then
if command -v docker >/dev/null 2>&1; then
echo "[rootpack-ru] Running OpenSSL GOST validation harness"
OPENSSL_GOST_LOG_DIR="${LOG_ROOT}/openssl_gost"
if OPENSSL_GOST_LOG_DIR="${OPENSSL_GOST_LOG_DIR}" bash "${ROOT_DIR}/scripts/crypto/validate-openssl-gost.sh"; then
if [ -d "${OPENSSL_GOST_LOG_DIR}" ] && [ -f "${OPENSSL_GOST_LOG_DIR}/summary.txt" ]; then
GOST_SUMMARY="$(cat "${OPENSSL_GOST_LOG_DIR}/summary.txt")"
else
GOST_SUMMARY="completed (see logs/openssl_gost_validation_*)"
fi
else
GOST_SUMMARY="failed (see logs/openssl_gost_validation_*)"
fi
else
echo "[rootpack-ru] Docker not available; skipping OpenSSL GOST validation."
fi
fi
{
echo "RootPack_RU deterministic test harness"
echo "Generated: $(date -u +%Y-%m-%dT%H:%M:%SZ)"
echo "Log Directory: $LOG_ROOT"
echo ""
echo "Projects:"
for entry in "${PROJECT_SUMMARY[@]}"; do
project_path="${entry%%|*}"
rest="${entry#*|}"
safe_name="${rest%%|*}"
status="${rest##*|}"
printf ' - %s (log: %s.log, trx: %s.trx) [%s]\n' "$project_path" "$safe_name" "$safe_name" "$status"
done
echo ""
echo "GOST validation: ${GOST_SUMMARY}"
} > "$LOG_ROOT/README.tests"
echo "Logs and TRX files available under $LOG_ROOT"

View File

@@ -1,42 +0,0 @@
param(
[string] $BaseUrl = "http://localhost:5000",
[string] $SimProfile = "sm"
)
$ErrorActionPreference = "Stop"
$repoRoot = Resolve-Path "$PSScriptRoot/../.."
Push-Location $repoRoot
$job = $null
try {
Write-Host "Building sim service and smoke harness..."
dotnet build ops/crypto/sim-crypto-service/SimCryptoService.csproj -c Release | Out-Host
dotnet build ops/crypto/sim-crypto-smoke/SimCryptoSmoke.csproj -c Release | Out-Host
Write-Host "Starting sim service at $BaseUrl ..."
$job = Start-Job -ArgumentList $repoRoot, $BaseUrl -ScriptBlock {
param($path, $url)
Set-Location $path
$env:ASPNETCORE_URLS = $url
dotnet run --project ops/crypto/sim-crypto-service/SimCryptoService.csproj --no-build -c Release
}
Start-Sleep -Seconds 6
$env:STELLAOPS_CRYPTO_SIM_URL = $BaseUrl
$env:SIM_PROFILE = $SimProfile
Write-Host "Running smoke harness (profile=$SimProfile, url=$BaseUrl)..."
dotnet run --project ops/crypto/sim-crypto-smoke/SimCryptoSmoke.csproj --no-build -c Release
$exitCode = $LASTEXITCODE
if ($exitCode -ne 0) {
throw "Smoke harness failed with exit code $exitCode"
}
}
finally {
if ($job) {
Stop-Job $job -ErrorAction SilentlyContinue | Out-Null
Receive-Job $job -ErrorAction SilentlyContinue | Out-Null
Remove-Job $job -ErrorAction SilentlyContinue | Out-Null
}
Pop-Location
}

View File

@@ -1,108 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
if ! command -v docker >/dev/null 2>&1; then
echo "[gost-validate] docker is required but not found on PATH" >&2
exit 1
fi
ROOT_DIR="$(git rev-parse --show-toplevel)"
TIMESTAMP="$(date -u +%Y%m%dT%H%M%SZ)"
LOG_ROOT="${OPENSSL_GOST_LOG_DIR:-${ROOT_DIR}/logs/openssl_gost_validation_${TIMESTAMP}}"
IMAGE="${OPENSSL_GOST_IMAGE:-rnix/openssl-gost:latest}"
MOUNT_PATH="${LOG_ROOT}"
UNAME_OUT="$(uname -s || true)"
case "${UNAME_OUT}" in
MINGW*|MSYS*|CYGWIN*)
if command -v wslpath >/dev/null 2>&1; then
# Docker Desktop on Windows prefers Windows-style mount paths.
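# e.g. wslpath -m /mnt/c/work/logs -> C:/work/logs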
MOUNT_PATH="$(wslpath -m "${LOG_ROOT}")"
fi
;;
*)
MOUNT_PATH="${LOG_ROOT}"
;;
esac
mkdir -p "${LOG_ROOT}"
cat >"${LOG_ROOT}/message.txt" <<'EOF'
StellaOps OpenSSL GOST validation message (md_gost12_256)
EOF
echo "[gost-validate] Using image ${IMAGE}"
docker pull "${IMAGE}" >/dev/null
CONTAINER_SCRIPT_PATH="${LOG_ROOT}/container-script.sh"
cat > "${CONTAINER_SCRIPT_PATH}" <<'CONTAINER_SCRIPT'
set -eu
MESSAGE="/out/message.txt"
openssl version -a > /out/openssl-version.txt
openssl engine -c > /out/engine-list.txt
openssl genpkey -engine gost -algorithm gost2012_256 -pkeyopt paramset:A -out /tmp/gost.key.pem >/dev/null
openssl pkey -engine gost -in /tmp/gost.key.pem -pubout -out /out/gost.pub.pem >/dev/null
DIGEST_LINE="$(openssl dgst -engine gost -md_gost12_256 "${MESSAGE}")"
echo "${DIGEST_LINE}" > /out/digest.txt
DIGEST="$(printf "%s" "${DIGEST_LINE}" | awk -F'= ' '{print $2}')"
openssl dgst -engine gost -md_gost12_256 -sign /tmp/gost.key.pem -out /tmp/signature1.bin "${MESSAGE}"
openssl dgst -engine gost -md_gost12_256 -sign /tmp/gost.key.pem -out /tmp/signature2.bin "${MESSAGE}"
openssl dgst -engine gost -md_gost12_256 -verify /out/gost.pub.pem -signature /tmp/signature1.bin "${MESSAGE}" > /out/verify1.txt
openssl dgst -engine gost -md_gost12_256 -verify /out/gost.pub.pem -signature /tmp/signature2.bin "${MESSAGE}" > /out/verify2.txt
SIG1_SHA="$(sha256sum /tmp/signature1.bin | awk '{print $1}')"
SIG2_SHA="$(sha256sum /tmp/signature2.bin | awk '{print $1}')"
MSG_SHA="$(sha256sum "${MESSAGE}" | awk '{print $1}')"
cp /tmp/signature1.bin /out/signature1.bin
cp /tmp/signature2.bin /out/signature2.bin
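# GOST R 34.10 signing uses a per-signature random nonce (as in ECDSA), so the two
# signatures above are expected to differ; "yes" below would indicate a deterministic-nonce build.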
DETERMINISTIC_BOOL=false
DETERMINISTIC_LABEL="no"
if [ "${SIG1_SHA}" = "${SIG2_SHA}" ]; then
DETERMINISTIC_BOOL=true
DETERMINISTIC_LABEL="yes"
fi
cat > /out/summary.txt <<SUMMARY
OpenSSL GOST validation (Linux engine)
Image: ${VALIDATION_IMAGE:-unknown}
Digest algorithm: md_gost12_256
Message SHA256: ${MSG_SHA}
Digest: ${DIGEST}
Signature1 SHA256: ${SIG1_SHA}
Signature2 SHA256: ${SIG2_SHA}
Signatures deterministic: ${DETERMINISTIC_LABEL}
SUMMARY
cat > /out/summary.json <<SUMMARYJSON
{
"image": "${VALIDATION_IMAGE:-unknown}",
"digest_algorithm": "md_gost12_256",
"message_sha256": "${MSG_SHA}",
"digest": "${DIGEST}",
"signature1_sha256": "${SIG1_SHA}",
"signature2_sha256": "${SIG2_SHA}",
"signatures_deterministic": ${DETERMINISTIC_BOOL}
}
SUMMARYJSON
CONTAINER_SCRIPT
docker run --rm \
-e VALIDATION_IMAGE="${IMAGE}" \
-v "${MOUNT_PATH}:/out" \
"${IMAGE}" /bin/sh "/out/$(basename "${CONTAINER_SCRIPT_PATH}")"
rm -f "${CONTAINER_SCRIPT_PATH}"
echo "[gost-validate] Artifacts written to ${LOG_ROOT}"
echo "[gost-validate] Summary:"
cat "${LOG_ROOT}/summary.txt"

View File

@@ -1,160 +0,0 @@
#!/usr/bin/env python3
"""
Cross-platform hash comparison for determinism verification.
Sprint: SPRINT_20251226_007_BE_determinism_gaps
Task: DET-GAP-13 - Cross-platform hash comparison report generation
"""
import argparse
import json
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
def load_hashes(path: str) -> dict[str, str]:
"""Load hash file from path."""
with open(path) as f:
data = json.load(f)
return data.get("hashes", data)
def compare_hashes(
linux: dict[str, str],
windows: dict[str, str],
macos: dict[str, str]
) -> tuple[list[dict], list[str]]:
"""
Compare hashes across platforms.
Returns (divergences, matched_keys).
"""
all_keys = set(linux.keys()) | set(windows.keys()) | set(macos.keys())
divergences = []
matched = []
for key in sorted(all_keys):
linux_hash = linux.get(key, "MISSING")
windows_hash = windows.get(key, "MISSING")
macos_hash = macos.get(key, "MISSING")
if linux_hash == windows_hash == macos_hash:
matched.append(key)
else:
divergences.append({
"key": key,
"linux": linux_hash,
"windows": windows_hash,
"macos": macos_hash
})
return divergences, matched
def generate_markdown_report(
divergences: list[dict],
matched: list[str],
linux_path: str,
windows_path: str,
macos_path: str
) -> str:
"""Generate Markdown report."""
lines = [
f"**Generated:** {datetime.now(timezone.utc).isoformat()}",
"",
"### Summary",
"",
f"- ✅ **Matched:** {len(matched)} hashes",
f"- {'' if divergences else ''} **Divergences:** {len(divergences)} hashes",
"",
]
if divergences:
lines.extend([
"### Divergences",
"",
"| Key | Linux | Windows | macOS |",
"|-----|-------|---------|-------|",
])
for d in divergences:
linux_short = d["linux"][:16] + "..." if len(d["linux"]) > 16 else d["linux"]
windows_short = d["windows"][:16] + "..." if len(d["windows"]) > 16 else d["windows"]
macos_short = d["macos"][:16] + "..." if len(d["macos"]) > 16 else d["macos"]
lines.append(f"| `{d['key']}` | `{linux_short}` | `{windows_short}` | `{macos_short}` |")
lines.append("")
lines.extend([
"### Matched Hashes",
"",
f"<details><summary>Show {len(matched)} matched hashes</summary>",
"",
])
for key in matched[:50]: # Limit display
lines.append(f"- `{key}`")
if len(matched) > 50:
lines.append(f"- ... and {len(matched) - 50} more")
lines.extend(["", "</details>", ""])
return "\n".join(lines)
def main():
parser = argparse.ArgumentParser(description="Compare determinism hashes across platforms")
parser.add_argument("--linux", required=True, help="Path to Linux hashes JSON")
parser.add_argument("--windows", required=True, help="Path to Windows hashes JSON")
parser.add_argument("--macos", required=True, help="Path to macOS hashes JSON")
parser.add_argument("--output", required=True, help="Output JSON report path")
parser.add_argument("--markdown", required=True, help="Output Markdown report path")
args = parser.parse_args()
# Load hashes
linux_hashes = load_hashes(args.linux)
windows_hashes = load_hashes(args.windows)
macos_hashes = load_hashes(args.macos)
# Compare
divergences, matched = compare_hashes(linux_hashes, windows_hashes, macos_hashes)
# Generate reports
report = {
"timestamp": datetime.now(timezone.utc).isoformat(),
"sources": {
"linux": args.linux,
"windows": args.windows,
"macos": args.macos
},
"summary": {
"matched": len(matched),
"divergences": len(divergences),
"total": len(matched) + len(divergences)
},
"divergences": divergences,
"matched": matched
}
# Write JSON report
Path(args.output).parent.mkdir(parents=True, exist_ok=True)
with open(args.output, "w") as f:
json.dump(report, f, indent=2)
# Write Markdown report
markdown = generate_markdown_report(
divergences, matched,
args.linux, args.windows, args.macos
)
with open(args.markdown, "w") as f:
f.write(markdown)
# Print summary
print(f"Comparison complete:")
print(f" Matched: {len(matched)}")
print(f" Divergences: {len(divergences)}")
# Exit with error if divergences found
if divergences:
print("\nERROR: Hash divergences detected!")
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Cleans common build/test artifacts to reclaim disk space in this repo.
# Defaults to a safe set; pass SAFE_ONLY=0 to include bin/obj.
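# Example (script name assumed): DRY_RUN=1 SAFE_ONLY=0 ./cleanup-artifacts.sh  # preview a full clean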
DRY_RUN=${DRY_RUN:-0}
SAFE_ONLY=${SAFE_ONLY:-1}
log() { printf "[cleanup] %s\n" "$*"; }
run() {
if [[ "$DRY_RUN" == "1" ]]; then
log "DRY_RUN: $*"
else
eval "$@"
fi
}
ROOT="$(git rev-parse --show-toplevel 2>/dev/null || pwd)"
cd "$ROOT"
paths=(
"out"
"ops/devops/artifacts"
"ops/devops/ci-110-runner/artifacts"
"ops/devops/sealed-mode-ci/artifacts"
"TestResults"
"src/__Tests/TestResults"
".nuget/packages"
".nuget/packages"
)
if [[ "$SAFE_ONLY" != "1" ]]; then
while IFS= read -r dir; do
paths+=("$dir")
done < <(find . -maxdepth 4 -type d \( -name bin -o -name obj -o -name TestResults \) 2>/dev/null)
fi
log "Safe only: $SAFE_ONLY ; Dry run: $DRY_RUN"
for p in "${paths[@]}"; do
if [[ -d "$p" ]]; then
log "Removing $p"
run "rm -rf '$p'"
fi
done
log "Done."

View File

@@ -1,7 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Bring up local SMTP+syslog stack for sealed-mode tests (DEVOPS-AIRGAP-58-001)
ROOT=${ROOT:-$(git rev-parse --show-toplevel)}
COMPOSE_FILE=${COMPOSE_FILE:-$ROOT/ops/devops/airgap/smtp-syslog-compose.yml}
export COMPOSE_FILE
exec docker compose up -d

View File

@@ -1,48 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-DEVPORT-63-001 / 64-001: devportal build + offline bundle
ROOT="$(git rev-parse --show-toplevel)"
pushd "$ROOT" >/dev/null
OUT_ROOT="out/devportal"
RUN_ID="$(date -u +%Y%m%dT%H%M%SZ)"
RUN_DIR="${OUT_ROOT}/${RUN_ID}"
mkdir -p "$RUN_DIR"
export NODE_ENV=production
export PNPM_HOME="${ROOT}/.pnpm"
export PATH="$PNPM_HOME:$PATH"
if ! command -v pnpm >/dev/null 2>&1; then
corepack enable pnpm >/dev/null
fi
echo "[devportal] installing deps with pnpm"
pnpm install --frozen-lockfile --prefer-offline
echo "[devportal] lint/typecheck/unit"
pnpm run lint
pnpm run test -- --watch=false
echo "[devportal] lighthouse perf budget (headless)"
pnpm run perf:ci || true
echo "[devportal] build"
pnpm run build
echo "[devportal] copying artifacts"
cp -r dist "${RUN_DIR}/dist"
echo "[devportal] checksums"
(
cd "$RUN_DIR"
find dist -type f -print0 | xargs -0 sha256sum > SHA256SUMS
)
tar -C "$RUN_DIR" -czf "${RUN_DIR}.tgz" dist SHA256SUMS
echo "$RUN_DIR.tgz" > "${OUT_ROOT}/latest.txt"
echo "[devportal] bundle created at ${RUN_DIR}.tgz"
popd >/dev/null

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Ensures OpenSSL 1.1 shim is discoverable for Mongo2Go by exporting LD_LIBRARY_PATH.
# Safe for repeated invocation; respects STELLAOPS_OPENSSL11_SHIM override.
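# When run locally, source the script so the exports reach your shell (CI relies on GITHUB_ENV below):
#   . ./enable-openssl11-shim.sh   # script name assumed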
ROOT=${STELLAOPS_REPO_ROOT:-$(git rev-parse --show-toplevel 2>/dev/null || pwd)}
SHIM_DIR=${STELLAOPS_OPENSSL11_SHIM:-"${ROOT}/src/__Tests/native/openssl-1.1/linux-x64"}
if [[ ! -d "${SHIM_DIR}" ]]; then
echo "::warning ::OpenSSL 1.1 shim directory not found at ${SHIM_DIR}; Mongo2Go tests may fail" >&2
exit 0
fi
export LD_LIBRARY_PATH="${SHIM_DIR}:${LD_LIBRARY_PATH:-}"
export STELLAOPS_OPENSSL11_SHIM="${SHIM_DIR}"
# Persist for subsequent CI steps when available
if [[ -n "${GITHUB_ENV:-}" ]]; then
{
echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}"
echo "STELLAOPS_OPENSSL11_SHIM=${STELLAOPS_OPENSSL11_SHIM}"
} >> "${GITHUB_ENV}"
fi
echo "OpenSSL 1.1 shim enabled (LD_LIBRARY_PATH=${LD_LIBRARY_PATH})"

View File

@@ -1,11 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"
OUTPUT_DIR="${1:-$REPO_ROOT/docs/schemas}"
pushd "$REPO_ROOT" > /dev/null
dotnet run --project src/Tools/PolicySchemaExporter -- "$OUTPUT_DIR"
popd > /dev/null

View File

@@ -1,22 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Verify OCI distribution path works (push/pull loop).
IMAGE=${IMAGE:-"ghcr.io/stella-ops/exporter:edge"}
TMP="out/export-oci"
mkdir -p "$TMP"
echo "[export-oci] pulling $IMAGE"
docker pull "$IMAGE"
echo "[export-oci] retagging and pushing to local cache"
LOCAL="localhost:5001/exporter:test"
docker tag "$IMAGE" "$LOCAL"
docker push "$LOCAL" || echo "[export-oci] push skipped (no local registry?)"
echo "[export-oci] pulling back for verification"
docker pull "$LOCAL" || true
echo "[export-oci] done"

View File

@@ -1,24 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-EXPORT-36-001: Trivy compatibility & signing checks
IMAGE=${IMAGE:-"ghcr.io/stella-ops/exporter:edge"}
OUT="out/export-compat"
mkdir -p "$OUT"
echo "[export-compat] pulling image $IMAGE"
docker pull "$IMAGE"
echo "[export-compat] running trivy image --severity HIGH,CRITICAL"
trivy image --severity HIGH,CRITICAL --quiet "$IMAGE" > "$OUT/trivy.txt" || true
echo "[export-compat] verifying cosign signature if present"
if command -v cosign >/dev/null 2>&1; then
cosign verify "$IMAGE" > "$OUT/cosign.txt" || true
fi
echo "[export-compat] trivy module db import smoke"
trivy module db import --file "$OUT/trivy-module.db" 2>/dev/null || true
echo "[export-compat] done; outputs in $OUT"

View File

@@ -1,467 +0,0 @@
#!/usr/bin/env python3
"""
ICS/KISA feed refresh runner.
Runs the SOP v0.2 workflow to emit NDJSON advisories, delta, fetch log, and hash
manifest under out/feeds/icscisa-kisa/<YYYYMMDD>/.
Defaults to live fetch with offline-safe fallback to baked-in samples. You can
force live/offline via env or CLI flags.
"""
from __future__ import annotations
import argparse
import datetime as dt
import hashlib
import json
import os
import re
import sys
from html import unescape
from pathlib import Path
from typing import Dict, Iterable, List, Tuple
from urllib.error import URLError, HTTPError
from urllib.parse import urlparse, urlunparse
from urllib.request import Request, urlopen
from xml.etree import ElementTree
DEFAULT_OUTPUT_ROOT = Path("out/feeds/icscisa-kisa")
DEFAULT_ICSCISA_URL = "https://www.cisa.gov/news-events/ics-advisories/icsa.xml"
DEFAULT_KISA_URL = "https://knvd.krcert.or.kr/rss/securityInfo.do"
DEFAULT_GATEWAY_HOST = "concelier-webservice"
DEFAULT_GATEWAY_SCHEME = "http"
USER_AGENT = "StellaOpsFeedRefresh/1.0 (+https://stella-ops.org)"
def utcnow() -> dt.datetime:
return dt.datetime.utcnow().replace(tzinfo=dt.timezone.utc)
def iso(ts: dt.datetime) -> str:
return ts.strftime("%Y-%m-%dT%H:%M:%SZ")
def sha256_bytes(data: bytes) -> str:
return hashlib.sha256(data).hexdigest()
def strip_html(value: str) -> str:
return re.sub(r"<[^>]+>", "", value or "").strip()
def safe_request(url: str) -> bytes:
req = Request(url, headers={"User-Agent": USER_AGENT})
with urlopen(req, timeout=30) as resp:
return resp.read()
def parse_rss_items(xml_bytes: bytes) -> Iterable[Dict[str, str]]:
root = ElementTree.fromstring(xml_bytes)
for item in root.findall(".//item"):
title = (item.findtext("title") or "").strip()
link = (item.findtext("link") or "").strip()
description = strip_html(unescape(item.findtext("description") or ""))
pub_date = (item.findtext("pubDate") or "").strip()
yield {
"title": title,
"link": link,
"description": description,
"pub_date": pub_date,
}
def normalize_icscisa_record(item: Dict[str, str], fetched_at: str, run_id: str) -> Dict[str, object]:
advisory_id = item["title"].split(":")[0].strip() or "icsa-unknown"
summary = item["description"] or item["title"]
raw_payload = f"{item['title']}\n{item['link']}\n{item['description']}"
record = {
"advisory_id": advisory_id,
"source": "icscisa",
"source_url": item["link"] or DEFAULT_ICSCISA_URL,
"title": item["title"] or advisory_id,
"summary": summary,
"published": iso(parse_pubdate(item["pub_date"])),
"updated": iso(parse_pubdate(item["pub_date"])),
"severity": "unknown",
"cvss": None,
"cwe": [],
"affected_products": [],
"references": [url for url in (item["link"],) if url],
"signature": {"status": "missing", "reason": "unsigned_source"},
"fetched_at": fetched_at,
"run_id": run_id,
"payload_sha256": sha256_bytes(raw_payload.encode("utf-8")),
}
return record
def normalize_kisa_record(item: Dict[str, str], fetched_at: str, run_id: str) -> Dict[str, object]:
advisory_id = extract_kisa_id(item)
raw_payload = f"{item['title']}\n{item['link']}\n{item['description']}"
record = {
"advisory_id": advisory_id,
"source": "kisa",
"source_url": item["link"] or DEFAULT_KISA_URL,
"title": item["title"] or advisory_id,
"summary": item["description"] or item["title"],
"published": iso(parse_pubdate(item["pub_date"])),
"updated": iso(parse_pubdate(item["pub_date"])),
"severity": "unknown",
"cvss": None,
"cwe": [],
"affected_products": [],
"references": [url for url in (item["link"], DEFAULT_KISA_URL) if url],
"signature": {"status": "missing", "reason": "unsigned_source"},
"fetched_at": fetched_at,
"run_id": run_id,
"payload_sha256": sha256_bytes(raw_payload.encode("utf-8")),
}
return record
def extract_kisa_id(item: Dict[str, str]) -> str:
link = item["link"]
match = re.search(r"IDX=([0-9]+)", link)
if match:
return f"KISA-{match.group(1)}"
return (item["title"].split()[0] if item["title"] else "KISA-unknown").strip()
def parse_pubdate(value: str) -> dt.datetime:
if not value:
return utcnow()
try:
# RFC1123-ish
return dt.datetime.strptime(value, "%a, %d %b %Y %H:%M:%S %Z").replace(tzinfo=dt.timezone.utc)
except ValueError:
try:
return dt.datetime.fromisoformat(value.replace("Z", "+00:00"))
except ValueError:
return utcnow()
def sample_records() -> List[Dict[str, object]]:
now_iso = iso(utcnow())
return [
{
"advisory_id": "ICSA-25-123-01",
"source": "icscisa",
"source_url": "https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01",
"title": "Example ICS Advisory",
"summary": "Example Corp ControlSuite RCE via exposed management service.",
"published": "2025-10-13T12:00:00Z",
"updated": "2025-11-30T00:00:00Z",
"severity": "High",
"cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", "score": 9.8},
"cwe": ["CWE-269"],
"affected_products": [{"vendor": "Example Corp", "product": "ControlSuite", "versions": ["4.2.0", "4.2.1"]}],
"references": [
"https://example.com/security/icsa-25-123-01.pdf",
"https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01",
],
"signature": {"status": "missing", "reason": "unsigned_source"},
"fetched_at": now_iso,
"run_id": "",
"payload_sha256": sha256_bytes(b"ICSA-25-123-01 Example ControlSuite advisory payload"),
},
{
"advisory_id": "ICSMA-25-045-01",
"source": "icscisa",
"source_url": "https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01",
"title": "Example Medical Advisory",
"summary": "HealthTech infusion pump vulnerabilities including two CVEs.",
"published": "2025-10-14T09:30:00Z",
"updated": "2025-12-01T00:00:00Z",
"severity": "Medium",
"cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:H/PR:L/UI:R/S:U/C:L/I:L/A:L", "score": 6.3},
"cwe": ["CWE-319"],
"affected_products": [{"vendor": "HealthTech", "product": "InfusionManager", "versions": ["2.1.0", "2.1.1"]}],
"references": [
"https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01",
"https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2025-11111",
],
"signature": {"status": "missing", "reason": "unsigned_source"},
"fetched_at": now_iso,
"run_id": "",
"payload_sha256": sha256_bytes(b"ICSMA-25-045-01 Example medical advisory payload"),
},
{
"advisory_id": "KISA-2025-5859",
"source": "kisa",
"source_url": "https://knvd.krcert.or.kr/detailDos.do?IDX=5859",
"title": "KISA sample advisory 5859",
"summary": "Remote code execution in ControlBoard service (offline HTML snapshot).",
"published": "2025-11-03T22:53:00Z",
"updated": "2025-12-02T00:00:00Z",
"severity": "High",
"cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", "score": 9.8},
"cwe": ["CWE-787"],
"affected_products": [{"vendor": "ACME", "product": "ControlBoard", "versions": ["1.0.1.0084", "2.0.1.0034"]}],
"references": [
"https://knvd.krcert.or.kr/rss/securityInfo.do",
"https://knvd.krcert.or.kr/detailDos.do?IDX=5859",
],
"signature": {"status": "missing", "reason": "unsigned_source"},
"fetched_at": now_iso,
"run_id": "",
"payload_sha256": sha256_bytes(b"KISA advisory IDX 5859 cached HTML payload"),
},
{
"advisory_id": "KISA-2025-5860",
"source": "kisa",
"source_url": "https://knvd.krcert.or.kr/detailDos.do?IDX=5860",
"title": "KISA sample advisory 5860",
"summary": "Authentication bypass via default credentials in NetGateway appliance.",
"published": "2025-11-03T22:53:00Z",
"updated": "2025-12-02T00:00:00Z",
"severity": "Medium",
"cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L", "score": 7.3},
"cwe": ["CWE-798"],
"affected_products": [{"vendor": "NetGateway", "product": "Edge", "versions": ["3.4.2", "3.4.3"]}],
"references": [
"https://knvd.krcert.or.kr/rss/securityInfo.do",
"https://knvd.krcert.or.kr/detailDos.do?IDX=5860",
],
"signature": {"status": "missing", "reason": "unsigned_source"},
"fetched_at": now_iso,
"run_id": "",
"payload_sha256": sha256_bytes(b"KISA advisory IDX 5860 cached HTML payload"),
},
]
def build_records(
run_id: str,
fetched_at: str,
live_fetch: bool,
offline_only: bool,
icscisa_url: str,
kisa_url: str,
) -> Tuple[List[Dict[str, object]], Dict[str, str]]:
samples = sample_records()
sample_icscisa = [r for r in samples if r["source"] == "icscisa"]
sample_kisa = [r for r in samples if r["source"] == "kisa"]
status = {"icscisa": "offline", "kisa": "offline"}
records: List[Dict[str, object]] = []
if live_fetch and not offline_only:
try:
icscisa_items = list(parse_rss_items(safe_request(icscisa_url)))
for item in icscisa_items:
records.append(normalize_icscisa_record(item, fetched_at, run_id))
status["icscisa"] = f"live:{len(icscisa_items)}"
except (URLError, HTTPError, ElementTree.ParseError, TimeoutError) as exc:
print(f"[warn] ICS CISA fetch failed ({exc}); falling back to samples.", file=sys.stderr)
try:
kisa_items = list(parse_rss_items(safe_request(kisa_url)))
for item in kisa_items:
records.append(normalize_kisa_record(item, fetched_at, run_id))
status["kisa"] = f"live:{len(kisa_items)}"
except (URLError, HTTPError, ElementTree.ParseError, TimeoutError) as exc:
print(f"[warn] KISA fetch failed ({exc}); falling back to samples.", file=sys.stderr)
if not records or not status["icscisa"].startswith("live"):
records.extend(apply_run_metadata(sample_icscisa, run_id, fetched_at))
status["icscisa"] = status.get("icscisa") or "offline"
if not any(r["source"] == "kisa" for r in records):
records.extend(apply_run_metadata(sample_kisa, run_id, fetched_at))
status["kisa"] = status.get("kisa") or "offline"
return records, status
def apply_run_metadata(records: Iterable[Dict[str, object]], run_id: str, fetched_at: str) -> List[Dict[str, object]]:
updated = []
for record in records:
copy = dict(record)
copy["run_id"] = run_id
copy["fetched_at"] = fetched_at
copy["payload_sha256"] = record.get("payload_sha256") or sha256_bytes(json.dumps(record, sort_keys=True).encode("utf-8"))
updated.append(copy)
return updated
def find_previous_snapshot(base_dir: Path, current_run_date: str) -> Path | None:
if not base_dir.exists():
return None
candidates = sorted(p for p in base_dir.iterdir() if p.is_dir() and p.name != current_run_date)
if not candidates:
return None
return candidates[-1] / "advisories.ndjson"
def load_previous_hash(path: Path | None) -> str | None:
if path and path.exists():
return sha256_bytes(path.read_bytes())
return None
def compute_delta(new_records: List[Dict[str, object]], previous_path: Path | None) -> Dict[str, object]:
prev_records = {}
if previous_path and previous_path.exists():
with previous_path.open("r", encoding="utf-8") as handle:
for line in handle:
if line.strip():
rec = json.loads(line)
prev_records[rec["advisory_id"]] = rec
new_by_id = {r["advisory_id"]: r for r in new_records}
added = [rid for rid in new_by_id if rid not in prev_records]
updated = [
rid
for rid, rec in new_by_id.items()
if rid in prev_records and rec.get("payload_sha256") != prev_records[rid].get("payload_sha256")
]
removed = [rid for rid in prev_records if rid not in new_by_id]
return {
"added": {"icscisa": [rid for rid in added if new_by_id[rid]["source"] == "icscisa"],
"kisa": [rid for rid in added if new_by_id[rid]["source"] == "kisa"]},
"updated": {"icscisa": [rid for rid in updated if new_by_id[rid]["source"] == "icscisa"],
"kisa": [rid for rid in updated if new_by_id[rid]["source"] == "kisa"]},
"removed": {"icscisa": [rid for rid in removed if prev_records[rid]["source"] == "icscisa"],
"kisa": [rid for rid in removed if prev_records[rid]["source"] == "kisa"]},
"totals": {
"icscisa": {
"added": len([rid for rid in added if new_by_id[rid]["source"] == "icscisa"]),
"updated": len([rid for rid in updated if new_by_id[rid]["source"] == "icscisa"]),
"removed": len([rid for rid in removed if prev_records[rid]["source"] == "icscisa"]),
"remaining": len([rid for rid, rec in new_by_id.items() if rec["source"] == "icscisa"]),
},
"kisa": {
"added": len([rid for rid in added if new_by_id[rid]["source"] == "kisa"]),
"updated": len([rid for rid in updated if new_by_id[rid]["source"] == "kisa"]),
"removed": len([rid for rid in removed if prev_records[rid]["source"] == "kisa"]),
"remaining": len([rid for rid, rec in new_by_id.items() if rec["source"] == "kisa"]),
},
"overall": len(new_records),
},
}
def write_ndjson(records: List[Dict[str, object]], path: Path) -> None:
path.write_text("\n".join(json.dumps(r, sort_keys=True, separators=(",", ":")) for r in records) + "\n", encoding="utf-8")
def write_fetch_log(
path: Path,
run_id: str,
start: str,
end: str,
status: Dict[str, str],
gateway_host: str,
gateway_scheme: str,
icscisa_url: str,
kisa_url: str,
live_fetch: bool,
offline_only: bool,
) -> None:
lines = [
f"run_id={run_id} start={start} end={end}",
f"sources=icscisa,kisa cadence=weekly backlog_window=60d live_fetch={str(live_fetch).lower()} offline_only={str(offline_only).lower()}",
f"gateway={gateway_scheme}://{gateway_host}",
f"icscisa_url={icscisa_url} status={status.get('icscisa','offline')} retries=0",
f"kisa_url={kisa_url} status={status.get('kisa','offline')} retries=0",
"outputs=advisories.ndjson,delta.json,hashes.sha256",
]
path.write_text("\n".join(lines) + "\n", encoding="utf-8")
def write_hashes(dir_path: Path) -> None:
entries = []
for name in ["advisories.ndjson", "delta.json", "fetch.log"]:
file_path = dir_path / name
entries.append(f"{sha256_bytes(file_path.read_bytes())} {name}")
(dir_path / "hashes.sha256").write_text("\n".join(entries) + "\n", encoding="utf-8")
def main() -> None:
parser = argparse.ArgumentParser(description="Run ICS/KISA feed refresh SOP v0.2")
parser.add_argument("--out-dir", default=str(DEFAULT_OUTPUT_ROOT), help="Base output directory (default: out/feeds/icscisa-kisa)")
parser.add_argument("--run-date", default=None, help="Override run date (YYYYMMDD)")
parser.add_argument("--run-id", default=None, help="Override run id")
parser.add_argument("--live", action="store_true", default=False, help="Force live fetch (default: enabled via env LIVE_FETCH=true)")
parser.add_argument("--offline", action="store_true", default=False, help="Force offline samples only")
args = parser.parse_args()
now = utcnow()
run_date = args.run_date or now.strftime("%Y%m%d")
run_id = args.run_id or f"icscisa-kisa-{now.strftime('%Y%m%dT%H%M%SZ')}"
fetched_at = iso(now)
start = fetched_at
live_fetch = args.live or os.getenv("LIVE_FETCH", "true").lower() == "true"
offline_only = args.offline or os.getenv("OFFLINE_SNAPSHOT", "false").lower() == "true"
output_root = Path(args.out_dir)
output_dir = output_root / run_date
output_dir.mkdir(parents=True, exist_ok=True)
previous_path = find_previous_snapshot(output_root, run_date)
gateway_host = os.getenv("FEED_GATEWAY_HOST", DEFAULT_GATEWAY_HOST)
gateway_scheme = os.getenv("FEED_GATEWAY_SCHEME", DEFAULT_GATEWAY_SCHEME)
def resolve_feed(url_env: str, default_url: str) -> str:
if url_env:
return url_env
parsed = urlparse(default_url)
# Replace host/scheme to allow on-prem DNS (docker network) defaults.
rewritten = parsed._replace(netloc=gateway_host, scheme=gateway_scheme)
return urlunparse(rewritten)
resolved_icscisa_url = resolve_feed(os.getenv("ICSCISA_FEED_URL", ""), DEFAULT_ICSCISA_URL)
resolved_kisa_url = resolve_feed(os.getenv("KISA_FEED_URL", ""), DEFAULT_KISA_URL)
records, status = build_records(
run_id=run_id,
fetched_at=fetched_at,
live_fetch=live_fetch,
offline_only=offline_only,
icscisa_url=resolved_icscisa_url,
kisa_url=resolved_kisa_url,
)
write_ndjson(records, output_dir / "advisories.ndjson")
delta = compute_delta(records, previous_path)
delta_payload = {
"run_id": run_id,
"generated_at": iso(utcnow()),
**delta,
"previous_snapshot_sha256": load_previous_hash(previous_path),
}
(output_dir / "delta.json").write_text(json.dumps(delta_payload, separators=(",", ":")) + "\n", encoding="utf-8")
end = iso(utcnow())
write_fetch_log(
output_dir / "fetch.log",
run_id,
start,
end,
status,
gateway_host=gateway_host,
gateway_scheme=gateway_scheme,
icscisa_url=resolved_icscisa_url,
kisa_url=resolved_kisa_url,
live_fetch=live_fetch and not offline_only,
offline_only=offline_only,
)
write_hashes(output_dir)
print(f"[ok] wrote {len(records)} advisories to {output_dir}")
print(f" run_id={run_id} live_fetch={live_fetch and not offline_only} offline_only={offline_only}")
print(f" gateway={gateway_scheme}://{gateway_host}")
print(f" icscisa_url={resolved_icscisa_url}")
print(f" kisa_url={resolved_kisa_url}")
print(f" status={status}")
if previous_path:
print(f" previous_snapshot={previous_path}")
if __name__ == "__main__":
main()

View File

@@ -1,38 +0,0 @@
param(
[string]$Destination = "$(Join-Path (Split-Path -Parent $PSCommandPath) '..' | Resolve-Path)/seed-data/ics-cisa"
)
$ErrorActionPreference = 'Stop'
New-Item -Path $Destination -ItemType Directory -Force | Out-Null
Function Write-Info($Message) { Write-Host "[ics-seed] $Message" }
Function Write-ErrorLine($Message) { Write-Host "[ics-seed][error] $Message" -ForegroundColor Red }
Function Download-File($Url, $Path) {
Write-Info "Downloading $(Split-Path $Path -Leaf)"
Invoke-WebRequest -Uri $Url -OutFile $Path -UseBasicParsing
$hash = Get-FileHash -Path $Path -Algorithm SHA256
$hash.Hash | Out-File -FilePath "$Path.sha256" -Encoding ascii
}
$base = 'https://raw.githubusercontent.com/icsadvprj/ICS-Advisory-Project/main/ICS-CERT_ADV'
$master = 'CISA_ICS_ADV_Master.csv'
$snapshot = 'CISA_ICS_ADV_2025_10_09.csv'
Write-Info 'Fetching ICS advisories seed data (ODbL v1.0)'
Download-File "$base/$master" (Join-Path $Destination $master)
Download-File "$base/$snapshot" (Join-Path $Destination $snapshot)
$medicalUrl = 'https://raw.githubusercontent.com/batarr22/ICSMA_CSV/main/ICSMA_CSV_4-20-2023.xlsx'
$medicalFile = 'ICSMA_CSV_4-20-2023.xlsx'
Write-Info 'Fetching community ICSMA snapshot'
try {
Download-File $medicalUrl (Join-Path $Destination $medicalFile)
}
catch {
Write-ErrorLine "Unable to download $medicalFile (optional): $_"
Remove-Item (Join-Path $Destination $medicalFile) -ErrorAction SilentlyContinue
}
Write-Info "Seed data ready in $Destination"
Write-Info 'Remember: data is licensed under ODbL v1.0 (see seed README).'

View File

@@ -1,38 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
DEST_DIR="${1:-$ROOT_DIR/seed-data/ics-cisa}"
mkdir -p "$DEST_DIR"
info() { printf "[ics-seed] %s\n" "$*"; }
error() { printf "[ics-seed][error] %s\n" "$*" >&2; }
download() {
local url="$1"
local target="$2"
info "Downloading $(basename "$target")"
curl -fL "$url" -o "$target"
sha256sum "$target" > "$target.sha256"
}
BASE="https://raw.githubusercontent.com/icsadvprj/ICS-Advisory-Project/main/ICS-CERT_ADV"
MASTER_FILE="CISA_ICS_ADV_Master.csv"
SNAPSHOT_2025="CISA_ICS_ADV_2025_10_09.csv"
info "Fetching ICS advisories seed data (ODbL v1.0)"
download "$BASE/$MASTER_FILE" "$DEST_DIR/$MASTER_FILE"
download "$BASE/$SNAPSHOT_2025" "$DEST_DIR/$SNAPSHOT_2025"
MEDICAL_URL="https://raw.githubusercontent.com/batarr22/ICSMA_CSV/main/ICSMA_CSV_4-20-2023.xlsx"
MEDICAL_FILE="ICSMA_CSV_4-20-2023.xlsx"
info "Fetching community ICSMA snapshot"
if curl -fL "$MEDICAL_URL" -o "$DEST_DIR/$MEDICAL_FILE"; then
sha256sum "$DEST_DIR/$MEDICAL_FILE" > "$DEST_DIR/$MEDICAL_FILE.sha256"
else
error "Unable to download $MEDICAL_FILE (optional)."
rm -f "$DEST_DIR/$MEDICAL_FILE"
fi
info "Seed data ready in $DEST_DIR"
info "Remember: data is licensed under ODbL v1.0 (see seed README)."

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-GRAPH-24-001: load test graph index/adjacency APIs
TARGET=${TARGET:-"http://localhost:5000"}
OUT="out/graph-load"
mkdir -p "$OUT"
USERS=${USERS:-8}
DURATION=${DURATION:-60}
RATE=${RATE:-200}
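# The heredoc below is a template; the sed calls after it substitute __USERS__/__DURATION__/__TARGET__.
# (Passing values via k6 -e env flags would be an alternative.) Note: RATE is currently unused.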
cat > "${OUT}/k6-graph.js" <<'EOF'
import http from 'k6/http';
import { sleep } from 'k6';
export const options = {
vus: __USERS__,
duration: '__DURATION__s',
thresholds: {
http_req_duration: ['p(95)<500'],
http_req_failed: ['rate<0.01'],
},
};
const targets = [
'/graph/api/index',
'/graph/api/adjacency?limit=100',
'/graph/api/search?q=log4j',
];
export default function () {
const host = __TARGET__;
targets.forEach(path => http.get(`${host}${path}`));
sleep(1);
}
EOF
sed -i "s/__USERS__/${USERS}/g" "${OUT}/k6-graph.js"
sed -i "s/__DURATION__/${DURATION}/g" "${OUT}/k6-graph.js"
sed -i "s@__TARGET__@\"${TARGET}\"@g" "${OUT}/k6-graph.js"
echo "[graph-load] running k6..."
k6 run "${OUT}/k6-graph.js" --summary-export "${OUT}/summary.json" --http-debug="off"
echo "[graph-load] summary written to ${OUT}/summary.json"

View File

@@ -1,21 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-GRAPH-24-003: simulation endpoint smoke
TARGET=${TARGET:-"http://localhost:5000"}
OUT="out/graph-sim"
mkdir -p "$OUT"
echo "[graph-sim] hitting simulation endpoints"
curl -sSf "${TARGET}/graph/api/simulation/ping" > "${OUT}/ping.json"
curl -sSf "${TARGET}/graph/api/simulation/run?limit=5" > "${OUT}/run.json"
cat > "${OUT}/summary.txt" <<EOF
ping: $(jq -r '.status' "${OUT}/ping.json" 2>/dev/null || echo "unknown")
run_len: $(jq '. | length' "${OUT}/run.json" 2>/dev/null || echo "0")
EOF
echo "[graph-sim] completed; summary:"
cat "${OUT}/summary.txt"

View File

@@ -1,30 +0,0 @@
import { chromium } from 'playwright';
import fs from 'fs';
const BASE_URL = process.env.GRAPH_UI_BASE ?? 'http://localhost:4200';
const OUT = process.env.OUT ?? 'out/graph-ui-perf';
const BUDGET_MS = Number(process.env.GRAPH_UI_BUDGET_MS ?? '3000');
(async () => {
fs.mkdirSync(OUT, { recursive: true });
const browser = await chromium.launch({ headless: true });
const page = await browser.newPage();
const start = Date.now();
await page.goto(`${BASE_URL}/graph`, { waitUntil: 'networkidle' });
await page.click('text=Explore'); // assumes nav element
await page.waitForSelector('canvas');
const duration = Date.now() - start;
const metrics = await page.evaluate(() => JSON.stringify(window.performance.timing));
fs.writeFileSync(`${OUT}/timing.json`, metrics);
fs.writeFileSync(`${OUT}/duration.txt`, `${duration}`);
if (duration > BUDGET_MS) {
console.error(`[graph-ui] perf budget exceeded: ${duration}ms > ${BUDGET_MS}ms`);
process.exit(1);
}
await browser.close();
console.log(`[graph-ui] load duration ${duration}ms (budget ${BUDGET_MS}ms)`);
})();

View File

@@ -1,75 +0,0 @@
#!/usr/bin/env python3
"""Download KISA/KNVD advisory HTML pages for offline analysis."""
from __future__ import annotations
import argparse
import datetime as dt
import sys
import xml.etree.ElementTree as ET
from pathlib import Path
from urllib.error import HTTPError, URLError
from urllib.parse import parse_qs, urlsplit
from urllib.request import Request, urlopen
FEED_URL = "https://knvd.krcert.or.kr/rss/securityInfo.do"
USER_AGENT = "Mozilla/5.0 (compatible; StellaOpsOffline/1.0)"
def fetch(url: str) -> bytes:
req = Request(url, headers={"User-Agent": USER_AGENT})
with urlopen(req, timeout=15) as resp:
return resp.read()
def iter_idxs(feed_xml: bytes) -> list[tuple[str, str]]:
root = ET.fromstring(feed_xml)
items = []
for item in root.findall(".//item"):
title = (item.findtext("title") or "").strip()
link = item.findtext("link") or ""
idx = parse_qs(urlsplit(link).query).get("IDX", [None])[0]
if idx:
items.append((idx, title))
return items
def capture(idx: str, title: str, out_dir: Path) -> Path:
url = f"https://knvd.krcert.or.kr/detailDos.do?IDX={idx}"
html = fetch(url)
target = out_dir / f"{idx}.html"
target.write_bytes(html)
print(f"saved {target} ({title})")
return target
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument("--out", type=Path, default=Path("seed-data/kisa/html"))
parser.add_argument("--limit", type=int, default=10, help="Maximum advisories to download")
args = parser.parse_args()
args.out.mkdir(parents=True, exist_ok=True)
print(f"[{dt.datetime.utcnow():%Y-%m-%d %H:%M:%S}Z] fetching RSS feed…")
try:
feed = fetch(FEED_URL)
except (URLError, HTTPError) as exc:
print("RSS fetch failed:", exc, file=sys.stderr)
return 1
items = iter_idxs(feed)[: args.limit]
if not items:
print("No advisories found in feed", file=sys.stderr)
return 1
for idx, title in items:
try:
capture(idx, title, args.out)
except (URLError, HTTPError) as exc:
print(f"failed {idx}: {exc}", file=sys.stderr)
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,14 +0,0 @@
# Mirror signing helpers
- `make-thin-v1.sh`: builds thin bundle v1, computes checksums, emits bundle meta (offline/rekor/mirror gaps), performs optional DSSE+TUF signing when `SIGN_KEY` is set, and runs the verifier.
- `sign_thin_bundle.py`: signs manifest (DSSE), bundle meta (DSSE), and root/targets/snapshot/timestamp JSON using an Ed25519 PEM key.
- `verify_thin_bundle.py`: checks SHA256 sidecars, manifest schema, tar determinism, required layers, optional bundle meta and DSSE signatures; accepts `--bundle-meta`, `--pubkey`, `--tenant`, `--environment`.
- `ci-sign.sh`: CI wrapper. Set `MIRROR_SIGN_KEY_B64` (base64-encoded Ed25519 PEM) and run; it builds, signs, and verifies in one step, emitting `milestone.json` with manifest/tar/bundle hashes. See the end-to-end sketch at the bottom of this file.
- `verify_oci_layout.py`: validates OCI layout/index/manifest and blob digests when `OCI=1` is used.
- `mirror-create.sh`: convenience wrapper to build + verify thin bundles (optional SIGN_KEY, time anchor, OCI flag).
- `mirror-verify.sh`: wrapper around `verify_thin_bundle.py` for quick hash/DSSE checks.
- `schedule-export-center-run.sh`: schedules an Export Center run for mirror bundles via HTTP POST; set `EXPORT_CENTER_BASE_URL`, `EXPORT_CENTER_TENANT`, `EXPORT_CENTER_TOKEN` (Bearer), optional `EXPORT_CENTER_PROJECT`; logs to `AUDIT_LOG_PATH` (default `logs/export-center-schedule.log`). Set `EXPORT_CENTER_ARTIFACTS_JSON` to inject bundle metadata into the request payload.
- `export-center-wire.sh`: builds `export-center-handoff.json` from `out/mirror/thin/milestone.json`, emits recommended Export Center targets, and (when `EXPORT_CENTER_AUTO_SCHEDULE=1`) calls `schedule-export-center-run.sh` to push the run. Outputs live under `out/mirror/thin/export-center/`.
- CI: `.gitea/workflows/mirror-sign.yml` runs `export-center-wire.sh` after signing; scheduling remains opt-in via secrets `EXPORT_CENTER_BASE_URL`, `EXPORT_CENTER_TOKEN`, `EXPORT_CENTER_TENANT`, `EXPORT_CENTER_PROJECT`, `EXPORT_CENTER_AUTO_SCHEDULE`.
Artifacts live under `out/mirror/thin/`.
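A minimal end-to-end sketch (illustrative: the key file, tenant, and environment are placeholders, and `verify_thin_bundle.py`'s exact positional arguments may differ):

```bash
export MIRROR_SIGN_KEY_B64="$(base64 -w0 ed25519-private.pem)"
scripts/mirror/ci-sign.sh   # builds, signs, verifies; emits milestone.json
python scripts/mirror/verify_thin_bundle.py \
  --bundle-meta out/mirror/thin/mirror-thin-v1.bundle.json \
  --pubkey out/mirror/thin/tuf/keys/ci-ed25519.pub \
  --tenant tenant-demo --environment lab
```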

View File

@@ -1,20 +0,0 @@
#!/usr/bin/env bash
# Verifies signing prerequisites without requiring the actual key contents.
set -euo pipefail
if [[ -z "${MIRROR_SIGN_KEY_B64:-}" ]]; then
if [[ "${REQUIRE_PROD_SIGNING:-0}" == "1" ]]; then
echo "[error] MIRROR_SIGN_KEY_B64 is required for production signing; set the secret before running." >&2
exit 2
fi
echo "[warn] MIRROR_SIGN_KEY_B64 is not set; ci-sign.sh will fall back to embedded test key (non-production)." >&2
fi
# basic base64 sanity check (skipped when the key is unset, since set -u would otherwise abort)
if [[ -n "${MIRROR_SIGN_KEY_B64:-}" ]] && ! printf "%s" "$MIRROR_SIGN_KEY_B64" | base64 -d >/dev/null 2>&1; then
echo "MIRROR_SIGN_KEY_B64 is not valid base64" >&2
exit 3
fi
# ensure scripts exist
for f in scripts/mirror/ci-sign.sh scripts/mirror/sign_thin_bundle.py scripts/mirror/verify_thin_bundle.py; do
[[ -x "$f" || -f "$f" ]] || { echo "$f missing" >&2; exit 4; }
done
echo "Signing prerequisites present (key env set, scripts available)."

View File

@@ -1,116 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Allow CI to fall back to a deterministic test key when MIRROR_SIGN_KEY_B64 is unset,
# but forbid this on release/tag builds when REQUIRE_PROD_SIGNING=1.
# Throwaway dev key (Ed25519) generated 2025-11-23; matches the value documented in
# docs/modules/mirror/signing-runbook.md. Safe for non-production smoke only.
DEFAULT_TEST_KEY_B64="LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1DNENBUUF3QlFZREsyVndCQ0lFSURqb3pDRVdKVVFUdW1xZ2gyRmZXcVBaemlQbkdaSzRvOFZRTThGYkZCSEcKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo="
if [[ -z "${MIRROR_SIGN_KEY_B64:-}" ]]; then
if [[ "${REQUIRE_PROD_SIGNING:-0}" == "1" ]]; then
echo "[error] MIRROR_SIGN_KEY_B64 is required for production signing; refusing to use test key." >&2
exit 1
fi
echo "[warn] MIRROR_SIGN_KEY_B64 not set; using embedded test key (non-production) for CI signing" >&2
MIRROR_SIGN_KEY_B64="$DEFAULT_TEST_KEY_B64"
fi
ROOT=$(cd "$(dirname "$0")/../.." && pwd)
KEYDIR="$ROOT/out/mirror/thin/tuf/keys"
mkdir -p "$KEYDIR"
KEYFILE="$KEYDIR/ci-ed25519.pem"
printf "%s" "$MIRROR_SIGN_KEY_B64" | base64 -d > "$KEYFILE"
chmod 600 "$KEYFILE"
# Export public key for TUF keyid calculation
openssl pkey -in "$KEYFILE" -pubout -out "$KEYDIR/ci-ed25519.pub" >/dev/null 2>&1
STAGE=${STAGE:-$ROOT/out/mirror/thin/stage-v1}
CREATED=${CREATED:-$(date -u +%Y-%m-%dT%H:%M:%SZ)}
TENANT_SCOPE=${TENANT_SCOPE:-tenant-demo}
ENV_SCOPE=${ENV_SCOPE:-lab}
CHUNK_SIZE=${CHUNK_SIZE:-5242880}
CHECKPOINT_FRESHNESS=${CHECKPOINT_FRESHNESS:-86400}
OCI=${OCI:-1}
SIGN_KEY="$KEYFILE" STAGE="$STAGE" CREATED="$CREATED" TENANT_SCOPE="$TENANT_SCOPE" ENV_SCOPE="$ENV_SCOPE" CHUNK_SIZE="$CHUNK_SIZE" CHECKPOINT_FRESHNESS="$CHECKPOINT_FRESHNESS" OCI="$OCI" "$ROOT/src/Mirror/StellaOps.Mirror.Creator/make-thin-v1.sh"
# Default to staged time-anchor unless caller overrides
TIME_ANCHOR_FILE=${TIME_ANCHOR_FILE:-$ROOT/out/mirror/thin/stage-v1/layers/time-anchor.json}
# Emit milestone summary with hashes for downstream consumers
MANIFEST_PATH="$ROOT/out/mirror/thin/mirror-thin-v1.manifest.json"
TAR_PATH="$ROOT/out/mirror/thin/mirror-thin-v1.tar.gz"
DSSE_PATH="$ROOT/out/mirror/thin/mirror-thin-v1.manifest.dsse.json"
BUNDLE_PATH="$ROOT/out/mirror/thin/mirror-thin-v1.bundle.json"
BUNDLE_DSSE_PATH="$ROOT/out/mirror/thin/mirror-thin-v1.bundle.dsse.json"
TIME_ANCHOR_DSSE_PATH="$TIME_ANCHOR_FILE.dsse.json"
TRANSPORT_PATH="$ROOT/out/mirror/thin/stage-v1/layers/transport-plan.json"
REKOR_POLICY_PATH="$ROOT/out/mirror/thin/stage-v1/layers/rekor-policy.json"
MIRROR_POLICY_PATH="$ROOT/out/mirror/thin/stage-v1/layers/mirror-policy.json"
OFFLINE_POLICY_PATH="$ROOT/out/mirror/thin/stage-v1/layers/offline-kit-policy.json"
SUMMARY_PATH="$ROOT/out/mirror/thin/milestone.json"
sha256() {
sha256sum "$1" | awk '{print $1}'
}
# Sign manifest, bundle meta, and time-anchor (if present)
python "$ROOT/scripts/mirror/sign_thin_bundle.py" \
--key "$KEYFILE" \
--manifest "$MANIFEST_PATH" \
--tar "$TAR_PATH" \
--tuf-dir "$ROOT/out/mirror/thin/tuf" \
--bundle "$BUNDLE_PATH" \
--time-anchor "$TIME_ANCHOR_FILE"
# Normalize time-anchor DSSE location for bundle meta/summary
if [[ -f "$TIME_ANCHOR_FILE.dsse.json" ]]; then
cp "$TIME_ANCHOR_FILE.dsse.json" "$TIME_ANCHOR_DSSE_PATH"
fi
# Refresh bundle meta hashes now that DSSE files exist
# NB: heredoc is intentionally unquoted so the shell expands $BUNDLE_PATH and friends into the Python source below.
python - <<PY
import json, pathlib, hashlib
bundle_path = pathlib.Path("$BUNDLE_PATH")
manifest_dsse = pathlib.Path("$DSSE_PATH")
bundle_dsse = pathlib.Path("$BUNDLE_DSSE_PATH")
time_anchor_dsse = pathlib.Path("$TIME_ANCHOR_DSSE_PATH")
def sha(path: pathlib.Path) -> str:
h = hashlib.sha256()
with path.open('rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
h.update(chunk)
return h.hexdigest()
data = json.loads(bundle_path.read_text())
art = data.setdefault('artifacts', {})
if manifest_dsse.exists():
art.setdefault('manifest_dsse', {})['sha256'] = sha(manifest_dsse)
if bundle_dsse.exists():
art.setdefault('bundle_dsse', {})['sha256'] = sha(bundle_dsse)
if time_anchor_dsse.exists():
art.setdefault('time_anchor_dsse', {})['sha256'] = sha(time_anchor_dsse)
bundle_path.write_text(json.dumps(data, indent=2, sort_keys=True) + "\n")
sha_path = bundle_path.with_suffix(bundle_path.suffix + '.sha256')
sha_path.write_text(f"{sha(bundle_path)} {bundle_path.name}\n")
PY
cat > "$SUMMARY_PATH" <<JSON
{
"created": "$CREATED",
"manifest": {"path": "$(basename "$MANIFEST_PATH")", "sha256": "$(sha256 "$MANIFEST_PATH")"},
"tarball": {"path": "$(basename "$TAR_PATH")", "sha256": "$(sha256 "$TAR_PATH")"},
"dsse": $( [[ -f "$DSSE_PATH" ]] && echo "{\"path\": \"$(basename "$DSSE_PATH")\", \"sha256\": \"$(sha256 "$DSSE_PATH")\"}" || echo "null" ),
"bundle": $( [[ -f "$BUNDLE_PATH" ]] && echo "{\"path\": \"$(basename "$BUNDLE_PATH")\", \"sha256\": \"$(sha256 "$BUNDLE_PATH")\"}" || echo "null" ),
"bundle_dsse": $( [[ -f "$BUNDLE_DSSE_PATH" ]] && echo "{\"path\": \"$(basename "$BUNDLE_DSSE_PATH")\", \"sha256\": \"$(sha256 "$BUNDLE_DSSE_PATH")\"}" || echo "null" ),
"time_anchor": $( [[ -n "${TIME_ANCHOR_FILE:-}" && -f "$TIME_ANCHOR_FILE" ]] && echo "{\"path\": \"$(basename "$TIME_ANCHOR_FILE")\", \"sha256\": \"$(sha256 "$TIME_ANCHOR_FILE")\"}" || echo "null" ),
"time_anchor_dsse": $( [[ -f "$TIME_ANCHOR_DSSE_PATH" ]] && echo "{\"path\": \"$(basename "$TIME_ANCHOR_DSSE_PATH")\", \"sha256\": \"$(sha256 "$TIME_ANCHOR_DSSE_PATH")\"}" || echo "null" )
,"policies": {
"transport": {"path": "$(basename "$TRANSPORT_PATH")", "sha256": "$(sha256 "$TRANSPORT_PATH")"},
"rekor": {"path": "$(basename "$REKOR_POLICY_PATH")", "sha256": "$(sha256 "$REKOR_POLICY_PATH")"},
"mirror": {"path": "$(basename "$MIRROR_POLICY_PATH")", "sha256": "$(sha256 "$MIRROR_POLICY_PATH")"},
"offline": {"path": "$(basename "$OFFLINE_POLICY_PATH")", "sha256": "$(sha256 "$OFFLINE_POLICY_PATH")"}
}
}
JSON
echo "Milestone summary written to $SUMMARY_PATH"

View File

@@ -1,122 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Prepare Export Center handoff metadata for mirror thin bundles and optionally schedule a run.
# Usage (handoff only):
# scripts/mirror/export-center-wire.sh
# Usage (handoff + schedule when secrets exist):
# EXPORT_CENTER_BASE_URL=https://export.example.com \
# EXPORT_CENTER_TOKEN=token123 \
# EXPORT_CENTER_TENANT=tenant-a \
# EXPORT_CENTER_AUTO_SCHEDULE=1 \
# scripts/mirror/export-center-wire.sh
# Inputs:
# - MILESTONE_PATH: path to milestone.json (default: out/mirror/thin/milestone.json)
# - EXPORT_CENTER_OUT_DIR: output directory for handoff files (default: out/mirror/thin/export-center)
# - EXPORT_CENTER_PROFILE_ID: profile identifier for the Export Center run (default: mirror:thin)
# - EXPORT_CENTER_TARGETS_JSON: override targets array sent to Export Center (JSON array string)
# - EXPORT_CENTER_FORMATS_JSON: override formats array (JSON array string; default: ["tar.gz","json","dsse"])
# - EXPORT_CENTER_AUTO_SCHEDULE: when "1", schedule a run using schedule-export-center-run.sh
# - EXPORT_CENTER_BASE_URL / EXPORT_CENTER_TENANT / EXPORT_CENTER_PROJECT / EXPORT_CENTER_TOKEN: forwarded to scheduler
# - EXPORT_CENTER_AUDIT_LOG: optional override for scheduler audit log path
MILESTONE_PATH="${MILESTONE_PATH:-out/mirror/thin/milestone.json}"
OUT_DIR="${EXPORT_CENTER_OUT_DIR:-out/mirror/thin/export-center}"
PROFILE_ID="${EXPORT_CENTER_PROFILE_ID:-mirror:thin}"
FORMATS_JSON="${EXPORT_CENTER_FORMATS_JSON:-[\"tar.gz\",\"json\",\"dsse\"]}"
AUTO_SCHEDULE="${EXPORT_CENTER_AUTO_SCHEDULE:-0}"
HANDOFF_PATH="${OUT_DIR}/export-center-handoff.json"
TARGETS_PATH="${OUT_DIR}/export-center-targets.json"
RESPONSE_PATH="${OUT_DIR}/schedule-response.json"
export HANDOFF_PATH TARGETS_PATH RESPONSE_PATH PROFILE_ID MILESTONE_PATH
mkdir -p "${OUT_DIR}"
PROFILE_ID="${PROFILE_ID}" MILESTONE_PATH="${MILESTONE_PATH}" HANDOFF_PATH="${HANDOFF_PATH}" TARGETS_PATH="${TARGETS_PATH}" python3 - <<'PY'
import datetime
import json
import os
import sys
from typing import Dict, Any
milestone_path = os.environ["MILESTONE_PATH"]
handoff_path = os.environ["HANDOFF_PATH"]
targets_path = os.environ["TARGETS_PATH"]
profile = os.environ.get("PROFILE_ID", "mirror:thin")
try:
with open(milestone_path, encoding="utf-8") as f:
milestone = json.load(f)
except FileNotFoundError:
print(f"milestone file not found: {milestone_path}", file=sys.stderr)
sys.exit(1)
artifacts = []
def add_artifact(name: str, entry: Dict[str, Any] | None) -> None:
if not isinstance(entry, dict):
return
path = entry.get("path")
sha = entry.get("sha256")
if path and sha:
artifacts.append({"name": name, "path": path, "sha256": sha})
add_artifact("manifest", milestone.get("manifest"))
add_artifact("manifest_dsse", milestone.get("dsse"))
add_artifact("bundle", milestone.get("tarball"))
add_artifact("bundle_meta", milestone.get("bundle"))
add_artifact("bundle_meta_dsse", milestone.get("bundle_dsse"))
add_artifact("time_anchor", milestone.get("time_anchor"))
for name, entry in sorted((milestone.get("policies") or {}).items()):
add_artifact(f"policy_{name}", entry)
handoff = {
"profileId": profile,
"generatedAt": datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z"),
"sourceMilestone": os.path.abspath(milestone_path),
"artifacts": artifacts,
}
with open(handoff_path, "w", encoding="utf-8") as f:
json.dump(handoff, f, indent=2)
with open(targets_path, "w", encoding="utf-8") as f:
json.dump([a["name"] for a in artifacts], f)
PY
ARTIFACTS_JSON=$(python3 - <<'PY'
import json
import os
with open(os.environ["HANDOFF_PATH"], encoding="utf-8") as f:
data = json.load(f)
print(json.dumps(data.get("artifacts", [])))
PY
)
ARTIFACTS_JSON="${ARTIFACTS_JSON//$'\n'/}"
TARGETS_JSON_DEFAULT=$(tr -d '\r\n' < "${TARGETS_PATH}")
TARGETS_JSON="${EXPORT_CENTER_TARGETS_JSON:-$TARGETS_JSON_DEFAULT}"
echo "[info] Export Center handoff written to ${HANDOFF_PATH}"
echo "[info] Recommended targets: ${TARGETS_JSON}"
schedule_note="AUTO_SCHEDULE=0"
if [[ "${AUTO_SCHEDULE}" == "1" ]]; then
schedule_note="missing EXPORT_CENTER_BASE_URL"
if [[ -n "${EXPORT_CENTER_BASE_URL:-}" ]]; then
export EXPORT_CENTER_ARTIFACTS_JSON="${ARTIFACTS_JSON}"
schedule_note="scheduled"
bash src/Mirror/StellaOps.Mirror.Creator/schedule-export-center-run.sh "${PROFILE_ID}" "${TARGETS_JSON}" "${FORMATS_JSON}" | tee "${RESPONSE_PATH}"
fi
fi
if [[ ! -f "${RESPONSE_PATH}" ]]; then
cat > "${RESPONSE_PATH}" <<JSON
{"scheduled": false, "reason": "${schedule_note}"}
JSON
fi
echo "[info] Scheduler response captured at ${RESPONSE_PATH}"

View File

@@ -1,45 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Deterministic wrapper for building mirror-thin-v1 bundles.
# Usage: mirror-create.sh [--out out/mirror/thin] [--sign-key path.pem] [--oci] [--time-anchor path.json]
OUT="out/mirror/thin"
SIGN_KEY=""
TIME_ANCHOR=""
OCI=0
usage() {
echo "Usage: $0 [--out <dir>] [--sign-key key.pem] [--oci] [--time-anchor path.json]" >&2
exit 2
}
while [[ $# -gt 0 ]]; do
case "$1" in
--out) OUT=${2:-}; shift ;;
--sign-key) SIGN_KEY=${2:-}; shift ;;
--time-anchor) TIME_ANCHOR=${2:-}; shift ;;
--oci) OCI=1 ;;
*) usage ;;
esac
shift
done
ROOT=$(cd "$(dirname "$0")/.." && pwd)
pushd "$ROOT/.." >/dev/null
export SIGN_KEY
export TIME_ANCHOR_FILE=${TIME_ANCHOR:-}
export OCI
export OUT
src/Mirror/StellaOps.Mirror.Creator/make-thin-v1.sh
echo "Bundle built under $OUT"
python scripts/mirror/verify_thin_bundle.py \
"$OUT/mirror-thin-v1.manifest.json" \
"$OUT/mirror-thin-v1.tar.gz" \
--bundle-meta "$OUT/mirror-thin-v1.bundle.json"
popd >/dev/null
echo "Create/verify completed"

View File

@@ -1,37 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Verify a mirror-thin-v1 bundle and optional DSSE signatures.
# Usage: mirror-verify.sh manifest.json bundle.tar.gz [--bundle-meta bundle.json] [--pubkey key.pub] [--tenant t] [--environment env]
manifest=${1:-}
bundle=${2:-}
shift 2 || true
bundle_meta=""
pubkey=""
tenant=""
environment=""
while [[ $# -gt 0 ]]; do
case "$1" in
--bundle-meta) bundle_meta=${2:-}; shift ;;
--pubkey) pubkey=${2:-}; shift ;;
--tenant) tenant=${2:-}; shift ;;
--environment) environment=${2:-}; shift ;;
*) echo "Unknown arg $1" >&2; exit 2 ;;
esac
shift
done
[[ -z "$manifest" || -z "$bundle" ]] && { echo "manifest and bundle required" >&2; exit 2; }
args=("$manifest" "$bundle")
[[ -n "$bundle_meta" ]] && args+=("--bundle-meta" "$bundle_meta")
[[ -n "$pubkey" ]] && args+=("--pubkey" "$pubkey")
[[ -n "$tenant" ]] && args+=("--tenant" "$tenant")
[[ -n "$environment" ]] && args+=("--environment" "$environment")
python scripts/mirror/verify_thin_bundle.py "${args[@]}"
echo "Mirror bundle verification passed."

View File

@@ -1,105 +0,0 @@
#!/usr/bin/env python3
"""
Sign mirror-thin-v1 artefacts using an Ed25519 key and emit DSSE + TUF signatures.
Usage:
python scripts/mirror/sign_thin_bundle.py \
--key out/mirror/thin/tuf/keys/mirror-ed25519-test-1.pem \
--manifest out/mirror/thin/mirror-thin-v1.manifest.json \
--tar out/mirror/thin/mirror-thin-v1.tar.gz \
--tuf-dir out/mirror/thin/tuf \
--time-anchor out/mirror/thin/stage-v1/layers/time-anchor.json
Writes:
- mirror-thin-v1.manifest.dsse.json
- mirror-thin-v1.bundle.dsse.json (optional, when --bundle is provided)
- updates signatures in root.json, targets.json, snapshot.json, timestamp.json
"""
import argparse, base64, json, pathlib, hashlib
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
def b64url(data: bytes) -> str:
return base64.urlsafe_b64encode(data).rstrip(b"=").decode()
def load_key(path: pathlib.Path) -> Ed25519PrivateKey:
return serialization.load_pem_private_key(path.read_bytes(), password=None)
def keyid_from_pub(pub_path: pathlib.Path) -> str:
raw = pub_path.read_bytes()
return hashlib.sha256(raw).hexdigest()
def sign_bytes(key: Ed25519PrivateKey, data: bytes) -> bytes:
return key.sign(data)
def write_json(path: pathlib.Path, obj):
path.write_text(json.dumps(obj, indent=2, sort_keys=True) + "\n")
def sign_tuf(path: pathlib.Path, keyid: str, key: Ed25519PrivateKey):
data = path.read_bytes()
sig = sign_bytes(key, data)
obj = json.loads(data)
obj["signatures"] = [{"keyid": keyid, "sig": b64url(sig)}]
write_json(path, obj)
def main():
ap = argparse.ArgumentParser()
ap.add_argument("--key", required=True, type=pathlib.Path)
ap.add_argument("--manifest", required=True, type=pathlib.Path)
ap.add_argument("--tar", required=True, type=pathlib.Path)
ap.add_argument("--tuf-dir", required=True, type=pathlib.Path)
ap.add_argument("--bundle", required=False, type=pathlib.Path)
ap.add_argument("--time-anchor", required=False, type=pathlib.Path)
args = ap.parse_args()
key = load_key(args.key)
pub_path = args.key.with_suffix(".pub")
keyid = keyid_from_pub(pub_path)
manifest_bytes = args.manifest.read_bytes()
sig = sign_bytes(key, manifest_bytes)
dsse = {
"payloadType": "application/vnd.stellaops.mirror.manifest+json",
"payload": b64url(manifest_bytes),
"signatures": [{"keyid": keyid, "sig": b64url(sig)}],
}
dsse_path = args.manifest.with_suffix(".dsse.json")
write_json(dsse_path, dsse)
if args.bundle:
bundle_bytes = args.bundle.read_bytes()
bundle_sig = sign_bytes(key, bundle_bytes)
bundle_dsse = {
"payloadType": "application/vnd.stellaops.mirror.bundle+json",
"payload": b64url(bundle_bytes),
"signatures": [{"keyid": keyid, "sig": b64url(bundle_sig)}],
}
bundle_dsse_path = args.bundle.with_suffix(".dsse.json")
write_json(bundle_dsse_path, bundle_dsse)
anchor_dsse_path = None
if args.time_anchor:
anchor_bytes = args.time_anchor.read_bytes()
anchor_sig = sign_bytes(key, anchor_bytes)
anchor_dsse = {
"payloadType": "application/vnd.stellaops.time-anchor+json",
"payload": b64url(anchor_bytes),
"signatures": [{"keyid": keyid, "sig": b64url(anchor_sig)}],
}
anchor_dsse_path = args.time_anchor.with_suffix(".dsse.json")
write_json(anchor_dsse_path, anchor_dsse)
# update TUF metadata
for name in ["root.json", "targets.json", "snapshot.json", "timestamp.json"]:
sign_tuf(args.tuf_dir / name, keyid, key)
parts = [f"manifest DSSE -> {dsse_path}"]
if args.bundle:
parts.append(f"bundle DSSE -> {bundle_dsse_path}")
if anchor_dsse_path:
parts.append(f"time anchor DSSE -> {anchor_dsse_path}")
parts.append("TUF metadata updated")
print(f"Signed DSSE + TUF using keyid {keyid}; " + ", ".join(parts))
if __name__ == "__main__":
main()
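The signer expects a PEM Ed25519 private key with a sibling .pub file, and derives the keyid as SHA-256 over the raw .pub bytes. A sketch for generating a compatible test keypair with the same cryptography library (the output location is illustrative):

import hashlib, pathlib
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

key_dir = pathlib.Path("out/mirror/thin/tuf/keys")  # illustrative location
key_dir.mkdir(parents=True, exist_ok=True)
private_key = Ed25519PrivateKey.generate()
pem = private_key.private_bytes(
    serialization.Encoding.PEM,
    serialization.PrivateFormat.PKCS8,
    serialization.NoEncryption(),
)
pub = private_key.public_key().public_bytes(
    serialization.Encoding.PEM,
    serialization.PublicFormat.SubjectPublicKeyInfo,
)
(key_dir / "ci-ed25519.pem").write_bytes(pem)
(key_dir / "ci-ed25519.pub").write_bytes(pub)
# keyid convention from sign_thin_bundle.py: sha256 over the raw .pub file bytes
print("keyid:", hashlib.sha256(pub).hexdigest())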

View File

@@ -1,77 +0,0 @@
#!/usr/bin/env python3
"""
Verify OCI layout emitted by make-thin-v1.sh when OCI=1.
Checks:
1) oci-layout exists and version is 1.0.0
2) index.json manifest digest/size match manifest.json hash/size
3) manifest.json references config/layers present in blobs with matching sha256 and size
Usage:
python scripts/mirror/verify_oci_layout.py out/mirror/thin/oci
Exit 0 on success, non-zero on failure with message.
"""
import hashlib, json, pathlib, sys
def sha256(path: pathlib.Path) -> str:
h = hashlib.sha256()
with path.open('rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
h.update(chunk)
return h.hexdigest()
def main():
if len(sys.argv) != 2:
print(__doc__)
sys.exit(2)
root = pathlib.Path(sys.argv[1])
layout = root / "oci-layout"
index = root / "index.json"
manifest = root / "manifest.json"
if not layout.exists() or not index.exists() or not manifest.exists():
raise SystemExit("missing oci-layout/index.json/manifest.json")
layout_obj = json.loads(layout.read_text())
if layout_obj.get("imageLayoutVersion") != "1.0.0":
raise SystemExit("oci-layout version not 1.0.0")
idx_obj = json.loads(index.read_text())
if not idx_obj.get("manifests"):
raise SystemExit("index.json manifests empty")
man_digest = idx_obj["manifests"][0]["digest"]
man_size = idx_obj["manifests"][0]["size"]
actual_man_sha = sha256(manifest)
if man_digest != f"sha256:{actual_man_sha}":
raise SystemExit(f"manifest digest mismatch: {man_digest} vs sha256:{actual_man_sha}")
if man_size != manifest.stat().st_size:
raise SystemExit("manifest size mismatch")
man_obj = json.loads(manifest.read_text())
blobs = root / "blobs" / "sha256"
# config
cfg_digest = man_obj["config"]["digest"].split(":",1)[1]
cfg_size = man_obj["config"]["size"]
cfg_path = blobs / cfg_digest
if not cfg_path.exists():
raise SystemExit(f"config blob missing: {cfg_path}")
if cfg_path.stat().st_size != cfg_size:
raise SystemExit("config size mismatch")
if sha256(cfg_path) != cfg_digest:
raise SystemExit("config digest mismatch")
for layer in man_obj.get("layers", []):
ldigest = layer["digest"].split(":",1)[1]
lsize = layer["size"]
lpath = blobs / ldigest
if not lpath.exists():
raise SystemExit(f"layer blob missing: {lpath}")
if lpath.stat().st_size != lsize:
raise SystemExit("layer size mismatch")
if sha256(lpath) != ldigest:
raise SystemExit("layer digest mismatch")
print("OK: OCI layout verified")
if __name__ == "__main__":
main()
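A minimal self-consistent fixture for exercising this verifier can be synthesized in a few lines; the media types and blob contents below are placeholders:

import hashlib, json, pathlib

def blob(root: pathlib.Path, data: bytes) -> dict:
    """Write data into blobs/sha256/<digest> and return a descriptor."""
    digest = hashlib.sha256(data).hexdigest()
    blobs = root / "blobs" / "sha256"
    blobs.mkdir(parents=True, exist_ok=True)
    (blobs / digest).write_bytes(data)
    return {"digest": f"sha256:{digest}", "size": len(data)}

root = pathlib.Path("out/oci-fixture")  # illustrative output dir
root.mkdir(parents=True, exist_ok=True)
cfg = blob(root, json.dumps({"architecture": "amd64"}).encode())
layer = blob(root, b"layer-bytes")
manifest = {
    "schemaVersion": 2,
    "config": {**cfg, "mediaType": "application/vnd.oci.image.config.v1+json"},
    "layers": [{**layer, "mediaType": "application/vnd.oci.image.layer.v1.tar"}],
}
manifest_bytes = json.dumps(manifest, sort_keys=True).encode()
(root / "manifest.json").write_bytes(manifest_bytes)
index = {
    "schemaVersion": 2,
    "manifests": [{
        "digest": f"sha256:{hashlib.sha256(manifest_bytes).hexdigest()}",
        "size": len(manifest_bytes),
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
    }],
}
(root / "index.json").write_text(json.dumps(index))
(root / "oci-layout").write_text(json.dumps({"imageLayoutVersion": "1.0.0"}))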

View File

@@ -1,293 +0,0 @@
#!/usr/bin/env python3
"""
Verifier for mirror-thin-v1 artefacts and bundle meta.
Checks:
1) SHA256 of manifest/tarball (and optional bundle meta) matches sidecars.
2) Manifest schema contains required fields and required layer files exist.
3) Tarball headers deterministic (sorted paths, uid/gid=0, mtime=0).
4) Tar contents match manifest digests.
5) Optional: verify DSSE signatures for manifest/bundle when a public key is provided.
6) Optional: validate bundle meta (tenant/env scope, policy hashes, gap coverage counts).
Usage:
python scripts/mirror/verify_thin_bundle.py \
out/mirror/thin/mirror-thin-v1.manifest.json \
out/mirror/thin/mirror-thin-v1.tar.gz \
--bundle-meta out/mirror/thin/mirror-thin-v1.bundle.json \
--pubkey out/mirror/thin/tuf/keys/ci-ed25519.pub \
--tenant tenant-demo --environment lab
Exit code 0 on success; non-zero on any check failure.
"""
import argparse
import base64
import hashlib
import json
import pathlib
import sys
import tarfile
from typing import Optional
try:
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey
CRYPTO_AVAILABLE = True
except ImportError: # pragma: no cover - surfaced as runtime guidance
CRYPTO_AVAILABLE = False
REQUIRED_FIELDS = ["version", "created", "layers", "indexes"]
REQUIRED_LAYER_FILES = {
"layers/observations.ndjson",
"layers/time-anchor.json",
"layers/transport-plan.json",
"layers/rekor-policy.json",
"layers/mirror-policy.json",
"layers/offline-kit-policy.json",
"layers/artifact-hashes.json",
"indexes/observations.index",
}
def _b64url_decode(data: str) -> bytes:
padding = "=" * (-len(data) % 4)
return base64.urlsafe_b64decode(data + padding)
def sha256_file(path: pathlib.Path) -> str:
h = hashlib.sha256()
with path.open("rb") as f:
for chunk in iter(lambda: f.read(8192), b""):
h.update(chunk)
return h.hexdigest()
def load_sha256_sidecar(path: pathlib.Path) -> str:
sidecar = path.with_suffix(path.suffix + ".sha256")
if not sidecar.exists():
raise SystemExit(f"missing sidecar {sidecar}")
return sidecar.read_text().strip().split()[0]
def check_schema(manifest: dict):
missing = [f for f in REQUIRED_FIELDS if f not in manifest]
if missing:
raise SystemExit(f"manifest missing fields: {missing}")
def normalize(name: str) -> str:
return name[2:] if name.startswith("./") else name
def check_tar_determinism(tar_path: pathlib.Path):
with tarfile.open(tar_path, "r:gz") as tf:
names = [normalize(n) for n in tf.getnames()]
if names != sorted(names):
raise SystemExit("tar entries not sorted")
for m in tf.getmembers():
if m.uid != 0 or m.gid != 0:
raise SystemExit(f"tar header uid/gid not zero for {m.name}")
if m.mtime != 0:
raise SystemExit(f"tar header mtime not zero for {m.name}")
def check_required_layers(tar_path: pathlib.Path):
with tarfile.open(tar_path, "r:gz") as tf:
names = {normalize(n) for n in tf.getnames()}
for required in REQUIRED_LAYER_FILES:
if required not in names:
raise SystemExit(f"required file missing from bundle: {required}")
def check_content_hashes(manifest: dict, tar_path: pathlib.Path):
with tarfile.open(tar_path, "r:gz") as tf:
def get(name: str):
try:
return tf.getmember(name)
except KeyError:
return tf.getmember(f"./{name}")
for layer in manifest.get("layers", []):
name = layer["path"]
info = get(name)
data = tf.extractfile(info).read()
digest = hashlib.sha256(data).hexdigest()
if layer["digest"] != f"sha256:{digest}":
raise SystemExit(f"layer digest mismatch {name}: {digest}")
for idx in manifest.get("indexes", []):
name = idx['name']
if not name.startswith("indexes/"):
name = f"indexes/{name}"
info = get(name)
data = tf.extractfile(info).read()
digest = hashlib.sha256(data).hexdigest()
if idx["digest"] != f"sha256:{digest}":
raise SystemExit(f"index digest mismatch {name}: {digest}")
def read_tar_entry(tar_path: pathlib.Path, name: str) -> bytes:
with tarfile.open(tar_path, "r:gz") as tf:
try:
info = tf.getmember(name)
except KeyError:
info = tf.getmember(f"./{name}")
data = tf.extractfile(info).read()
return data
def load_pubkey(path: pathlib.Path) -> Ed25519PublicKey:
if not CRYPTO_AVAILABLE:
raise SystemExit("cryptography is required for DSSE verification; install before using --pubkey")
return serialization.load_pem_public_key(path.read_bytes())
def verify_dsse(dsse_path: pathlib.Path, pubkey_path: pathlib.Path, expected_payload: pathlib.Path, expected_type: str):
dsse_obj = json.loads(dsse_path.read_text())
if dsse_obj.get("payloadType") != expected_type:
raise SystemExit(f"DSSE payloadType mismatch for {dsse_path}")
payload = _b64url_decode(dsse_obj.get("payload", ""))
if payload != expected_payload.read_bytes():
raise SystemExit(f"DSSE payload mismatch for {dsse_path}")
sigs = dsse_obj.get("signatures") or []
if not sigs:
raise SystemExit(f"DSSE missing signatures: {dsse_path}")
pub = load_pubkey(pubkey_path)
try:
pub.verify(_b64url_decode(sigs[0]["sig"]), payload)
except Exception as exc: # pragma: no cover - cryptography raises InvalidSignature
raise SystemExit(f"DSSE signature verification failed for {dsse_path}: {exc}")
def check_bundle_meta(meta_path: pathlib.Path, manifest_path: pathlib.Path, tar_path: pathlib.Path, tenant: Optional[str], environment: Optional[str]):
meta = json.loads(meta_path.read_text())
for field in ["bundle", "version", "artifacts", "gaps", "tooling"]:
if field not in meta:
raise SystemExit(f"bundle meta missing field {field}")
if tenant and meta.get("tenant") != tenant:
raise SystemExit(f"bundle tenant mismatch: {meta.get('tenant')} != {tenant}")
if environment and meta.get("environment") != environment:
raise SystemExit(f"bundle environment mismatch: {meta.get('environment')} != {environment}")
artifacts = meta["artifacts"]
def expect(name: str, path: pathlib.Path):
recorded = artifacts.get(name)
if not recorded:
raise SystemExit(f"bundle meta missing artifact entry: {name}")
expected = recorded.get("sha256")
if expected and expected != sha256_file(path):
raise SystemExit(f"bundle meta digest mismatch for {name}")
expect("manifest", manifest_path)
expect("tarball", tar_path)
# DSSE sidecars are optional but if present, validate hashes
dsse_manifest = artifacts.get("manifest_dsse")
if dsse_manifest and dsse_manifest.get("path"):
expect("manifest_dsse", meta_path.parent / dsse_manifest["path"])
dsse_bundle = artifacts.get("bundle_dsse")
if dsse_bundle and dsse_bundle.get("path"):
expect("bundle_dsse", meta_path.parent / dsse_bundle["path"])
dsse_anchor = artifacts.get("time_anchor_dsse")
if dsse_anchor and dsse_anchor.get("path"):
expect("time_anchor_dsse", meta_path.parent / dsse_anchor["path"])
for extra in ["time_anchor", "transport_plan", "rekor_policy", "mirror_policy", "offline_policy", "artifact_hashes"]:
rec = artifacts.get(extra)
if not rec:
raise SystemExit(f"bundle meta missing artifact entry: {extra}")
if not rec.get("path"):
raise SystemExit(f"bundle meta missing path for {extra}")
time_anchor_dsse = artifacts.get("time_anchor_dsse")
if time_anchor_dsse:
if not time_anchor_dsse.get("path"):
raise SystemExit("bundle meta missing path for time_anchor_dsse")
if not (meta_path.parent / time_anchor_dsse["path"]).exists():
raise SystemExit("time_anchor_dsse referenced but file missing")
for group, expected_count in [("ok", 10), ("rk", 10), ("ms", 10)]:
if len(meta.get("gaps", {}).get(group, [])) != expected_count:
raise SystemExit(f"bundle meta gaps.{group} expected {expected_count} entries")
root_guess = manifest_path.parents[3] if len(manifest_path.parents) > 3 else manifest_path.parents[-1]
tool_expectations = {
'make_thin_v1_sh': root_guess / 'src' / 'Mirror' / 'StellaOps.Mirror.Creator' / 'make-thin-v1.sh',
'sign_script': root_guess / 'scripts' / 'mirror' / 'sign_thin_bundle.py',
'verify_script': root_guess / 'scripts' / 'mirror' / 'verify_thin_bundle.py',
'verify_oci': root_guess / 'scripts' / 'mirror' / 'verify_oci_layout.py'
}
for key, path in tool_expectations.items():
recorded = meta['tooling'].get(key)
if not recorded:
raise SystemExit(f"tool hash missing for {key}")
actual = sha256_file(path)
if recorded != actual:
raise SystemExit(f"tool hash mismatch for {key}")
if meta.get("checkpoint_freshness_seconds", 0) <= 0:
raise SystemExit("checkpoint_freshness_seconds must be positive")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("manifest", type=pathlib.Path)
parser.add_argument("tar", type=pathlib.Path)
parser.add_argument("--bundle-meta", type=pathlib.Path)
parser.add_argument("--pubkey", type=pathlib.Path)
parser.add_argument("--tenant", type=str)
parser.add_argument("--environment", type=str)
args = parser.parse_args()
manifest_path = args.manifest
tar_path = args.tar
bundle_meta = args.bundle_meta
bundle_dsse = bundle_meta.with_suffix(".dsse.json") if bundle_meta else None
manifest_dsse = manifest_path.with_suffix(".dsse.json")
time_anchor_dsse = None
time_anchor_path = tar_path.parent / "stage-v1" / "layers" / "time-anchor.json"
man_expected = load_sha256_sidecar(manifest_path)
tar_expected = load_sha256_sidecar(tar_path)
if sha256_file(manifest_path) != man_expected:
raise SystemExit("manifest sha256 mismatch")
if sha256_file(tar_path) != tar_expected:
raise SystemExit("tarball sha256 mismatch")
manifest = json.loads(manifest_path.read_text())
check_schema(manifest)
check_tar_determinism(tar_path)
check_required_layers(tar_path)
check_content_hashes(manifest, tar_path)
if bundle_meta:
if not bundle_meta.exists():
raise SystemExit(f"bundle meta missing: {bundle_meta}")
meta_expected = load_sha256_sidecar(bundle_meta)
if sha256_file(bundle_meta) != meta_expected:
raise SystemExit("bundle meta sha256 mismatch")
check_bundle_meta(bundle_meta, manifest_path, tar_path, args.tenant, args.environment)
meta = json.loads(bundle_meta.read_text())
ta_entry = meta.get("artifacts", {}).get("time_anchor_dsse")
if ta_entry and ta_entry.get("path"):
ta_path = bundle_meta.parent / ta_entry["path"]
if sha256_file(ta_path) != ta_entry.get("sha256"):
raise SystemExit("time_anchor_dsse sha256 mismatch")
time_anchor_dsse = ta_path
if args.pubkey:
pubkey = args.pubkey
if manifest_dsse.exists():
verify_dsse(manifest_dsse, pubkey, manifest_path, "application/vnd.stellaops.mirror.manifest+json")
if bundle_dsse and bundle_dsse.exists():
verify_dsse(bundle_dsse, pubkey, bundle_meta, "application/vnd.stellaops.mirror.bundle+json")
if time_anchor_dsse and time_anchor_dsse.exists() and time_anchor_path.exists():
anchor_bytes = read_tar_entry(tar_path, "layers/time-anchor.json")
tmp_anchor = tar_path.parent / "time-anchor.verify.json"
tmp_anchor.write_bytes(anchor_bytes)
verify_dsse(time_anchor_dsse, pubkey, tmp_anchor, "application/vnd.stellaops.time-anchor+json")
tmp_anchor.unlink(missing_ok=True)
print("OK: mirror-thin bundle verified")
if __name__ == "__main__":
main()
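On the producer side, the determinism rules enforced here (sorted entry names, uid/gid=0, mtime=0) can be satisfied with tarfile plus normalized headers. A sketch that also pins the gzip mtime so repeated builds are byte-identical; it only covers packing, not the required-layer layout:

import gzip, io, pathlib, tarfile

def write_deterministic_tgz(src_dir: pathlib.Path, out_path: pathlib.Path) -> None:
    """Pack src_dir with sorted paths and zeroed uid/gid/mtime so repeated
    builds produce byte-identical archives."""
    files = sorted(
        (p for p in src_dir.rglob("*") if p.is_file()),
        key=lambda p: p.relative_to(src_dir).as_posix(),
    )
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w") as tf:
        for path in files:
            info = tf.gettarinfo(path, arcname=path.relative_to(src_dir).as_posix())
            info.uid = info.gid = 0
            info.uname = info.gname = ""
            info.mtime = 0
            with path.open("rb") as f:
                tf.addfile(info, f)
    with open(out_path, "wb") as out:
        # mtime=0 keeps the gzip header deterministic as well
        with gzip.GzipFile(fileobj=out, mode="wb", mtime=0) as gz:
            gz.write(buf.getvalue())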

View File

@@ -1,143 +0,0 @@
#!/usr/bin/env python3
"""
DSSE signing utility for notification schemas and offline kit manifests.
Uses HMAC-SHA256 with Pre-Authentication Encoding (PAE) per DSSE spec.
Development key: etc/secrets/dsse-dev.signing.json
CI/Production: Use secrets.COSIGN_KEY_REF or equivalent HSM-backed key.
Usage:
python scripts/notifications/sign-dsse.py <input.dsse.json> [--key <key-file>] [--output <output.dsse.json>]
python scripts/notifications/sign-dsse.py docs/notifications/schemas/notify-schemas-catalog.dsse.json
"""
import argparse
import base64
import hashlib
import hmac
import json
import sys
from datetime import datetime, timezone
from pathlib import Path
def build_pae(payload_type: str, payload_bytes: bytes) -> bytes:
    """Build Pre-Authentication Encoding per the DSSE spec:
    PAE = "DSSEv1" SP LEN(type) SP type SP LEN(payload) SP payload,
    where the lengths are ASCII decimal integers and SP is a single space."""
    type_bytes = payload_type.encode("utf-8") if payload_type else b""
    return b" ".join([
        b"DSSEv1",
        str(len(type_bytes)).encode("ascii"),
        type_bytes,
        str(len(payload_bytes)).encode("ascii"),
        payload_bytes,
    ])
def compute_hmac_signature(secret_b64: str, pae: bytes) -> str:
"""Compute HMAC-SHA256 signature and return base64."""
secret_bytes = base64.b64decode(secret_b64)
signature = hmac.new(secret_bytes, pae, hashlib.sha256).digest()
return base64.b64encode(signature).decode("utf-8")
def load_key(key_path: Path) -> dict:
"""Load signing key from JSON file."""
with open(key_path, "r", encoding="utf-8") as f:
key_data = json.load(f)
required = ["keyId", "secret", "algorithm"]
for field in required:
if field not in key_data:
raise ValueError(f"Key file missing required field: {field}")
if key_data["algorithm"].upper() != "HMACSHA256":
raise ValueError(f"Unsupported algorithm: {key_data['algorithm']}")
return key_data
def sign_dsse(input_path: Path, key_data: dict, output_path: Path | None = None) -> dict:
"""Sign a DSSE envelope file."""
with open(input_path, "r", encoding="utf-8") as f:
envelope = json.load(f)
if "payloadType" not in envelope or "payload" not in envelope:
raise ValueError("Input file is not a valid DSSE envelope (missing payloadType or payload)")
payload_type = envelope["payloadType"]
payload_b64 = envelope["payload"]
payload_bytes = base64.b64decode(payload_b64)
# Build PAE and compute signature
pae = build_pae(payload_type, payload_bytes)
signature = compute_hmac_signature(key_data["secret"], pae)
# Create signature object
sig_obj = {
"sig": signature,
"keyid": key_data["keyId"]
}
    # Record the signing time (informational; not covered by the signature)
    sig_obj["signedAt"] = datetime.now(timezone.utc).isoformat(timespec="seconds")
# Update envelope with signature
if "signatures" not in envelope or not envelope["signatures"]:
envelope["signatures"] = []
# Remove any existing signature with the same keyId
envelope["signatures"] = [s for s in envelope["signatures"] if s.get("keyid") != key_data["keyId"]]
envelope["signatures"].append(sig_obj)
# Remove note field if present (was a placeholder)
envelope.pop("note", None)
# Write output
out_path = output_path or input_path
with open(out_path, "w", encoding="utf-8") as f:
json.dump(envelope, f, indent=2, ensure_ascii=False)
f.write("\n")
return envelope
def main():
parser = argparse.ArgumentParser(description="Sign DSSE envelope files with HMAC-SHA256")
parser.add_argument("input", type=Path, help="Input DSSE envelope file")
parser.add_argument("--key", "-k", type=Path,
default=Path("etc/secrets/dsse-dev.signing.json"),
help="Signing key JSON file (default: etc/secrets/dsse-dev.signing.json)")
parser.add_argument("--output", "-o", type=Path, help="Output file (default: overwrite input)")
args = parser.parse_args()
if not args.input.exists():
print(f"Error: Input file not found: {args.input}", file=sys.stderr)
sys.exit(1)
if not args.key.exists():
print(f"Error: Key file not found: {args.key}", file=sys.stderr)
sys.exit(1)
try:
key_data = load_key(args.key)
result = sign_dsse(args.input, key_data, args.output)
out_path = args.output or args.input
sig = result["signatures"][-1]
print(f"Signed {args.input} with key {sig['keyid']}")
print(f" Signature: {sig['sig'][:32]}...")
print(f" Output: {out_path}")
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
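Verification is the mirror image of signing: rebuild the PAE from the envelope and compare HMACs in constant time. A sketch assuming the same key-file format as above:

import base64, hashlib, hmac, json

def verify_dsse_hmac(envelope_path: str, key_path: str) -> bool:
    """Recompute the PAE-based HMAC and compare against stored signatures."""
    with open(envelope_path, encoding="utf-8") as f:
        env = json.load(f)
    with open(key_path, encoding="utf-8") as f:
        key = json.load(f)
    payload = base64.b64decode(env["payload"])
    type_bytes = env["payloadType"].encode("utf-8")
    pae = b" ".join([
        b"DSSEv1",
        str(len(type_bytes)).encode("ascii"), type_bytes,
        str(len(payload)).encode("ascii"), payload,
    ])
    expected = hmac.new(base64.b64decode(key["secret"]), pae, hashlib.sha256).digest()
    # Accept if any signature from our keyId matches (constant-time compare)
    return any(
        sig.get("keyid") == key["keyId"]
        and hmac.compare_digest(base64.b64decode(sig["sig"]), expected)
        for sig in env.get("signatures", [])
    )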

View File

@@ -1,134 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Incident mode automation
# - Enables a feature-flag JSON when burn rate crosses threshold
# - Writes retention override parameters for downstream storage/ingest systems
# - Resets automatically after a cooldown period once burn subsides
# All inputs are provided via CLI flags or env vars to remain offline-friendly.
usage() {
cat <<'USAGE'
Usage: incident-mode.sh --burn-rate <float> [--threshold 2.0] [--reset-threshold 0.5] \
[--state-dir out/incident-mode] [--retention-hours 24] \
[--cooldown-mins 30] [--note "text"]
Environment overrides:
INCIDENT_STATE_DIR default: out/incident-mode
INCIDENT_THRESHOLD default: 2.0 (fast burn multiple)
INCIDENT_RESET_TH default: 0.5 (burn multiple to exit)
INCIDENT_COOLDOWN default: 30 (minutes below reset threshold)
INCIDENT_RETENTION_H default: 24 (hours)
Outputs (in state dir):
flag.json feature flag payload (enabled/disabled + metadata)
retention.json retention override (hours, applied_at)
last_burn.txt last burn rate observed
cooldown.txt consecutive minutes below reset threshold
Examples:
incident-mode.sh --burn-rate 3.1 --note "fast burn" # enter incident mode
incident-mode.sh --burn-rate 0.2 # progress cooldown / exit
USAGE
}
if [[ $# -eq 0 ]]; then usage; exit 1; fi
BURN_RATE=""
NOTE=""
STATE_DIR=${INCIDENT_STATE_DIR:-out/incident-mode}
THRESHOLD=${INCIDENT_THRESHOLD:-2.0}
RESET_TH=${INCIDENT_RESET_TH:-0.5}
COOLDOWN_MINS=${INCIDENT_COOLDOWN:-30}
RETENTION_H=${INCIDENT_RETENTION_H:-24}
while [[ $# -gt 0 ]]; do
case "$1" in
--burn-rate) BURN_RATE="$2"; shift 2;;
--threshold) THRESHOLD="$2"; shift 2;;
--reset-threshold) RESET_TH="$2"; shift 2;;
--state-dir) STATE_DIR="$2"; shift 2;;
--retention-hours) RETENTION_H="$2"; shift 2;;
--cooldown-mins) COOLDOWN_MINS="$2"; shift 2;;
--note) NOTE="$2"; shift 2;;
-h|--help) usage; exit 0;;
*) echo "Unknown arg: $1" >&2; usage; exit 1;;
esac
done
if [[ -z "$BURN_RATE" ]]; then echo "--burn-rate is required" >&2; exit 1; fi
mkdir -p "$STATE_DIR"
FLAG_FILE="$STATE_DIR/flag.json"
RET_FILE="$STATE_DIR/retention.json"
LAST_FILE="$STATE_DIR/last_burn.txt"
COOLDOWN_FILE="$STATE_DIR/cooldown.txt"
jq_escape() { python - <<PY "$1"
import json,sys
print(json.dumps(sys.argv[1]))
PY
}
now_utc=$(date -u +%Y-%m-%dT%H:%M:%SZ)
# The heredoc terminator must sit on its own line; `PY)` never matches `PY`.
burn_float=$(python - <<'PY' "$BURN_RATE"
import sys
print(float(sys.argv[1]))
PY
)
cooldown_current=0
if [[ -f "$COOLDOWN_FILE" ]]; then
cooldown_current=$(cat "$COOLDOWN_FILE")
fi
enter_incident=false
exit_incident=false
if (( $(echo "$burn_float >= $THRESHOLD" | bc -l) )); then
enter_incident=true
cooldown_current=0
elif (( $(echo "$burn_float <= $RESET_TH" | bc -l) )); then
cooldown_current=$((cooldown_current + 1))
if (( cooldown_current >= COOLDOWN_MINS )); then
exit_incident=true
fi
else
cooldown_current=0
fi
echo "$burn_float" > "$LAST_FILE"
echo "$cooldown_current" > "$COOLDOWN_FILE"
write_flag() {
local enabled="$1"
cat > "$FLAG_FILE" <<JSON
{
"enabled": $enabled,
"updated_at": "$now_utc",
"reason": "incident-mode",
"note": $(jq_escape "$NOTE"),
"burn_rate": $burn_float
}
JSON
}
if $enter_incident; then
write_flag true
cat > "$RET_FILE" <<JSON
{
"retention_hours": $RETENTION_H,
"applied_at": "$now_utc"
}
JSON
echo "incident-mode: activated (burn_rate=$burn_float)" >&2
elif $exit_incident; then
write_flag false
echo "incident-mode: cleared after cooldown (burn_rate=$burn_float)" >&2
else
# no change; preserve prior flag if exists
if [[ ! -f "$FLAG_FILE" ]]; then
write_flag false
fi
echo "incident-mode: steady (burn_rate=$burn_float, cooldown=$cooldown_current/$COOLDOWN_MINS)" >&2
fi
exit 0
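The enter/cooldown/exit behaviour is a small state machine; a sketch simulating it over a series of per-minute burn-rate samples (defaults mirroring the script) makes the transitions easy to eyeball:

def simulate(rates, threshold=2.0, reset_th=0.5, cooldown_mins=30):
    """Yield (rate, active, cooldown) per sample, mirroring incident-mode.sh."""
    active, cooldown = False, 0
    for rate in rates:
        if rate >= threshold:
            active = True
            cooldown = 0
        elif rate <= reset_th:
            cooldown += 1
            if cooldown >= cooldown_mins:
                active = False  # cleared after sustained calm
        else:
            cooldown = 0  # mid-band burn resets the cooldown clock
        yield rate, active, cooldown

for sample in simulate([0.3, 2.5, 1.0, 0.2, 0.1], cooldown_mins=2):
    print(sample)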

View File

@@ -1,21 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-OBS-51-001: simple SLO burn-rate evaluator
PROM_URL=${PROM_URL:-"http://localhost:9090"}
OUT="out/obs-slo"
mkdir -p "$OUT"
query() {
local q="$1"
curl -sG "${PROM_URL}/api/v1/query" --data-urlencode "query=${q}"
}
echo "[slo] querying error rate (5m)"
query "(rate(service_request_errors_total[5m]) / rate(service_requests_total[5m]))" > "${OUT}/error-rate-5m.json"
echo "[slo] querying error rate (1h)"
query "(rate(service_request_errors_total[1h]) / rate(service_requests_total[1h]))" > "${OUT}/error-rate-1h.json"
echo "[slo] done; results in ${OUT}"

View File

@@ -1,19 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-OBS-52-001: validate streaming pipeline knobs
OUT="out/obs-stream"
mkdir -p "$OUT"
echo "[obs-stream] checking NATS connectivity"
if command -v nats >/dev/null 2>&1; then
nats --server "${NATS_URL:-nats://localhost:4222}" req health.ping ping || true
else
echo "nats CLI not installed; skipping connectivity check" > "${OUT}/nats.txt"
fi
echo "[obs-stream] dumping retention/partitions (Kafka-like env variables)"
# grep exits 1 when nothing matches, which would kill the script under pipefail
env | grep -E 'KAFKA_|REDIS_|NATS_' | sort > "${OUT}/env.txt" || true
echo "[obs-stream] done; outputs in $OUT"

View File

@@ -1,51 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Synthetic probe for orchestrator infra (postgres, mongo, nats).
# Runs lightweight checks and writes a status file under out/orchestrator-probe/.
COMPOSE_FILE=${COMPOSE_FILE:-ops/devops/orchestrator/docker-compose.orchestrator.yml}
STATE_DIR=${STATE_DIR:-out/orchestrator-probe}
mkdir -p "$STATE_DIR"
log() { printf "[probe] %s\n" "$*"; }
require() { command -v "$1" >/dev/null 2>&1 || { echo "missing $1" >&2; exit 1; }; }
require docker
timestamp() { date -u +%Y-%m-%dT%H:%M:%SZ; }
log "compose file: $COMPOSE_FILE"
PG_OK=0
MONGO_OK=0
NATS_OK=0
if docker compose -f "$COMPOSE_FILE" ps orchestrator-postgres >/dev/null 2>&1; then
if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-postgres psql -U orch -tAc "select 1" | grep -q 1; then
PG_OK=1
fi
fi
if docker compose -f "$COMPOSE_FILE" ps orchestrator-mongo >/dev/null 2>&1; then
if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-mongo mongosh --quiet --eval "db.adminCommand('ping').ok" | grep -q 1; then
MONGO_OK=1
fi
fi
if docker compose -f "$COMPOSE_FILE" ps orchestrator-nats >/dev/null 2>&1; then
if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-nats nats --server localhost:4222 ping >/dev/null 2>&1; then
# publish & request to ensure traffic path works
docker compose -f "$COMPOSE_FILE" exec -T orchestrator-nats nats --server localhost:4222 pub probe.ping "ok" >/dev/null 2>&1 || true
NATS_OK=1
fi
fi
cat > "$STATE_DIR/status.txt" <<EOF
timestamp=$(timestamp)
postgres_ok=$PG_OK
mongo_ok=$MONGO_OK
nats_ok=$NATS_OK
EOF
log "probe complete (pg=$PG_OK mongo=$MONGO_OK nats=$NATS_OK)"

View File

@@ -1,17 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Replay smoke: restart infra and rerun baseline smoke to validate persistence/readiness.
COMPOSE_FILE=${COMPOSE_FILE:-ops/devops/orchestrator/docker-compose.orchestrator.yml}
STATE_DIR=${STATE_DIR:-out/orchestrator-smoke}
log() { printf "[replay-smoke] %s\n" "$*"; }
log "restarting orchestrator infra (compose: $COMPOSE_FILE)"
docker compose -f "$COMPOSE_FILE" down
docker compose -f "$COMPOSE_FILE" up -d
log "running baseline smoke"
COMPOSE_FILE="$COMPOSE_FILE" STATE_DIR="$STATE_DIR" scripts/orchestrator/smoke.sh
log "replay smoke done; readiness at $STATE_DIR/readiness.txt"

View File

@@ -1,59 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT=$(cd "$(dirname "$0")/.." && pwd)
COMPOSE_FILE="${COMPOSE_FILE:-$ROOT/devops/orchestrator/docker-compose.orchestrator.yml}"
STATE_DIR="${STATE_DIR:-$ROOT/out/orchestrator-smoke}"
usage() {
cat <<'USAGE'
Orchestrator infra smoke test
- Starts postgres + mongo + nats via docker-compose
- Verifies basic connectivity and prints ready endpoints
Env/flags:
COMPOSE_FILE path to compose file (default: ops/devops/orchestrator/docker-compose.orchestrator.yml)
STATE_DIR path for logs (default: out/orchestrator-smoke)
SKIP_UP set to 1 to skip compose up (assumes already running)
USAGE
}
if [[ ${1:-} == "-h" || ${1:-} == "--help" ]]; then usage; exit 0; fi
mkdir -p "$STATE_DIR"
if [[ "${SKIP_UP:-0}" != "1" ]]; then
docker compose -f "$COMPOSE_FILE" up -d
fi
log() { echo "[smoke] $*"; }
log "waiting for postgres..."
for i in {1..12}; do
if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-postgres pg_isready -U orch >/dev/null 2>&1; then break; fi
sleep 5;
done
log "waiting for mongo..."
for i in {1..12}; do
if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-mongo mongosh --quiet --eval "db.adminCommand('ping')" >/dev/null 2>&1; then break; fi
sleep 5;
done
log "waiting for nats..."
for i in {1..12}; do
if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-nats nats --server localhost:4222 ping >/dev/null 2>&1; then break; fi
sleep 5;
done
log "postgres DSN: postgres://orch:orchpass@localhost:55432/orchestrator"
log "mongo uri: mongodb://localhost:57017"
log "nats uri: nats://localhost:4222"
# Write readiness summary
cat > "$STATE_DIR/readiness.txt" <<EOF
postgres=postgres://orch:orchpass@localhost:55432/orchestrator
mongo=mongodb://localhost:57017
nats=nats://localhost:4222
ready_at=$(date -u +%Y-%m-%dT%H:%M:%SZ)
EOF
log "smoke completed; summary at $STATE_DIR/readiness.txt"

View File

@@ -1,7 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
root_dir=$(cd "$(dirname "$0")/.." && pwd)
verifier="$root_dir/packs/verify_offline_bundle.py"
python3 "$verifier" --bundle "$root_dir/packs/__fixtures__/good" --manifest bundle.json --require-dsse
python3 "$verifier" --bundle "$root_dir/packs/__fixtures__/bad" --manifest bundle-missing-quota.json --require-dsse && exit 1 || true
echo "fixture checks completed"

View File

@@ -1,50 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Signs a policy file with cosign and verifies it. Intended for CI and offline use.
# Requires COSIGN_KEY_B64 (private key PEM base64) or KMS envs; optional COSIGN_PASSWORD.
usage() {
cat <<'USAGE'
Usage: sign-policy.sh --file <path> [--out-dir out/policy-sign]
Env:
COSIGN_KEY_B64 base64-encoded PEM private key (if not using KMS)
COSIGN_PASSWORD passphrase for the key (can be empty for test keys)
COSIGN_PUBLIC_KEY_PATH optional path to write public key for verify step
USAGE
}
FILE=""
OUT_DIR="out/policy-sign"
while [[ $# -gt 0 ]]; do
case "$1" in
--file) FILE="$2"; shift 2;;
--out-dir) OUT_DIR="$2"; shift 2;;
-h|--help) usage; exit 0;;
*) echo "Unknown arg: $1" >&2; usage; exit 1;;
esac
done
if [[ -z "$FILE" ]]; then echo "--file is required" >&2; exit 1; fi
if [[ ! -f "$FILE" ]]; then echo "file not found: $FILE" >&2; exit 1; fi
mkdir -p "$OUT_DIR"
BASENAME=$(basename "$FILE")
SIG="$OUT_DIR/${BASENAME}.sig"
PUB_OUT="${COSIGN_PUBLIC_KEY_PATH:-$OUT_DIR/cosign.pub}"
if [[ -n "${COSIGN_KEY_B64:-}" ]]; then
KEYFILE="$OUT_DIR/cosign.key"
printf "%s" "$COSIGN_KEY_B64" | base64 -d > "$KEYFILE"
chmod 600 "$KEYFILE"
export COSIGN_KEY="$KEYFILE"
fi
export COSIGN_PASSWORD=${COSIGN_PASSWORD:-}
cosign version >/dev/null
# Pass the key explicitly rather than relying on the COSIGN_KEY environment
# variable, which cosign does not read for --key.
cosign sign-blob --key "$COSIGN_KEY" "$FILE" --output-signature "$SIG"
cosign public-key --key "$COSIGN_KEY" > "$PUB_OUT"
cosign verify-blob --key "$PUB_OUT" --signature "$SIG" "$FILE"
printf "Signed %s -> %s\nPublic key -> %s\n" "$FILE" "$SIG" "$PUB_OUT"

View File

@@ -1,115 +0,0 @@
#!/usr/bin/env python3
"""
Deterministic provenance backfill helper for Sprint 401.
Reads the attestation inventory NDJSON and subject→Rekor map, emits a sorted
NDJSON log of resolved backfill actions. No network calls are performed.
Usage:
python scripts/provenance_backfill.py \
--inventory docs/provenance/attestation-inventory-2025-11-18.ndjson \
--subject-map docs/provenance/subject-rekor-map-2025-11-18.json \
--out logs/provenance-backfill-2025-11-18.ndjson
"""
from __future__ import annotations
import argparse
import json
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Iterable, List, Optional
@dataclass(frozen=True)
class InventoryRecord:
subject: str
dsse_hash: str
rekor_entry: str
@staticmethod
def from_json(obj: dict) -> "InventoryRecord":
return InventoryRecord(
subject=obj["subject"],
dsse_hash=obj["dsseHash"],
rekor_entry=obj.get("rekorEntry", ""),
)
def load_inventory(path: Path) -> List[InventoryRecord]:
records: List[InventoryRecord] = []
with path.open("r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line:
continue
records.append(InventoryRecord.from_json(json.loads(line)))
return records
def load_subject_map(path: Path) -> Dict[str, str]:
with path.open("r", encoding="utf-8") as f:
return json.load(f)
def validate_hash(prefix: str, value: str) -> None:
if not value.startswith("sha256:") or len(value) <= len("sha256:"):
raise ValueError(f"{prefix} must be sha256:<hex>: got '{value}'")
def build_backfill_entries(
inventory: Iterable[InventoryRecord],
subject_map: Dict[str, str],
) -> List[dict]:
entries: List[dict] = []
for rec in inventory:
validate_hash("dsseHash", rec.dsse_hash)
resolved_rekor = subject_map.get(rec.subject)
status = "resolved" if resolved_rekor else "missing_rekor_entry"
rekor_entry = resolved_rekor or rec.rekor_entry
if rekor_entry:
validate_hash("rekorEntry", rekor_entry)
entries.append(
{
"subject": rec.subject,
"dsseHash": rec.dsse_hash,
"rekorEntry": rekor_entry,
"status": status,
}
)
entries.sort(key=lambda o: (o["subject"], o["rekorEntry"] or ""))
return entries
def write_ndjson(path: Path, entries: Iterable[dict]) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
with path.open("w", encoding="utf-8") as f:
for entry in entries:
f.write(json.dumps(entry, separators=(",", ":"), sort_keys=True))
f.write("\n")
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Deterministic provenance backfill helper.")
parser.add_argument("--inventory", required=True, type=Path, help="Path to attestation inventory NDJSON.")
parser.add_argument("--subject-map", required=True, type=Path, help="Path to subject→Rekor JSON map.")
parser.add_argument("--out", required=True, type=Path, help="Output NDJSON log path.")
return parser.parse_args(argv)
def main(argv: Optional[List[str]] = None) -> int:
args = parse_args(argv)
inventory = load_inventory(args.inventory)
subject_map = load_subject_map(args.subject_map)
entries = build_backfill_entries(inventory, subject_map)
write_ndjson(args.out, entries)
resolved = sum(1 for e in entries if e["status"] == "resolved")
missing = sum(1 for e in entries if e["status"] != "resolved")
print(f"wrote {len(entries)} entries -> {args.out} (resolved={resolved}, missing={missing})")
return 0
if __name__ == "__main__":
sys.exit(main())
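Because the record shapes are fully specified, a throwaway fixture exercises the resolver end-to-end. A sketch (paths illustrative; assumes the scripts/ directory is importable):

import json, pathlib

tmp = pathlib.Path("out/backfill-demo")
tmp.mkdir(parents=True, exist_ok=True)
inventory = tmp / "inventory.ndjson"
subject_map = tmp / "subject-map.json"

records = [
    {"subject": "pkg:oci/app@1", "dsseHash": "sha256:" + "a" * 64, "rekorEntry": ""},
    {"subject": "pkg:oci/app@2", "dsseHash": "sha256:" + "b" * 64, "rekorEntry": ""},
]
inventory.write_text("\n".join(json.dumps(r) for r in records) + "\n")
subject_map.write_text(json.dumps({"pkg:oci/app@1": "sha256:" + "c" * 64}))

# Invoke main() directly; expect one resolved and one missing_rekor_entry row.
from provenance_backfill import main
main(["--inventory", str(inventory), "--subject-map", str(subject_map),
      "--out", str(tmp / "backfill.ndjson")])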

View File

@@ -1,68 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Inputs (typically provided by CI/CD)
IMAGE_REF="${IMAGE_REF:?missing IMAGE_REF}" # e.g. ghcr.io/org/app:tag
ATTEST_PATH="${ATTEST_PATH:?missing ATTEST_PATH}" # DSSE envelope file path
REKOR_URL="${REKOR_URL:-https://rekor.sigstore.dev}"
KEY_REF="${KEY_REF:-cosign.key}" # could be KMS / keyless etc.
OUT_META_JSON="${OUT_META_JSON:-provenance-meta.json}"
# 1) Upload DSSE envelope to Rekor with JSON output
rekor-cli upload \
--rekor_server "${REKOR_URL}" \
--artifact "${ATTEST_PATH}" \
--type dsse \
--format json > rekor-upload.json
LOG_INDEX=$(jq '.LogIndex' rekor-upload.json)
UUID=$(jq -r '.UUID' rekor-upload.json)
INTEGRATED_TIME=$(jq '.IntegratedTime' rekor-upload.json)
# 2) Compute envelope SHA256
ENVELOPE_SHA256=$(sha256sum "${ATTEST_PATH}" | awk '{print $1}')
# 3) Extract key metadata (example for local file key; adapt for Fulcio/KMS)
# For keyless/Fulcio you'd normally extract the cert from cosign verify-attestation.
KEY_ID="${KEY_ID:-${KEY_REF}}"
KEY_ALGO="${KEY_ALGO:-unknown}"
KEY_ISSUER="${KEY_ISSUER:-unknown}"
# 4) Optional: resolve image digest (if not already known in CI)
# Note: `cosign triangulate` returns the signature tag location, not the image
# digest, so resolve the digest with crane and strip the sha256: prefix.
IMAGE_DIGEST="${IMAGE_DIGEST:-}"
if [ -z "${IMAGE_DIGEST}" ]; then
  IMAGE_DIGEST="$(crane digest "${IMAGE_REF}")"
  IMAGE_DIGEST="${IMAGE_DIGEST#sha256:}"
fi
# 5) Emit provenance sidecar
cat > "${OUT_META_JSON}" <<EOF
{
"subject": {
"imageRef": "${IMAGE_REF}",
"digest": {
"sha256": "${IMAGE_DIGEST}"
}
},
"attestation": {
"path": "${ATTEST_PATH}",
"envelopeDigest": "sha256:${ENVELOPE_SHA256}",
"payloadType": "application/vnd.in-toto+json"
},
"dsse": {
"envelopeDigest": "sha256:${ENVELOPE_SHA256}",
"payloadType": "application/vnd.in-toto+json",
"key": {
"keyId": "${KEY_ID}",
"issuer": "${KEY_ISSUER}",
"algo": "${KEY_ALGO}"
},
"rekor": {
"logIndex": ${LOG_INDEX},
"uuid": "${UUID}",
"integratedTime": ${INTEGRATED_TIME}
}
}
}
EOF
echo "Provenance metadata written to ${OUT_META_JSON}"

View File

@@ -1,95 +0,0 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
# QA-CORPUS-401-031: Deterministic runner for reachability corpus tests (Windows)
[CmdletBinding()]
param(
[Parameter(HelpMessage = "xUnit filter pattern (e.g., 'CorpusFixtureTests')")]
[string]$Filter,
[Parameter(HelpMessage = "Test verbosity level")]
[ValidateSet("quiet", "minimal", "normal", "detailed", "diagnostic")]
[string]$Verbosity = "normal",
[Parameter(HelpMessage = "Build configuration")]
[ValidateSet("Debug", "Release")]
[string]$Configuration = "Release",
[Parameter(HelpMessage = "Skip build step")]
[switch]$NoBuild
)
$ErrorActionPreference = "Stop"
$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
$RepoRoot = (Resolve-Path (Join-Path $ScriptDir "..\..")).Path
$TestProject = Join-Path $RepoRoot "tests\reachability\StellaOps.Reachability.FixtureTests\StellaOps.Reachability.FixtureTests.csproj"
function Write-LogInfo { param($Message) Write-Host "[INFO] $Message" -ForegroundColor Green }
function Write-LogWarn { param($Message) Write-Host "[WARN] $Message" -ForegroundColor Yellow }
function Write-LogError { param($Message) Write-Host "[ERROR] $Message" -ForegroundColor Red }
Write-LogInfo "Reachability Corpus Test Runner (Windows)"
Write-LogInfo "Repository root: $RepoRoot"
Write-LogInfo "Test project: $TestProject"
# Verify prerequisites
$dotnetPath = Get-Command dotnet -ErrorAction SilentlyContinue
if (-not $dotnetPath) {
Write-LogError "dotnet CLI not found. Please install .NET SDK."
exit 1
}
# Verify corpus exists
$corpusManifest = Join-Path $RepoRoot "tests\reachability\corpus\manifest.json"
if (-not (Test-Path $corpusManifest)) {
Write-LogError "Corpus manifest not found at $corpusManifest"
exit 1
}
$reachbenchIndex = Join-Path $RepoRoot "tests\reachability\fixtures\reachbench-2025-expanded\INDEX.json"
if (-not (Test-Path $reachbenchIndex)) {
Write-LogError "Reachbench INDEX not found at $reachbenchIndex"
exit 1
}
# Build if needed
if (-not $NoBuild) {
Write-LogInfo "Building test project ($Configuration)..."
& dotnet build $TestProject -c $Configuration --nologo
if ($LASTEXITCODE -ne 0) {
Write-LogError "Build failed"
exit $LASTEXITCODE
}
}
# Build test command arguments
$testArgs = @(
"test"
$TestProject
"-c"
$Configuration
"--no-build"
"--verbosity"
$Verbosity
)
if ($Filter) {
$testArgs += "--filter"
$testArgs += "FullyQualifiedName~$Filter"
Write-LogInfo "Running tests with filter: $Filter"
} else {
Write-LogInfo "Running all fixture tests..."
}
# Run tests
Write-LogInfo "Executing: dotnet $($testArgs -join ' ')"
& dotnet @testArgs
$exitCode = $LASTEXITCODE
if ($exitCode -eq 0) {
Write-LogInfo "All tests passed!"
} else {
Write-LogError "Some tests failed (exit code: $exitCode)"
}
exit $exitCode

View File

@@ -1,118 +0,0 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: AGPL-3.0-or-later
# QA-CORPUS-401-031: Deterministic runner for reachability corpus tests
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
TEST_PROJECT="${REPO_ROOT}/src/__Tests/reachability/StellaOps.Reachability.FixtureTests/StellaOps.Reachability.FixtureTests.csproj"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
log_info() { echo -e "${GREEN}[INFO]${NC} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
log_error() { echo -e "${RED}[ERROR]${NC} $*"; }
# Parse arguments
FILTER=""
VERBOSITY="normal"
CONFIGURATION="Release"
NO_BUILD=false
while [[ $# -gt 0 ]]; do
case $1 in
--filter)
FILTER="$2"
shift 2
;;
--verbosity|-v)
VERBOSITY="$2"
shift 2
;;
--configuration|-c)
CONFIGURATION="$2"
shift 2
;;
--no-build)
NO_BUILD=true
shift
;;
--help|-h)
echo "Usage: $0 [options]"
echo ""
echo "Options:"
echo " --filter <pattern> xUnit filter pattern (e.g., 'CorpusFixtureTests')"
echo " --verbosity, -v <level> Test verbosity (quiet, minimal, normal, detailed, diagnostic)"
echo " --configuration, -c Build configuration (Debug, Release)"
echo " --no-build Skip build step"
echo " --help, -h Show this help"
echo ""
echo "Examples:"
echo " $0 # Run all fixture tests"
echo " $0 --filter CorpusFixtureTests # Run only corpus tests"
echo " $0 --filter ReachbenchFixtureTests # Run only reachbench tests"
exit 0
;;
*)
log_error "Unknown option: $1"
exit 1
;;
esac
done
cd "${REPO_ROOT}"
log_info "Reachability Corpus Test Runner"
log_info "Repository root: ${REPO_ROOT}"
log_info "Test project: ${TEST_PROJECT}"
# Verify prerequisites
if ! command -v dotnet &> /dev/null; then
log_error "dotnet CLI not found. Please install .NET SDK."
exit 1
fi
# Verify corpus exists
if [[ ! -f "${REPO_ROOT}/src/__Tests/reachability/corpus/manifest.json" ]]; then
log_error "Corpus manifest not found at src/__Tests/reachability/corpus/manifest.json"
exit 1
fi
if [[ ! -f "${REPO_ROOT}/src/__Tests/reachability/fixtures/reachbench-2025-expanded/INDEX.json" ]]; then
log_error "Reachbench INDEX not found at src/__Tests/reachability/fixtures/reachbench-2025-expanded/INDEX.json"
exit 1
fi
# Build if needed
if [[ "${NO_BUILD}" == false ]]; then
log_info "Building test project (${CONFIGURATION})..."
dotnet build "${TEST_PROJECT}" -c "${CONFIGURATION}" --nologo
fi
# Build test command arguments as an array (mirrors the PowerShell runner and
# avoids quoting pitfalls with eval)
TEST_ARGS=(test "${TEST_PROJECT}" -c "${CONFIGURATION}" --no-build --verbosity "${VERBOSITY}")
if [[ -n "${FILTER}" ]]; then
  TEST_ARGS+=(--filter "FullyQualifiedName~${FILTER}")
  log_info "Running tests with filter: ${FILTER}"
else
  log_info "Running all fixture tests..."
fi
# Run tests; capture the exit code explicitly (under `set -e` a bare failing
# command would abort the script before the summary below runs)
log_info "Executing: dotnet ${TEST_ARGS[*]}"
EXIT_CODE=0
dotnet "${TEST_ARGS[@]}" || EXIT_CODE=$?
if [[ ${EXIT_CODE} -eq 0 ]]; then
log_info "All tests passed!"
else
log_error "Some tests failed (exit code: ${EXIT_CODE})"
fi
exit ${EXIT_CODE}

View File

@@ -1,73 +0,0 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: AGPL-3.0-or-later
# QA-CORPUS-401-031: Verify SHA-256 hashes in corpus manifest
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
CORPUS_DIR="${REPO_ROOT}/src/__Tests/reachability/corpus"
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m'
log_info() { echo -e "${GREEN}[INFO]${NC} $*"; }
log_error() { echo -e "${RED}[ERROR]${NC} $*"; }
cd "${CORPUS_DIR}"
if [[ ! -f "manifest.json" ]]; then
log_error "manifest.json not found in ${CORPUS_DIR}"
exit 1
fi
log_info "Verifying corpus hashes..."
# Use Python for JSON parsing (more portable than jq)
python3 << 'PYTHON_SCRIPT'
import json
import hashlib
import os
import sys
with open('manifest.json') as f:
manifest = json.load(f)
errors = []
verified = 0
for entry in manifest:
case_id = entry['id']
lang = entry['language']
case_dir = os.path.join(lang, case_id)
if not os.path.isdir(case_dir):
errors.append(f"{case_id}: case directory missing ({case_dir})")
continue
for filename, expected_hash in entry['files'].items():
filepath = os.path.join(case_dir, filename)
if not os.path.exists(filepath):
errors.append(f"{case_id}: {filename} not found")
continue
with open(filepath, 'rb') as f:
actual_hash = hashlib.sha256(f.read()).hexdigest()
if actual_hash != expected_hash:
errors.append(f"{case_id}: {filename} hash mismatch")
errors.append(f" expected: {expected_hash}")
errors.append(f" actual: {actual_hash}")
else:
verified += 1
if errors:
print(f"\033[0;31m[ERROR]\033[0m Hash verification failed:")
for err in errors:
print(f" {err}")
sys.exit(1)
else:
print(f"\033[0;32m[INFO]\033[0m Verified {verified} files across {len(manifest)} corpus entries")
sys.exit(0)
PYTHON_SCRIPT
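The verifier implies the manifest schema: entries carry id, language, and a files map of filename to sha256. Regenerating an entry after editing a case is mechanical; a sketch:

import hashlib, json, pathlib, sys

def manifest_entry(corpus_root: pathlib.Path, language: str, case_id: str) -> dict:
    """Hash every file in <language>/<case_id> into a manifest-shaped entry."""
    case_dir = corpus_root / language / case_id
    files = {
        p.name: hashlib.sha256(p.read_bytes()).hexdigest()
        for p in sorted(case_dir.iterdir())
        if p.is_file()
    }
    return {"id": case_id, "language": language, "files": files}

if __name__ == "__main__":
    root = pathlib.Path(sys.argv[1])  # e.g. src/__Tests/reachability/corpus
    print(json.dumps(manifest_entry(root, sys.argv[2], sys.argv[3]), indent=2))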

View File

@@ -1,274 +0,0 @@
#!/usr/bin/env python3
"""Render Markdown documentation under docs/ into a static HTML bundle.
The script converts every Markdown file into a standalone HTML document,
mirroring the original folder structure under the output directory. A
`manifest.json` file is also produced to list the generated documents and
surface basic metadata (title, source path, output path).
Usage:
python scripts/render_docs.py --source docs --output build/docs-site
Dependencies:
pip install markdown pygments
"""
from __future__ import annotations
import argparse
import json
import logging
import os
import shutil
import subprocess
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Iterable, List
import markdown
# Enable fenced code blocks, tables, and definition lists. These cover the
# Markdown constructs heavily used across the documentation set.
MD_EXTENSIONS = [
"markdown.extensions.fenced_code",
"markdown.extensions.codehilite",
"markdown.extensions.tables",
"markdown.extensions.toc",
"markdown.extensions.def_list",
"markdown.extensions.admonition",
]
HTML_TEMPLATE = """<!DOCTYPE html>
<html lang=\"en\">
<head>
<meta charset=\"utf-8\" />
<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />
<title>{title}</title>
<style>
:root {{
color-scheme: light dark;
font-family: system-ui, -apple-system, Segoe UI, sans-serif;
line-height: 1.6;
}}
body {{
margin: 2.5rem auto;
padding: 0 1.5rem;
max-width: 70ch;
background: var(--background, #1118270d);
}}
pre {{
overflow: auto;
padding: 1rem;
background: #11182714;
border-radius: 0.5rem;
}}
code {{
font-family: SFMono-Regular, Consolas, 'Liberation Mono', monospace;
font-size: 0.95em;
}}
table {{
width: 100%;
border-collapse: collapse;
margin: 1rem 0;
}}
th, td {{
border: 1px solid #4b5563;
padding: 0.5rem;
text-align: left;
}}
a {{
color: #2563eb;
}}
footer {{
margin-top: 3rem;
font-size: 0.85rem;
color: #6b7280;
}}
</style>
</head>
<body>
<main>
{body}
</main>
<footer>
<p>Generated on {generated_at} UTC · Source: {source}</p>
</footer>
</body>
</html>
"""
@dataclass
class DocEntry:
source: Path
output: Path
title: str
def to_manifest(self) -> dict[str, str]:
return {
"source": self.source.as_posix(),
"output": self.output.as_posix(),
"title": self.title,
}
def discover_markdown_files(source_root: Path) -> Iterable[Path]:
for path in source_root.rglob("*.md"):
if path.is_file():
yield path
def read_title(markdown_text: str, fallback: str) -> str:
for raw_line in markdown_text.splitlines():
line = raw_line.strip()
if line.startswith("#"):
return line.lstrip("#").strip() or fallback
return fallback
def convert_markdown(path: Path, source_root: Path, output_root: Path) -> DocEntry:
relative = path.relative_to(source_root)
output_path = output_root / relative.with_suffix(".html")
output_path.parent.mkdir(parents=True, exist_ok=True)
text = path.read_text(encoding="utf-8")
html_body = markdown.markdown(text, extensions=MD_EXTENSIONS)
title = read_title(text, fallback=relative.stem.replace("_", " "))
generated_at = datetime.now(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
output_path.write_text(
HTML_TEMPLATE.format(
title=title,
body=html_body,
generated_at=generated_at,
source=relative.as_posix(),
),
encoding="utf-8",
)
return DocEntry(source=relative, output=output_path.relative_to(output_root), title=title)
def copy_static_assets(source_root: Path, output_root: Path) -> None:
for path in source_root.rglob("*"):
if path.is_dir() or path.suffix.lower() == ".md":
# Skip Markdown (already rendered separately).
continue
relative = path.relative_to(source_root)
destination = output_root / relative
destination.parent.mkdir(parents=True, exist_ok=True)
destination.write_bytes(path.read_bytes())
logging.info("Copied asset %s", relative)
def write_manifest(entries: Iterable[DocEntry], output_root: Path) -> None:
manifest_path = output_root / "manifest.json"
manifest = [entry.to_manifest() for entry in entries]
manifest_path.write_text(json.dumps(manifest, indent=2), encoding="utf-8")
logging.info("Wrote manifest with %d entries", len(manifest))
def write_index(entries: List[DocEntry], output_root: Path) -> None:
index_path = output_root / "index.html"
generated_at = datetime.now(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
items = "\n".join(
f" <li><a href='{entry.output.as_posix()}'>{entry.title}</a>" f" · <code>{entry.source.as_posix()}</code></li>"
for entry in sorted(entries, key=lambda e: e.title.lower())
)
html = f"""<!DOCTYPE html>
<html lang=\"en\">
<head>
<meta charset=\"utf-8\" />
<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />
<title>Stella Ops Documentation Index</title>
<style>
body {{
margin: 2.5rem auto;
padding: 0 1.5rem;
max-width: 70ch;
font-family: system-ui, -apple-system, 'Segoe UI', sans-serif;
line-height: 1.6;
}}
h1 {{ font-size: 2.25rem; margin-bottom: 1rem; }}
ul {{ list-style: none; padding: 0; }}
li {{ margin-bottom: 0.75rem; }}
code {{ background: #11182714; padding: 0.2rem 0.35rem; border-radius: 0.35rem; }}
</style>
</head>
<body>
<h1>Stella Ops Documentation</h1>
<p>Generated on {generated_at} UTC</p>
<ul>
{items}
</ul>
</body>
</html>
"""
index_path.write_text(html, encoding="utf-8")
logging.info("Wrote HTML index with %d entries", len(entries))
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Render documentation bundle")
parser.add_argument("--source", default="docs", type=Path, help="Directory containing Markdown sources")
parser.add_argument("--output", default=Path("build/docs-site"), type=Path, help="Directory for rendered output")
parser.add_argument("--clean", action="store_true", help="Remove the output directory before rendering")
return parser.parse_args()
def run_attestor_validation(repo_root: Path) -> None:
"""Execute the attestor schema + SDK validation prior to rendering docs."""
logging.info("Running attestor payload validation (npm run docs:attestor:validate)")
result = subprocess.run(
["npm", "run", "docs:attestor:validate"],
cwd=repo_root,
check=False,
)
if result.returncode != 0:
raise RuntimeError("Attestor payload validation failed; aborting docs render.")
def main() -> int:
logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")
args = parse_args()
source_root: Path = args.source.resolve()
output_root: Path = args.output.resolve()
repo_root = Path(__file__).resolve().parents[1]
if not source_root.exists():
logging.error("Source directory %s does not exist", source_root)
return os.EX_NOINPUT
try:
run_attestor_validation(repo_root)
except RuntimeError as exc:
logging.error("%s", exc)
return os.EX_DATAERR
if args.clean and output_root.exists():
logging.info("Cleaning existing output directory %s", output_root)
shutil.rmtree(output_root)
output_root.mkdir(parents=True, exist_ok=True)
entries: List[DocEntry] = []
for md_file in discover_markdown_files(source_root):
entry = convert_markdown(md_file, source_root, output_root)
entries.append(entry)
logging.info("Rendered %s -> %s", entry.source, entry.output)
write_manifest(entries, output_root)
write_index(entries, output_root)
copy_static_assets(source_root, output_root)
logging.info("Documentation bundle available at %s", output_root)
return os.EX_OK
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,88 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Offline verifier for policy-sim inputs lock (PS1-PS10 remediation).
# Usage: verify-policy-sim-lock.sh lock.json --policy path --graph path --sbom path --time-anchor path --dataset path [--max-age-hours 24]
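# The lock file is expected to carry the fields read below; a minimal example
# (digest values shortened here for illustration) looks like:
#   {
#     "policyBundleSha256": "3f2a... (64 hex chars)",
#     "graphSha256": "...", "sbomSha256": "...",
#     "timeAnchorSha256": "...", "datasetSha256": "...",
#     "shadowIsolation": true,
#     "requiredScopes": ["policy:simulate:shadow"],
#     "generatedAt": "2025-12-26T00:00:00Z"
#   }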
usage() {
echo "Usage: $0 lock.json --policy <file> --graph <file> --sbom <file> --time-anchor <file> --dataset <file> [--max-age-hours <n>]" >&2
exit 2
}
[[ $# -lt 11 ]] && usage
lock=""
policy=""
graph=""
sbom=""
time_anchor=""
dataset=""
max_age_hours=0
while [[ $# -gt 0 ]]; do
case "$1" in
--policy) policy=${2:-}; shift ;;
--graph) graph=${2:-}; shift ;;
--sbom) sbom=${2:-}; shift ;;
--time-anchor) time_anchor=${2:-}; shift ;;
--dataset) dataset=${2:-}; shift ;;
--max-age-hours) max_age_hours=${2:-0}; shift ;;
*) if [[ -z "$lock" ]]; then lock=$1; else usage; fi ;;
esac
shift
done
[[ -z "$lock" || -z "$policy" || -z "$graph" || -z "$sbom" || -z "$time_anchor" || -z "$dataset" ]] && usage
require() { command -v "$1" >/dev/null || { echo "$1 is required" >&2; exit 2; }; }
require jq
require sha256sum
calc_sha() { sha256sum "$1" | awk '{print $1}'; }
lock_policy=$(jq -r '.policyBundleSha256' "$lock")
lock_graph=$(jq -r '.graphSha256' "$lock")
lock_sbom=$(jq -r '.sbomSha256' "$lock")
lock_anchor=$(jq -r '.timeAnchorSha256' "$lock")
lock_dataset=$(jq -r '.datasetSha256' "$lock")
lock_shadow=$(jq -r '.shadowIsolation' "$lock")
lock_scopes=$(jq -r '.requiredScopes[]?' "$lock" | tr '\n' ' ')
lock_generated=$(jq -r '.generatedAt' "$lock")
sha_ok() {
[[ $1 =~ ^[A-Fa-f0-9]{64}$ ]]
}
for h in "$lock_policy" "$lock_graph" "$lock_sbom" "$lock_anchor" "$lock_dataset"; do
sha_ok "$h" || { echo "invalid digest format: $h" >&2; exit 3; }
done
[[ "$lock_shadow" == "true" ]] || { echo "shadowIsolation must be true" >&2; exit 5; }
if ! grep -qi "policy:simulate:shadow" <<< "$lock_scopes"; then
echo "requiredScopes missing policy:simulate:shadow" >&2; exit 5;
fi
[[ "$lock_policy" == "$(calc_sha "$policy")" ]] || { echo "policy digest mismatch" >&2; exit 3; }
[[ "$lock_graph" == "$(calc_sha "$graph")" ]] || { echo "graph digest mismatch" >&2; exit 3; }
[[ "$lock_sbom" == "$(calc_sha "$sbom")" ]] || { echo "sbom digest mismatch" >&2; exit 3; }
[[ "$lock_anchor" == "$(calc_sha "$time_anchor")" ]] || { echo "time anchor digest mismatch" >&2; exit 3; }
[[ "$lock_dataset" == "$(calc_sha "$dataset")" ]] || { echo "dataset digest mismatch" >&2; exit 3; }
if [[ $max_age_hours -gt 0 ]]; then
now=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
# Pass the timestamps as argv *before* the heredoc; placing them after the
# terminator would make bash execute them as a separate command.
age_hours=$(python3 - "$lock_generated" "$now" <<'PY'
import sys, datetime
lock = sys.argv[1].replace('Z', '+00:00')
now = sys.argv[2].replace('Z', '+00:00')
l = datetime.datetime.fromisoformat(lock)
n = datetime.datetime.fromisoformat(now)
print((n - l).total_seconds() / 3600)
PY
)
if (( $(printf '%.0f' "$age_hours") > max_age_hours )); then
echo "lock stale: ${age_hours}h > ${max_age_hours}h" >&2
exit 4
fi
fi
echo "policy-sim lock verified (shadow mode enforced)."

View File

@@ -1,63 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
usage() {
cat <<'EOF'
Usage: rotate-policy-cli-secret.sh [--output <path>] [--dry-run]
Generates a new random shared secret suitable for the Authority
`policy-cli` client and optionally writes it to the target file
in `etc/secrets/` with the standard header comment.
Options:
--output <path> Destination file (default: etc/secrets/policy-cli.secret)
--dry-run Print the generated secret to stdout without writing.
-h, --help Show this help.
EOF
}
OUTPUT="etc/secrets/policy-cli.secret"
DRY_RUN=0
while [[ $# -gt 0 ]]; do
case "$1" in
--output)
OUTPUT="$2"
shift 2
;;
--dry-run)
DRY_RUN=1
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 1
;;
esac
done
if ! command -v openssl >/dev/null 2>&1; then
echo "openssl is required to generate secrets" >&2
exit 1
fi
# Generate a 48-byte random secret, base64 encoded without padding.
RAW_SECRET=$(openssl rand -base64 48 | tr -d '\n=')
SECRET="policy-cli-${RAW_SECRET}"
if [[ "$DRY_RUN" -eq 1 ]]; then
echo "$SECRET"
exit 0
fi
cat <<EOF > "$OUTPUT"
# generated $(date -u +%Y-%m-%dT%H:%M:%SZ) via scripts/rotate-policy-cli-secret.sh
$SECRET
EOF
chmod 600 "$OUTPUT"
echo "Wrote new policy-cli secret to $OUTPUT"

View File

@@ -1,81 +0,0 @@
#!/usr/bin/env bash
# Runs live TTL validation for Attestor dedupe stores against local MongoDB/Valkey.
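# Extra arguments are forwarded to `dotnet test`, and the endpoints can be
# overridden via ATTESTOR_LIVE_MONGO_URI / ATTESTOR_LIVE_VALKEY_URI.
# Example (script name illustrative):
#   ./run-attestor-ttl-tests.sh --logger "trx;LogFileName=ttl.trx"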
set -euo pipefail
if ! command -v docker >/dev/null 2>&1; then
echo "docker CLI is required. Install Docker Desktop or ensure docker is on PATH." >&2
exit 1
fi
if ! docker compose version >/dev/null 2>&1; then
if command -v docker-compose >/dev/null 2>&1; then
compose_cmd="docker-compose"
else
echo "docker compose plugin (or docker-compose) is required." >&2
exit 1
fi
else
compose_cmd="docker compose"
fi
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
compose_file="$(mktemp -t attestor-ttl-compose-XXXXXX.yaml)"
cleanup() {
$compose_cmd -f "$compose_file" down -v >/dev/null 2>&1 || true
rm -f "$compose_file"
}
trap cleanup EXIT
cat >"$compose_file" <<'YAML'
services:
mongo:
image: mongo:7.0
ports:
- "27017:27017"
healthcheck:
test: ["CMD", "mongosh", "--quiet", "localhost/test", "--eval", "db.runCommand({ ping: 1 })"]
interval: 5s
timeout: 3s
retries: 20
valkey:
image: valkey/valkey:8-alpine
command: ["valkey-server", "--save", "", "--appendonly", "no"]
ports:
- "6379:6379"
healthcheck:
test: ["CMD", "valkey-cli", "ping"]
interval: 5s
timeout: 3s
retries: 20
YAML
echo "Starting MongoDB and Valkey containers..."
$compose_cmd -f "$compose_file" up -d
wait_for_port() {
local host=$1
local port=$2
local name=$3
for attempt in {1..60}; do
if (echo > /dev/tcp/"$host"/"$port") >/dev/null 2>&1; then
echo "$name is accepting connections."
return 0
fi
sleep 1
done
echo "Timeout waiting for $name on $host:$port" >&2
return 1
}
wait_for_port 127.0.0.1 27017 "MongoDB"
wait_for_port 127.0.0.1 6379 "Valkey"
export ATTESTOR_LIVE_MONGO_URI="${ATTESTOR_LIVE_MONGO_URI:-mongodb://127.0.0.1:27017}"
export ATTESTOR_LIVE_VALKEY_URI="${ATTESTOR_LIVE_VALKEY_URI:-127.0.0.1:6379}"
echo "Running live TTL validation tests..."
dotnet test "$repo_root/src/Attestor/StellaOps.Attestor.sln" --no-build --filter "Category=LiveTTL" "$@"
echo "Live TTL validation complete. Shutting down containers."

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
# Convenience wrapper to run the isolated Node analyzer suite with cleanup enabled.
set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# auto-clean workspace outputs before running tests (uses cleanup helper inside test script)
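# Example: opt out of the pre-run cleanup for a faster re-run (script name illustrative):
#   CLEAN_BEFORE_NODE_TESTS=0 ./run-node-analyzer-tests.sh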
export CLEAN_BEFORE_NODE_TESTS="${CLEAN_BEFORE_NODE_TESTS:-1}"
export DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export NUGET_PACKAGES="${ROOT}/offline/packages"
exec "${ROOT}/src/Scanner/__Tests/node-tests-isolated.sh"

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
export DOTNET_CLI_HOME="${DOTNET_CLI_HOME:-${ROOT_DIR}/.dotnet-cli}"
export DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_NOLOGO=1
export DOTNET_MULTILEVEL_LOOKUP=0
export MSBUILDDISABLENODEREUSE=1
export DOTNET_HOST_DISABLE_RESOLVER_FALLBACK=1
export DOTNET_RESTORE_DISABLE_PARALLEL=true
PROJECT="${ROOT_DIR}/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Node.SmokeTests/StellaOps.Scanner.Analyzers.Lang.Node.SmokeTests.csproj"
RESTORE_SRC="${ROOT_DIR}/.nuget/packages"
mkdir -p "$DOTNET_CLI_HOME"
DOTNET_RESTORE_ARGS=("restore" "$PROJECT" "--no-cache" "--disable-parallel" "/p:RestoreSources=${RESTORE_SRC}" "/p:DisableSdkResolverCache=true" "/p:DisableImplicitNuGetFallbackFolder=true" "/p:RestoreNoCache=true")
DOTNET_BUILD_ARGS=("build" "$PROJECT" "-c" "Release" "--no-restore" "-m:1" "/p:UseSharedCompilation=false" "/p:RestoreSources=${RESTORE_SRC}" "/p:DisableSdkResolverCache=true" "/p:DisableImplicitNuGetFallbackFolder=true")
DOTNET_TEST_ARGS=("test" "$PROJECT" "-c" "Release" "--no-build" "--no-restore" "-m:1" "/p:UseSharedCompilation=false" "--filter" "Phase22_Fixture_Matches_Golden" "--logger" "trx" "--results-directory" "${ROOT_DIR}/TestResults/phase22-smoke" "/p:RestoreSources=${RESTORE_SRC}" "/p:DisableSdkResolverCache=true" "/p:DisableImplicitNuGetFallbackFolder=true")
echo "[phase22-smoke] restoring from ${RESTORE_SRC} ..."
dotnet "${DOTNET_RESTORE_ARGS[@]}"
echo "[phase22-smoke] building smoke project ..."
dotnet "${DOTNET_BUILD_ARGS[@]}"
echo "[phase22-smoke] running test ..."
dotnet "${DOTNET_TEST_ARGS[@]}"

View File

@@ -1,22 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-SCAN-90-004: run determinism harness/tests and collect report
ROOT="$(git rev-parse --show-toplevel)"
OUT="${ROOT}/out/scanner-determinism"
mkdir -p "$OUT"
PROJECT="src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Tests/StellaOps.Scanner.Analyzers.Lang.Tests.csproj"
echo "[determinism] running dotnet test (filter=Determinism)"
dotnet test "$PROJECT" --no-build --logger "trx;LogFileName=determinism.trx" --filter Determinism
find "$(dirname "$PROJECT")" -name "*.trx" -print -exec cp {} "$OUT/" \;
echo "[determinism] summarizing"
printf "project=%s\n" "$PROJECT" > "$OUT/summary.txt"
printf "timestamp=%s\n" "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" >> "$OUT/summary.txt"
tar -C "$OUT" -czf "$OUT/determinism-artifacts.tgz" .
echo "[determinism] artifacts at $OUT"

View File

@@ -1,34 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Generates an offline-friendly code-signing certificate (self-signed) for NuGet package signing.
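# Example invocation (values and script name illustrative):
#   OUT_DIR=out/signing DAYS=365 PASSWORD=changeit ./generate-sdk-signing-cert.sh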
OUT_DIR=${OUT_DIR:-out/sdk-signing}
SUBJECT=${SUBJECT:-"/CN=StellaOps SDK Signing/O=StellaOps"}
DAYS=${DAYS:-3650}
PFX_NAME=${PFX_NAME:-sdk-signing.pfx}
PASSWORD=${PASSWORD:-""}
mkdir -p "$OUT_DIR"
PRIV="$OUT_DIR/sdk-signing.key"
CRT="$OUT_DIR/sdk-signing.crt"
PFX="$OUT_DIR/$PFX_NAME"
openssl req -x509 -newkey rsa:4096 -sha256 -days "$DAYS" \
-nodes -subj "$SUBJECT" -keyout "$PRIV" -out "$CRT"
openssl pkcs12 -export -out "$PFX" -inkey "$PRIV" -in "$CRT" -passout pass:"$PASSWORD"
BASE64_PFX=$(base64 < "$PFX" | tr -d '\n')
cat > "$OUT_DIR/README.txt" <<EOF
PFX file: $PFX
Password: ${PASSWORD:-<empty>}
Base64:
$BASE64_PFX
Secrets to set:
SDK_SIGNING_CERT_B64=$BASE64_PFX
SDK_SIGNING_CERT_PASSWORD=$PASSWORD
EOF
printf "Generated signing cert -> %s (base64 in README)\n" "$PFX"

View File

@@ -1,36 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Publishes signed NuGet packages to a configured feed (file or HTTP).
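# Example invocations (feed endpoints and script name illustrative):
#   SDK_NUGET_SOURCE=/mnt/offline-feed ./publish-sdk-packages.sh
#   SDK_NUGET_SOURCE=https://nuget.example.internal/v3/index.json \
#     SDK_NUGET_API_KEY="$NUGET_KEY" ./publish-sdk-packages.sh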
PACKAGES_GLOB=${PACKAGES_GLOB:-"out/sdk/*.nupkg"}
SOURCE=${SDK_NUGET_SOURCE:-".nuget/packages/packages"}
API_KEY=${SDK_NUGET_API_KEY:-""}
# Expand the glob directly (nullglob) instead of parsing `ls` output.
shopt -s nullglob
packages=( $PACKAGES_GLOB )
shopt -u nullglob
if [[ ${#packages[@]} -eq 0 ]]; then
echo "No packages found under glob '$PACKAGES_GLOB'; nothing to publish."
exit 0
fi
publish_file() {
local pkg="$1"
mkdir -p "$SOURCE"
cp "$pkg" "$SOURCE"/
}
publish_http() {
local pkg="$1"
dotnet nuget push "$pkg" --source "$SOURCE" --api-key "$API_KEY" --skip-duplicate
}
if [[ "$SOURCE" =~ ^https?:// ]]; then
if [[ -z "$API_KEY" ]]; then
echo "SDK_NUGET_API_KEY is required for HTTP source $SOURCE" >&2
exit 1
fi
for pkg in "${packages[@]}"; do publish_http "$pkg"; done
else
for pkg in "${packages[@]}"; do publish_file "$pkg"; done
fi
echo "Published ${#packages[@]} package(s) to $SOURCE"

View File

@@ -1,43 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Signs NuGet packages using a PKCS#12 (PFX) certificate.
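# Example (cert material and script name illustrative; `base64 -w0` assumes GNU
# coreutils): sign using a base64 PFX exported from CI secrets, with no
# timestamp authority, which is the offline default:
#   SDK_SIGNING_CERT_B64="$(base64 -w0 sdk-signing.pfx)" \
#     SDK_SIGNING_CERT_PASSWORD=changeit ./sign-sdk-packages.sh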
PACKAGES_GLOB=${PACKAGES_GLOB:-"out/sdk/*.nupkg"}
OUT_DIR=${OUT_DIR:-out/sdk}
TIMESTAMP_URL=${TIMESTAMP_URL:-""} # optional; keep empty for offline
PFX_PATH=${PFX_PATH:-""}
PFX_B64=${SDK_SIGNING_CERT_B64:-}
PFX_PASSWORD=${SDK_SIGNING_CERT_PASSWORD:-}
mkdir -p "$OUT_DIR"
if [[ -z "$PFX_PATH" ]]; then
if [[ -z "$PFX_B64" ]]; then
echo "No signing cert provided (SDK_SIGNING_CERT_B64/PFX_PATH); skipping signing."
exit 0
fi
PFX_PATH="$OUT_DIR/sdk-signing.pfx"
printf "%s" "$PFX_B64" | base64 -d > "$PFX_PATH"
fi
# Expand the glob directly (nullglob) instead of parsing `ls` output.
shopt -s nullglob
packages=( $PACKAGES_GLOB )
shopt -u nullglob
if [[ ${#packages[@]} -eq 0 ]]; then
echo "No packages found under glob '$PACKAGES_GLOB'; nothing to sign."
exit 0
fi
for pkg in "${packages[@]}"; do
echo "Signing $pkg"
ts_args=()
if [[ -n "$TIMESTAMP_URL" ]]; then
ts_args=(--timestamp-url "$TIMESTAMP_URL")
fi
dotnet nuget sign "$pkg" \
--certificate-path "$PFX_PATH" \
--certificate-password "$PFX_PASSWORD" \
--hash-algorithm sha256 \
"${ts_args[@]}"
done
echo "Signed ${#packages[@]} package(s)."

View File

@@ -1,15 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Build Signals image and export a tarball for offline use.
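# Example (tag illustrative): build a versioned image, then load it on the
# offline host from the exported tarball:
#   IMAGE_TAG=stellaops/signals:1.2.3 ./build-signals-image.sh
#   docker load -i out/signals/signals-image.tar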
ROOT=${ROOT:-$(git rev-parse --show-toplevel)}
OUT_DIR=${OUT_DIR:-$ROOT/out/signals}
IMAGE_TAG=${IMAGE_TAG:-stellaops/signals:local}
DOCKERFILE=${DOCKERFILE:-ops/devops/signals/Dockerfile}
mkdir -p "$OUT_DIR"
docker build -f "$DOCKERFILE" -t "$IMAGE_TAG" "$ROOT"
docker save "$IMAGE_TAG" -o "$OUT_DIR/signals-image.tar"
printf "Image %s saved to %s/signals-image.tar\n" "$IMAGE_TAG" "$OUT_DIR"

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Lightweight smoke for SIGNALS-24-004/005: run reachability scoring + cache/event tests.
# Uses existing unit tests as fixtures; intended for CI and local preflight.
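# Example: narrow the smoke run to a single fixture (script name illustrative):
#   FILTER=ReachabilityScoringServiceTests ./run-reachability-smoke.sh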
ROOT="${1:-src/Signals/__Tests/StellaOps.Signals.Tests/StellaOps.Signals.Tests.csproj}"
FILTER="${FILTER:-ReachabilityScoringServiceTests|RuntimeFactsIngestionServiceTests.IngestAsync_AggregatesHits_AndRecomputesReachability|InMemoryEventsPublisherTests}"
echo "[info] Running reachability smoke against ${ROOT}"
dotnet test "${ROOT}" -c Release --no-build --filter "${FILTER}" --logger "console;verbosity=normal"
echo "[info] Reachability smoke succeeded."

View File

@@ -1,7 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Run the OTLP span sink for Excititor traces (DEVOPS-SPANSINK-31-003).
ROOT=${ROOT:-$(git rev-parse --show-toplevel)}
COMPOSE_FILE=${COMPOSE_FILE:-$ROOT/ops/devops/signals/docker-compose.spansink.yml}
export COMPOSE_FILE
exec docker compose up -d

View File

@@ -1,16 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-SYMS-90-005: Deploy Symbols.Server (Helm) with MinIO/Mongo dependencies.
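# Example (values file illustrative): deploy to a staging namespace:
#   NAMESPACE=symbols-staging VALUES=ops/devops/symbols/values.staging.yaml ./deploy-symbols.sh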
SYMS_CHART=${SYMS_CHART:-"charts/symbols-server"}
NAMESPACE=${NAMESPACE:-"symbols"}
VALUES=${VALUES:-"ops/devops/symbols/values.yaml"}
echo "[symbols] creating namespace $NAMESPACE"
kubectl create namespace "$NAMESPACE" --dry-run=client -o yaml | kubectl apply -f -
echo "[symbols] installing chart $SYMS_CHART"
helm upgrade --install symbols-server "$SYMS_CHART" -n "$NAMESPACE" -f "$VALUES"
echo "[symbols] deployment triggered"

View File

@@ -1,61 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
ROOT=$(cd "$SCRIPT_DIR/../.." && pwd)
COMPOSE_FILE="$ROOT/ops/devops/symbols/docker-compose.symbols.yaml"
PROJECT_NAME=${PROJECT_NAME:-symbolsci}
ARTIFACT_DIR=${ARTIFACT_DIR:-"$ROOT/out/symbols-ci"}
STAMP=$(date -u +"%Y%m%dT%H%M%SZ")
RUN_DIR="$ARTIFACT_DIR/$STAMP"
mkdir -p "$RUN_DIR"
log() { printf '[%s] %s\n' "$(date -u +%H:%M:%S)" "$*"; }
cleanup() {
local code=$?
log "Collecting compose logs"
docker compose -f "$COMPOSE_FILE" -p "$PROJECT_NAME" logs >"$RUN_DIR/compose.log" 2>&1 || true
log "Tearing down stack"
docker compose -f "$COMPOSE_FILE" -p "$PROJECT_NAME" down -v >/dev/null 2>&1 || true
log "Artifacts in $RUN_DIR"
exit $code
}
trap cleanup EXIT
log "Pulling images"
docker compose -f "$COMPOSE_FILE" -p "$PROJECT_NAME" pull --ignore-pull-failures >/dev/null 2>&1 || true
log "Starting services"
docker compose -f "$COMPOSE_FILE" -p "$PROJECT_NAME" up -d --remove-orphans
wait_http() {
local url=$1; local name=$2; local tries=${3:-30}
for i in $(seq 1 "$tries"); do
if curl -fsS --max-time 5 "$url" >/dev/null 2>&1; then
log "$name ready"
return 0
fi
sleep 2
done
log "$name not ready"
return 1
}
wait_http "http://localhost:9000/minio/health/ready" "MinIO" 25
wait_http "http://localhost:8080/healthz" "Symbols.Server" 25
log "Seeding bucket"
docker run --rm --network symbols-ci minio/mc:RELEASE.2024-08-17T00-00-00Z \
alias set symbols http://minio:9000 minio minio123 >/dev/null
docker run --rm --network symbols-ci minio/mc:RELEASE.2024-08-17T00-00-00Z \
mb -p symbols/symbols >/dev/null
log "Capture readiness endpoint"
curl -fsS http://localhost:8080/healthz -o "$RUN_DIR/healthz.json"
log "Smoke list request"
curl -fsS http://localhost:8080/ -o "$RUN_DIR/root.html" || true
echo "status=pass" > "$RUN_DIR/summary.txt"

View File

@@ -1,45 +0,0 @@
# scripts/test-lane.ps1
# Runs tests filtered by lane (Unit, Contract, Integration, Security, Performance, Live)
#
# Usage:
# .\scripts\test-lane.ps1 Unit
# .\scripts\test-lane.ps1 Integration -ResultsDirectory .\test-results
# .\scripts\test-lane.ps1 Security -Logger "trx;LogFileName=security-tests.trx"
[CmdletBinding()]
param(
[Parameter(Mandatory=$true, Position=0)]
[ValidateSet('Unit', 'Contract', 'Integration', 'Security', 'Performance', 'Live')]
[string]$Lane,
[Parameter(ValueFromRemainingArguments=$true)]
[string[]]$DotNetTestArgs
)
$ErrorActionPreference = 'Stop'
Write-Host "Running tests for lane: $Lane" -ForegroundColor Cyan
# Build trait filter for xUnit
# Format: --filter "Lane=$Lane"
$filterArg = "--filter", "Lane=$Lane"
# Build full dotnet test command
$testArgs = @(
'test'
$filterArg
'--configuration', 'Release'
'--no-build'
)
# Appending $null would add a null element; only append when extra args were supplied.
if ($DotNetTestArgs) { $testArgs += $DotNetTestArgs }
Write-Host "Executing: dotnet $($testArgs -join ' ')" -ForegroundColor Gray
# Execute dotnet test
& dotnet $testArgs
if ($LASTEXITCODE -ne 0) {
Write-Error "Tests failed with exit code $LASTEXITCODE"
exit $LASTEXITCODE
}
Write-Host "Lane '$Lane' tests completed successfully" -ForegroundColor Green

View File

@@ -1,35 +0,0 @@
#!/usr/bin/env bash
# scripts/test-lane.sh
# Runs tests filtered by lane (Unit, Contract, Integration, Security, Performance, Live)
#
# Usage:
# ./scripts/test-lane.sh Unit
# ./scripts/test-lane.sh Integration --results-directory ./test-results
# ./scripts/test-lane.sh Security --logger "trx;LogFileName=security-tests.trx"
set -euo pipefail
LANE="${1:-Unit}"
shift || true
# Validate lane
case "$LANE" in
Unit|Contract|Integration|Security|Performance|Live)
;;
*)
echo "Error: Invalid lane '$LANE'. Must be one of: Unit, Contract, Integration, Security, Performance, Live"
exit 1
;;
esac
echo "Running tests for lane: $LANE"
# Build trait filter for xUnit
# Format: --filter "Lane=$LANE"
dotnet test \
--filter "Lane=$LANE" \
--configuration Release \
--no-build \
"$@"
echo "Lane '$LANE' tests completed"

View File

@@ -1,14 +0,0 @@
$ErrorActionPreference = "Stop"
# Runs PolicyValidationCliTests using the minimal policy-only solution with graph build disabled.
$Root = Split-Path -Parent (Split-Path -Parent $PSCommandPath)
Set-Location $Root
$env:DOTNET_DISABLE_BUILTIN_GRAPH = "1"
$solution = "src/Policy/StellaOps.Policy.only.sln"
dotnet restore $solution -v minimal
dotnet build src/Policy/__Tests/StellaOps.Policy.Tests/StellaOps.Policy.Tests.csproj -c Release --no-restore /p:BuildProjectReferences=false
dotnet test $solution -c Release --no-build --filter FullyQualifiedName~PolicyValidationCliTests

Some files were not shown because too many files have changed in this diff