Add Authority Advisory AI and API Lifecycle Configuration
- Introduced AuthorityAdvisoryAiOptions and related classes for managing advisory AI configuration, including remote inference options and tenant-specific settings.
- Added AuthorityApiLifecycleOptions to control API lifecycle settings, including legacy OAuth endpoint configuration.
- Implemented validation and normalization methods for both the advisory AI and API lifecycle options to ensure proper configuration.
- Created AuthorityNotificationsOptions and related classes for managing notification settings, including ack tokens, webhooks, and escalation options.
- Developed IssuerDirectoryClient and related models for interacting with the issuer directory service, including caching and HTTP client configuration.
- Added dependency-injection support via ServiceCollectionExtensions for the Issuer Directory client.
- Updated the project file with the package references required by the new Issuer Directory client library.
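The commit message names the new option types and the DI entry point but not their shapes. Below is a minimal C# sketch of the bind-then-validate pattern it describes; only the names AuthorityAdvisoryAiOptions and ServiceCollectionExtensions come from the commit itself, while the property, the configuration keys, the Validate() signature, and the AddIssuerDirectoryClient method are illustrative assumptions.

```csharp
// Sketch only: every member and configuration key below is an assumed shape,
// not the actual API introduced by this commit.
using System;
using System.Collections.Generic;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;

public sealed class AuthorityAdvisoryAiOptions
{
    // Hypothetical property; the real class also carries tenant-specific settings.
    public string? RemoteInferenceEndpoint { get; set; }

    // The commit adds validation/normalization methods; a bool-returning
    // Validate() is an assumption about their shape.
    public bool Validate() =>
        RemoteInferenceEndpoint is null
        || Uri.TryCreate(RemoteInferenceEndpoint, UriKind.Absolute, out _);
}

public static class Demo
{
    public static void Main()
    {
        IConfiguration configuration = new ConfigurationBuilder()
            .AddInMemoryCollection(new Dictionary<string, string?>
            {
                // Hypothetical section layout.
                ["Authority:AdvisoryAi:RemoteInferenceEndpoint"] = "https://inference.internal.example",
            })
            .Build();

        var services = new ServiceCollection();

        // Bind the section onto the options class and attach the validation hook.
        services.AddOptions<AuthorityAdvisoryAiOptions>()
            .Bind(configuration.GetSection("Authority:AdvisoryAi"))
            .Validate(o => o.Validate(), "Invalid advisory AI configuration.");

        // Hypothetical name for the new ServiceCollectionExtensions entry point,
        // presumably registering a typed HttpClient plus the caching layer:
        // services.AddIssuerDirectoryClient(configuration.GetSection("IssuerDirectory"));

        using var provider = services.BuildServiceProvider();

        // Resolving the options instance triggers the Validate callback; a bad
        // endpoint surfaces as an OptionsValidationException here.
        var options = provider.GetRequiredService<IOptions<AuthorityAdvisoryAiOptions>>().Value;
        Console.WriteLine(options.RemoteInferenceEndpoint ?? "(no remote inference endpoint)");
    }
}
```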
@@ -15,11 +15,12 @@ Dependencies:
 from __future__ import annotations
 
 import argparse
 import json
 import logging
 import os
 import shutil
+import subprocess
 from dataclasses import dataclass
 from datetime import datetime, timezone
 from pathlib import Path
 
@@ -211,25 +212,44 @@ def write_index(entries: List[DocEntry], output_root: Path) -> None:
     logging.info("Wrote HTML index with %d entries", len(entries))
 
 
 def parse_args() -> argparse.Namespace:
     parser = argparse.ArgumentParser(description="Render documentation bundle")
     parser.add_argument("--source", default="docs", type=Path, help="Directory containing Markdown sources")
     parser.add_argument("--output", default=Path("build/docs-site"), type=Path, help="Directory for rendered output")
     parser.add_argument("--clean", action="store_true", help="Remove the output directory before rendering")
     return parser.parse_args()
 
 
+def run_attestor_validation(repo_root: Path) -> None:
+    """Execute the attestor schema + SDK validation prior to rendering docs."""
+    logging.info("Running attestor payload validation (npm run docs:attestor:validate)")
+    result = subprocess.run(
+        ["npm", "run", "docs:attestor:validate"],
+        cwd=repo_root,
+        check=False,
+    )
+    if result.returncode != 0:
+        raise RuntimeError("Attestor payload validation failed; aborting docs render.")
+
+
 def main() -> int:
     logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")
     args = parse_args()
 
     source_root: Path = args.source.resolve()
     output_root: Path = args.output.resolve()
+    repo_root = Path(__file__).resolve().parents[1]
 
     if not source_root.exists():
         logging.error("Source directory %s does not exist", source_root)
         return os.EX_NOINPUT
 
+    try:
+        run_attestor_validation(repo_root)
+    except RuntimeError as exc:
+        logging.error("%s", exc)
+        return os.EX_DATAERR
+
     if args.clean and output_root.exists():
         logging.info("Cleaning existing output directory %s", output_root)
         shutil.rmtree(output_root)
scripts/run-attestor-ttl-validation.sh (new file, 81 lines)
@@ -0,0 +1,81 @@
#!/usr/bin/env bash
# Runs live TTL validation for Attestor dedupe stores against local MongoDB/Redis.

set -euo pipefail

if ! command -v docker >/dev/null 2>&1; then
  echo "docker CLI is required. Install Docker Desktop or ensure docker is on PATH." >&2
  exit 1
fi

if ! docker compose version >/dev/null 2>&1; then
  if command -v docker-compose >/dev/null 2>&1; then
    compose_cmd="docker-compose"
  else
    echo "docker compose plugin (or docker-compose) is required." >&2
    exit 1
  fi
else
  compose_cmd="docker compose"
fi

repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
compose_file="$(mktemp -t attestor-ttl-compose-XXXXXX.yaml)"

cleanup() {
  $compose_cmd -f "$compose_file" down -v >/dev/null 2>&1 || true
  rm -f "$compose_file"
}
trap cleanup EXIT

cat >"$compose_file" <<'YAML'
services:
  mongo:
    image: mongo:7.0
    ports:
      - "27017:27017"
    healthcheck:
      test: ["CMD", "mongosh", "--quiet", "localhost/test", "--eval", "db.runCommand({ ping: 1 })"]
      interval: 5s
      timeout: 3s
      retries: 20
  redis:
    image: redis:7.2
    command: ["redis-server", "--save", "", "--appendonly", "no"]
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 20
YAML

echo "Starting MongoDB and Redis containers..."
$compose_cmd -f "$compose_file" up -d

wait_for_port() {
  local host=$1
  local port=$2
  local name=$3
  for attempt in {1..60}; do
    if (echo > /dev/tcp/"$host"/"$port") >/dev/null 2>&1; then
      echo "$name is accepting connections."
      return 0
    fi
    sleep 1
  done
  echo "Timeout waiting for $name on $host:$port" >&2
  return 1
}

wait_for_port 127.0.0.1 27017 "MongoDB"
wait_for_port 127.0.0.1 6379 "Redis"

export ATTESTOR_LIVE_MONGO_URI="${ATTESTOR_LIVE_MONGO_URI:-mongodb://127.0.0.1:27017}"
export ATTESTOR_LIVE_REDIS_URI="${ATTESTOR_LIVE_REDIS_URI:-127.0.0.1:6379}"

echo "Running live TTL validation tests..."
dotnet test "$repo_root/src/Attestor/StellaOps.Attestor.sln" --no-build --filter "Category=LiveTTL" "$@"

echo "Live TTL validation complete. Shutting down containers."

scripts/validate-attestation-schemas.mjs (new file, 145 lines)
@@ -0,0 +1,145 @@
import { readFileSync } from 'node:fs';
import { fileURLToPath } from 'node:url';
import { dirname, join } from 'node:path';
import { spawnSync } from 'node:child_process';
import Ajv2020 from 'ajv/dist/2020.js';
import addFormats from 'ajv-formats';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const repoRoot = join(__dirname, '..');
const moduleRoot = join(repoRoot, 'src', 'Attestor', 'StellaOps.Attestor.Types');
const schemasDir = join(moduleRoot, 'schemas');
const fixturesDir = join(moduleRoot, 'fixtures', 'v1');
const tsDir = join(moduleRoot, 'generated', 'ts');
const goDir = join(moduleRoot, 'generated', 'go');

const schemaFiles = [
  { schema: 'stellaops-build-provenance.v1.schema.json', sample: 'build-provenance.sample.json' },
  { schema: 'stellaops-sbom-attestation.v1.schema.json', sample: 'sbom-attestation.sample.json' },
  { schema: 'stellaops-scan-results.v1.schema.json', sample: 'scan-results.sample.json' },
  { schema: 'stellaops-vex-attestation.v1.schema.json', sample: 'vex-attestation.sample.json' },
  { schema: 'stellaops-policy-evaluation.v1.schema.json', sample: 'policy-evaluation.sample.json' },
  { schema: 'stellaops-risk-profile.v1.schema.json', sample: 'risk-profile-evidence.sample.json' },
  { schema: 'stellaops-custom-evidence.v1.schema.json', sample: 'custom-evidence.sample.json' }
];

const commonSchemaPath = join(schemasDir, 'attestation-common.v1.schema.json');
const ajv = new Ajv2020({ strict: false, allErrors: true });
addFormats(ajv);

const commonSchema = JSON.parse(readFileSync(commonSchemaPath, 'utf8'));
const commonId = commonSchema.$id || 'https://schemas.stella-ops.org/attestations/common/v1';
ajv.addSchema(commonSchema, commonId);

let failed = false;

function stableStringify(value) {
  if (Array.isArray(value)) {
    return '[' + value.map(stableStringify).join(',') + ']';
  }

  if (value && typeof value === 'object') {
    const entries = Object.keys(value)
      .sort()
      .map((key) => `${JSON.stringify(key)}:${stableStringify(value[key])}`);
    return '{' + entries.join(',') + '}';
  }

  return JSON.stringify(value);
}

function runCommand(command, args, options) {
  const result = spawnSync(command, args, { stdio: 'inherit', ...options });
  if (result.error) {
    if (result.error.code === 'ENOENT') {
      throw new Error(`Command not found: ${command}`);
    }
    throw result.error;
  }
  if (result.status !== 0) {
    throw new Error(`Command failed: ${command} ${args.join(' ')}`);
  }
}

function commandExists(command) {
  const result = spawnSync(command, ['--version'], {
    stdio: 'ignore',
    env: {
      ...process.env,
      PATH: `/usr/local/go/bin:${process.env.PATH ?? ''}`,
    },
  });
  if (result.error && result.error.code === 'ENOENT') {
    return false;
  }
  return (result.status ?? 0) === 0;
}

for (const mapping of schemaFiles) {
  const schemaFile = mapping.schema;
  const sample = mapping.sample;
  const schemaPath = join(schemasDir, schemaFile);
  const samplePath = join(fixturesDir, sample);

  const schemaJson = JSON.parse(readFileSync(schemaPath, 'utf8'));
  const sampleJson = JSON.parse(readFileSync(samplePath, 'utf8'));

  const schemaId = schemaJson.$id || ('https://stella-ops.org/schemas/attestor/' + schemaFile);
  ajv.removeSchema(schemaId);
  ajv.addSchema(schemaJson, schemaId);

  const alias = new URL('attestation-common.v1.schema.json', new URL(schemaId));
  if (!ajv.getSchema(alias.href)) {
    ajv.addSchema(commonSchema, alias.href);
  }

  const validate = ajv.getSchema(schemaId) || ajv.compile(schemaJson);
  const valid = validate(sampleJson);

  if (!valid) {
    failed = true;
    console.error('✖ ' + schemaFile + ' failed for fixture ' + sample);
    console.error(validate.errors || []);
  } else {
    const canonical = stableStringify(sampleJson);
    const digest = Buffer.from(canonical, 'utf8').toString('base64');
    console.log('✔ ' + schemaFile + ' ✓ ' + sample + ' (canonical b64: ' + digest.slice(0, 16) + '… )');
  }
}

if (failed) {
  console.error('One or more schema validations failed.');
  process.exit(1);
}

try {
  console.log('\n▶ Installing TypeScript dependencies...');
  runCommand('npm', ['install', '--no-fund', '--no-audit'], { cwd: tsDir });

  console.log('▶ Running TypeScript build/tests...');
  runCommand('npm', ['run', 'test'], { cwd: tsDir });

  const goCandidates = [
    'go',
    '/usr/local/go/bin/go',
    process.env.GO || '',
  ].filter(Boolean);
  const goCommand = goCandidates.find((candidate) => commandExists(candidate));

  if (goCommand) {
    console.log('▶ Running Go tests...');
    const goEnv = {
      ...process.env,
      PATH: `/usr/local/go/bin:${process.env.PATH ?? ''}`,
    };
    runCommand(goCommand, ['test', './...'], { cwd: goDir, env: goEnv });
  } else {
    console.warn('⚠️  Go toolchain not found; skipping Go SDK tests.');
  }
} catch (err) {
  console.error(err.message);
  process.exit(1);
}

console.log('All attestation schemas and SDKs validated successfully.');