Refactor code structure and optimize performance across multiple modules

This commit is contained in:
StellaOps Bot
2025-12-26 20:03:22 +02:00
parent c786faae84
commit b4fc66feb6
3353 changed files with 88254 additions and 1590657 deletions

View File

@@ -0,0 +1,130 @@
#!/usr/bin/env python3
"""
Adds StellaOps.TestKit ProjectReference to test projects that use TestCategories
but are missing the reference.
"""
import os
import re
import sys
from pathlib import Path
def get_relative_path_to_testkit(csproj_path: Path) -> "str | None":
    """Calculate the relative path from a .csproj file to the TestKit project.

    The TestKit project lives at src/__Libraries/StellaOps.TestKit/.

    Args:
        csproj_path: Path to the test project's .csproj file.

    Returns:
        Forward-slash relative path suitable for an XML ProjectReference,
        or None when no 'src' ancestor directory can be found.
        (The previous annotation claimed '-> str' but None was returned
        on failure; the annotation now matches the behavior.)
    """
    csproj_dir = csproj_path.parent
    # Walk up at most 10 levels looking for the 'src' directory. The depth
    # cap also guards against spinning at the filesystem root, where
    # current.parent == current.
    current = csproj_dir
    depth = 0
    while current.name != 'src' and depth < 10:
        current = current.parent
        depth += 1
    if current.name != 'src':
        return None
    src_root = current
    rel_path = os.path.relpath(
        src_root / '__Libraries' / 'StellaOps.TestKit' / 'StellaOps.TestKit.csproj',
        csproj_dir
    )
    # Normalize to forward slashes so the reference is valid XML on any OS.
    return rel_path.replace('\\', '/')
def project_uses_testkit(csproj_dir: Path) -> bool:
    """Return True if any .cs file under csproj_dir references TestCategories.

    Build output under obj/ and bin/ is ignored; unreadable files are
    skipped.
    """
    for cs_file in csproj_dir.rglob('*.cs'):
        # Check path components instead of substring-matching on str():
        # the old '/obj/' test silently failed with Windows '\' separators.
        if 'obj' in cs_file.parts or 'bin' in cs_file.parts:
            continue
        try:
            content = cs_file.read_text(encoding='utf-8-sig', errors='ignore')
        except OSError:
            # Unreadable file (permissions, broken symlink, ...) - skip it.
            # The old bare 'except: pass' also swallowed KeyboardInterrupt.
            continue
        if 'TestCategories.' in content:
            return True
    return False
def project_has_testkit_reference(content: str) -> bool:
    """Return True when the csproj XML already mentions the TestKit project."""
    marker = 'StellaOps.TestKit'
    return content.find(marker) >= 0
def add_testkit_reference(csproj_path: Path, dry_run: bool = False) -> bool:
    """Add a TestKit ProjectReference to the csproj when it is needed.

    A reference is inserted only when the project does not already
    reference TestKit AND some .cs file in the project directory uses
    TestCategories.

    Args:
        csproj_path: Path to the .csproj file to patch.
        dry_run: When True, decide but do not write the file.

    Returns:
        True when a reference was (or would be) added, False otherwise.
    """
    try:
        content = csproj_path.read_text(encoding='utf-8')
    except Exception as e:
        print(f" Error reading {csproj_path}: {e}", file=sys.stderr)
        return False
    if project_has_testkit_reference(content):
        return False
    if not project_uses_testkit(csproj_path.parent):
        return False
    rel_path = get_relative_path_to_testkit(csproj_path)
    if not rel_path:
        print(f" Could not determine path to TestKit from {csproj_path}", file=sys.stderr)
        return False
    # Find a good place to insert the reference - look for existing ProjectReference
    if '<ProjectReference' in content:
        # Append the new reference after an existing ProjectReference that
        # is immediately followed by </ItemGroup>; count=1 limits the edit
        # to the first such group.
        pattern = r'( <ProjectReference [^>]+/>\s*\n)( </ItemGroup>)'
        replacement = f'\\1 <ProjectReference Include="{rel_path}" />\n\\2'
        fixed = re.sub(pattern, replacement, content, count=1)
    else:
        # No ProjectReference anywhere: append a fresh ItemGroup just
        # before the closing </Project> tag.
        pattern = r'(</Project>)'
        new_item_group = f''' <ItemGroup>
<ProjectReference Include="{rel_path}" />
</ItemGroup>
\\1'''
        fixed = re.sub(pattern, new_item_group, content)
    if fixed == content:
        # Neither regex matched anything - report and leave the file alone.
        print(f" Could not find insertion point in {csproj_path}", file=sys.stderr)
        return False
    if not dry_run:
        csproj_path.write_text(fixed, encoding='utf-8')
    return True
def main():
    """Scan for test project files and add missing TestKit references."""
    import argparse
    parser = argparse.ArgumentParser(description='Add TestKit reference to test projects')
    parser.add_argument('--path', default='src', help='Path to scan')
    parser.add_argument('--dry-run', action='store_true', help='Show what would be fixed')
    args = parser.parse_args()
    root = Path(args.path)
    fixed_count = 0
    # One consolidated pattern list instead of a special-cased first loop.
    # A 'seen' set guards against reporting a project twice should a file
    # ever match more than one pattern (notably in --dry-run mode, where
    # nothing is written and a second pass would count it again).
    patterns = [
        '*.Tests.csproj',
        '*UnitTests.csproj',
        '*IntegrationTests.csproj',
        '*SmokeTests.csproj',
        '*FixtureTests.csproj',
    ]
    seen = set()
    for pattern in patterns:
        for csproj in root.rglob(pattern):
            if csproj in seen:
                continue
            seen.add(csproj)
            if add_testkit_reference(csproj, dry_run=args.dry_run):
                print(f"{'Would add' if args.dry_run else 'Added'} TestKit reference to: {csproj}")
                fixed_count += 1
    print(f"\nAdded TestKit reference to: {fixed_count} projects")
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,69 @@
#!/usr/bin/env pwsh
<#
.SYNOPSIS
Fixes misplaced 'using StellaOps.TestKit;' statements in test files.
.DESCRIPTION
The validate-test-traits.py --fix script has a bug that inserts
'using StellaOps.TestKit;' after 'using var' statements inside methods,
causing compilation errors.
This script:
1. Finds all affected .cs files
2. Removes the misplaced 'using StellaOps.TestKit;' lines
3. Ensures 'using StellaOps.TestKit;' exists at the top of the file
#>
param(
    [string]$Path = "src",
    [switch]$DryRun
)
$ErrorActionPreference = "Stop"

# Pattern to find misplaced using statements (after 'using var' in a method body).
$brokenPattern = "(?m)^(\s*using var .+;\s*\r?\n)(using StellaOps\.TestKit;\s*\r?\n)"

$fixedCount = 0
$checkedCount = 0

# Only consider .cs files whose full path mentions Test/Tests.
$files = Get-ChildItem -Path $Path -Recurse -Include "*.cs" |
    Where-Object { $_.FullName -match "Tests?" }

foreach ($file in $files) {
    $checkedCount++
    $content = Get-Content -Path $file.FullName -Raw -Encoding UTF8
    if ($content -match $brokenPattern) {
        Write-Host "Fixing: $($file.FullName)" -ForegroundColor Yellow
        # Drop the misplaced directive, keep the 'using var' line.
        $fixed = $content -replace $brokenPattern, '$1'
        # Ensure the directive exists in the top-of-file using block.
        $hasTopUsing = $fixed -match "(?m)^using StellaOps\.TestKit;\s*$"
        if (-not $hasTopUsing) {
            # Insert after the last top-level using statement.
            $fixed = $fixed -replace "(?m)(^using [^;]+;\s*\r?\n)(?!using)", "`$1using StellaOps.TestKit;`r`n"
        }
        if (-not $DryRun) {
            # BUG FIX: the original always constructed UTF8Encoding($true),
            # unconditionally adding a BOM despite the "preserve BOM" intent.
            # Emit a BOM only when the source file actually had one.
            $bytes = [System.IO.File]::ReadAllBytes($file.FullName)
            $hadBom = ($bytes.Length -ge 3 -and $bytes[0] -eq 0xEF -and $bytes[1] -eq 0xBB -and $bytes[2] -eq 0xBF)
            $encoding = [System.Text.UTF8Encoding]::new($hadBom)
            [System.IO.File]::WriteAllText($file.FullName, $fixed, $encoding)
        }
        $fixedCount++
    }
}

Write-Host "`nChecked: $checkedCount files" -ForegroundColor Cyan
Write-Host "Fixed: $fixedCount files" -ForegroundColor Green
if ($DryRun) {
    Write-Host "`n(Dry run - no files were modified)" -ForegroundColor Magenta
}

View File

@@ -0,0 +1,109 @@
#!/usr/bin/env python3
"""
Fixes misplaced 'using StellaOps.TestKit;' statements in test files.
The validate-test-traits.py --fix script has a bug that inserts
'using StellaOps.TestKit;' after 'using var' statements inside methods,
causing CS1001 compilation errors.
This script:
1. Finds all affected .cs files
2. Removes the misplaced 'using StellaOps.TestKit;' lines (inside methods)
3. Ensures 'using StellaOps.TestKit;' exists at the top of the file
"""
import os
import re
import sys
from pathlib import Path
def fix_file(file_path: Path, dry_run: bool = False) -> bool:
    """Fix one file: drop misplaced 'using StellaOps.TestKit;' lines.

    The misplaced lines sit directly after a 'using var ...;' statement
    inside a method body. After removing them, the top-of-file using block
    is checked and the directive is re-added there if missing.

    Args:
        file_path: The .cs file to repair.
        dry_run: When True, report but do not write.

    Returns:
        True when the file was (or would be) modified.
    """
    try:
        # BUG FIX: detect the BOM from raw bytes BEFORE decoding. Reading
        # with 'utf-8-sig' strips the BOM, so the previous check of
        # content.startswith('\ufeff') could never be true and BOM files
        # were silently rewritten without their BOM.
        had_bom = file_path.read_bytes().startswith(b'\xef\xbb\xbf')
        content = file_path.read_text(encoding='utf-8-sig')
    except Exception as e:
        print(f" Error reading {file_path}: {e}", file=sys.stderr)
        return False
    original = content
    # 'using var ...;' immediately followed by the misplaced directive.
    broken_pattern = re.compile(
        r'(using var [^;]+;\s*\n)(using StellaOps\.TestKit;\s*\n)',
        re.MULTILINE
    )
    if not broken_pattern.search(content):
        return False
    # Keep the 'using var' line, drop the misplaced directive.
    fixed = broken_pattern.sub(r'\1', content)
    # Ensure the directive exists before the namespace declaration.
    namespace_match = re.search(r'^namespace\s+\w+', fixed, re.MULTILINE)
    if namespace_match:
        top_section = fixed[:namespace_match.start()]
        if 'using StellaOps.TestKit;' not in top_section:
            # BUG FIX: anchor with [ \t]*$ instead of \s*$. The greedy \s*
            # swallowed trailing blank lines, so insertion used to land
            # directly before 'namespace', producing 'TestKit;namespace'.
            last_using = None
            for match in re.finditer(r'^using [^;]+;[ \t]*$', top_section, re.MULTILINE):
                last_using = match
            if last_using:
                insert_pos = last_using.end()
                fixed = fixed[:insert_pos] + '\nusing StellaOps.TestKit;' + fixed[insert_pos:]
    if fixed != original:
        if not dry_run:
            encoding = 'utf-8-sig' if had_bom else 'utf-8'
            file_path.write_text(fixed, encoding=encoding)
        return True
    return False
def main():
    """CLI entry point: scan --path for test .cs files and repair them."""
    import argparse
    parser = argparse.ArgumentParser(description='Fix misplaced using statements')
    parser.add_argument('--path', default='src', help='Path to scan')
    parser.add_argument('--dry-run', action='store_true', help='Show what would be fixed')
    args = parser.parse_args()
    root = Path(args.path)
    if not root.exists():
        print(f"Path not found: {root}", file=sys.stderr)
        sys.exit(1)
    fixed_count = 0
    checked_count = 0
    for file_path in root.rglob('*.cs'):
        # Component-wise checks work with both '/' and '\' separators,
        # unlike the previous substring tests against str(file_path).
        parts = file_path.parts
        if 'obj' in parts or 'bin' in parts or 'node_modules' in parts:
            continue
        if 'Test' not in str(file_path):
            continue
        checked_count += 1
        if fix_file(file_path, dry_run=args.dry_run):
            print(f"{'Would fix' if args.dry_run else 'Fixed'}: {file_path}")
            fixed_count += 1
    print(f"\nChecked: {checked_count} files")
    print(f"Fixed: {fixed_count} files")
    if args.dry_run:
        print("\n(Dry run - no files were modified)")
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,82 @@
#!/usr/bin/env python3
"""
Adds 'using StellaOps.TestKit;' to files that use TestCategories but are missing the import.
"""
import re
import sys
from pathlib import Path
def fix_file(file_path: Path, dry_run: bool = False) -> bool:
    """Add 'using StellaOps.TestKit;' to a file that uses TestCategories.

    Args:
        file_path: The .cs file to inspect and patch.
        dry_run: When True, decide but do not write.

    Returns:
        True when the directive was (or would be) added.
    """
    try:
        # BUG FIX: read the BOM flag from raw bytes. Decoding with
        # 'utf-8-sig' strips the BOM, so the previous check of
        # content.startswith('\ufeff') never detected it and BOM files
        # were rewritten without their BOM.
        had_bom = file_path.read_bytes().startswith(b'\xef\xbb\xbf')
        content = file_path.read_text(encoding='utf-8-sig')
    except Exception as e:
        print(f" Error reading {file_path}: {e}", file=sys.stderr)
        return False
    # Only touch files that actually use TestCategories.
    if 'TestCategories.' not in content:
        return False
    # Already imported anywhere in the file - nothing to do.
    if 'using StellaOps.TestKit;' in content:
        return False
    namespace_match = re.search(r'^namespace\s+[\w.]+', content, re.MULTILINE)
    if not namespace_match:
        print(f" No namespace found in {file_path}", file=sys.stderr)
        return False
    top_section = content[:namespace_match.start()]
    # BUG FIX: anchor the match with [ \t]*$ instead of \s*$. The greedy
    # \s* swallowed the blank line(s) after the last using, so the
    # insertion landed directly before 'namespace' and produced the
    # 'TestKit;namespace' breakage that fix-namespace-newline.py repairs.
    last_using = None
    for match in re.finditer(r'^using [^;]+;[ \t]*$', top_section, re.MULTILINE):
        last_using = match
    if last_using:
        insert_pos = last_using.end()
        fixed = content[:insert_pos] + '\nusing StellaOps.TestKit;' + content[insert_pos:]
    else:
        # No using statements at all - prepend the directive.
        fixed = 'using StellaOps.TestKit;\n' + content
    if not dry_run:
        encoding = 'utf-8-sig' if had_bom else 'utf-8'
        file_path.write_text(fixed, encoding=encoding)
    return True
def main():
    """CLI entry point: add missing TestKit usings to test files under --path."""
    import argparse
    parser = argparse.ArgumentParser(description='Add missing using StellaOps.TestKit statements')
    parser.add_argument('--path', default='src', help='Path to scan')
    parser.add_argument('--dry-run', action='store_true', help='Show what would be fixed')
    args = parser.parse_args()
    root = Path(args.path)
    fixed_count = 0
    checked_count = 0
    for file_path in root.rglob('*.cs'):
        # Component-wise checks work with both '/' and '\' separators,
        # unlike the previous substring tests against str(file_path).
        parts = file_path.parts
        if 'obj' in parts or 'bin' in parts or 'node_modules' in parts:
            continue
        if 'Test' not in str(file_path):
            continue
        checked_count += 1
        if fix_file(file_path, dry_run=args.dry_run):
            print(f"{'Would add' if args.dry_run else 'Added'} using to: {file_path}")
            fixed_count += 1
    print(f"\nChecked: {checked_count} files")
    print(f"Fixed: {fixed_count} files")
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,57 @@
#!/usr/bin/env python3
"""
Fixes missing newline between 'using StellaOps.TestKit;' and 'namespace'.
"""
import re
import sys
from pathlib import Path
def fix_file(file_path: Path, dry_run: bool = False) -> bool:
    """Insert the missing newline in 'using StellaOps.TestKit;namespace'.

    Args:
        file_path: The .cs file to inspect and patch.
        dry_run: When True, decide but do not write.

    Returns:
        True when the file was (or would be) rewritten.
    """
    try:
        # BUG FIX: the BOM must be detected from raw bytes; decoding with
        # 'utf-8-sig' strips it, so the old check on the decoded text
        # never fired and BOM files lost their BOM on rewrite.
        had_bom = file_path.read_bytes().startswith(b'\xef\xbb\xbf')
        content = file_path.read_text(encoding='utf-8-sig')
    except Exception as e:
        print(f" Error reading {file_path}: {e}", file=sys.stderr)
        return False
    # Pattern: using StellaOps.TestKit;namespace (missing line break)
    if 'TestKit;namespace' not in content:
        return False
    fixed = content.replace('TestKit;namespace', 'TestKit;\nnamespace')
    if not dry_run:
        encoding = 'utf-8-sig' if had_bom else 'utf-8'
        file_path.write_text(fixed, encoding=encoding)
    return True
def main():
    """CLI entry point: repair 'TestKit;namespace' joins under --path."""
    import argparse
    parser = argparse.ArgumentParser(description='Fix missing newline between using and namespace')
    parser.add_argument('--path', default='src', help='Path to scan')
    parser.add_argument('--dry-run', action='store_true', help='Show what would be fixed')
    args = parser.parse_args()
    root = Path(args.path)
    fixed_count = 0
    for file_path in root.rglob('*.cs'):
        # Component-wise checks work with both '/' and '\' separators,
        # unlike the previous substring tests against str(file_path).
        parts = file_path.parts
        if 'obj' in parts or 'bin' in parts or 'node_modules' in parts:
            continue
        if fix_file(file_path, dry_run=args.dry_run):
            print(f"{'Would fix' if args.dry_run else 'Fixed'}: {file_path}")
            fixed_count += 1
    print(f"\nFixed: {fixed_count} files")
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,221 @@
#!/usr/bin/env bash
#
# Initialize StellaOps configuration from sample files
#
# Usage:
#   ./devops/scripts/init-config.sh [profile]
#
# Profiles:
#   dev    - Development environment (default)
#   stage  - Staging environment
#   prod   - Production environment
#   airgap - Air-gapped deployment
#
set -euo pipefail
# Resolve the repository root relative to this script's location
# (devops/scripts/ -> two levels up).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
ETC_DIR="${ROOT_DIR}/etc"
# First positional argument selects the profile; defaults to 'dev'.
PROFILE="${1:-dev}"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Leveled loggers used throughout the script.
log_info() { echo -e "${BLUE}[INFO]${NC} $*"; }
log_ok() { echo -e "${GREEN}[OK]${NC} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
log_error() { echo -e "${RED}[ERROR]${NC} $*"; }
# Validate profile
case "${PROFILE}" in
    dev|stage|prod|airgap)
        log_info "Initializing configuration for profile: ${PROFILE}"
        ;;
    *)
        log_error "Unknown profile: ${PROFILE}"
        echo "Valid profiles: dev, stage, prod, airgap"
        exit 1
        ;;
esac
# Create directory structure
# Build the etc/ directory tree that the later steps populate.
create_directories() {
    log_info "Creating directory structure..."
    local config_dirs=(
        "etc/authority/plugins"
        "etc/certificates/trust-roots"
        "etc/certificates/signing"
        "etc/concelier/sources"
        "etc/crypto/profiles/cn"
        "etc/crypto/profiles/eu"
        "etc/crypto/profiles/kr"
        "etc/crypto/profiles/ru"
        "etc/crypto/profiles/us-fips"
        "etc/env"
        "etc/llm-providers"
        "etc/notify/templates"
        "etc/plugins/notify"
        "etc/plugins/scanner/lang"
        "etc/plugins/scanner/os"
        "etc/policy/packs"
        "etc/policy/schemas"
        "etc/router"
        "etc/scanner"
        "etc/scheduler"
        "etc/scm-connectors"
        "etc/secrets"
        "etc/signals"
        "etc/vex"
    )
    local d
    for d in "${config_dirs[@]}"; do
        mkdir -p "${ROOT_DIR}/${d}"
    done
    log_ok "Directory structure created"
}
# Copy sample files to active configs
# Copy every *.sample under etc/ to its active (un-suffixed) counterpart,
# never overwriting an existing active config.
copy_sample_files() {
    log_info "Copying sample files..."
    local count=0
    local sample_file target_file
    # NUL-delimited find output survives spaces in filenames.
    while IFS= read -r -d '' sample_file; do
        # Target file = sample file without the .sample extension.
        target_file="${sample_file%.sample}"
        if [[ -f "${target_file}" ]]; then
            log_warn "Skipping (exists): ${target_file#${ROOT_DIR}/}"
            continue
        fi
        cp "${sample_file}" "${target_file}"
        log_ok "Created: ${target_file#${ROOT_DIR}/}"
        # BUG FIX: '((count++))' returns the PRE-increment value, so the
        # first increment (0) made the arithmetic command "fail" and the
        # whole script aborted here under 'set -e'.
        count=$((count + 1))
    done < <(find "${ETC_DIR}" -name "*.sample" -type f -print0 2>/dev/null)
    log_info "Copied ${count} sample files"
}
# Copy environment-specific profile
# Seed the repository-root .env from the selected profile's sample,
# leaving any existing .env untouched.
copy_env_profile() {
    log_info "Setting up environment profile: ${PROFILE}"
    local sample="${ETC_DIR}/env/${PROFILE}.env.sample"
    local target="${ROOT_DIR}/.env"
    if [[ ! -f "${sample}" ]]; then
        log_warn "No environment sample found for profile: ${PROFILE}"
        return
    fi
    if [[ -f "${target}" ]]; then
        log_warn ".env already exists, not overwriting"
        return
    fi
    cp "${sample}" "${target}"
    log_ok "Created .env from ${PROFILE} profile"
}
# Create .gitignore entries for active configs
# Append config-related ignore rules to .gitignore (idempotent: the marker
# comment is used to detect a previous run).
update_gitignore() {
    log_info "Updating .gitignore..."
    local gitignore_path="${ROOT_DIR}/.gitignore"
    if grep -q "# Active configuration files" "${gitignore_path}" 2>/dev/null; then
        log_warn ".gitignore already contains config entries"
        return
    fi
    local ignore_lines=(
        "# Active configuration files (not samples)"
        "etc/**/*.yaml"
        "!etc/**/*.yaml.sample"
        "etc/**/*.json"
        "!etc/**/*.json.sample"
        "etc/**/env"
        "!etc/**/env.sample"
        "etc/secrets/*"
        "!etc/secrets/*.sample"
        "!etc/secrets/README.md"
    )
    echo "" >> "${gitignore_path}"
    local line
    for line in "${ignore_lines[@]}"; do
        echo "${line}" >> "${gitignore_path}"
    done
    log_ok "Updated .gitignore"
}
# Validate the configuration
# Verify the required directories exist; exit non-zero on failure.
validate_config() {
    log_info "Validating configuration..."
    local errors=0
    local required_dirs=(
        "etc/scanner"
        "etc/authority"
        "etc/policy"
    )
    for dir in "${required_dirs[@]}"; do
        if [[ ! -d "${ROOT_DIR}/${dir}" ]]; then
            log_error "Missing required directory: ${dir}"
            # BUG FIX: '((errors++))' evaluates to the PRE-increment value,
            # so the first increment (0) aborted the script under 'set -e'
            # before the error could even be summarized.
            errors=$((errors + 1))
        fi
    done
    if [[ ${errors} -gt 0 ]]; then
        log_error "Validation failed with ${errors} errors"
        exit 1
    fi
    log_ok "Configuration validated"
}
# Print summary
# Print summary: one heredoc instead of a chain of echo calls;
# the emitted text is identical.
print_summary() {
    cat <<EOF

========================================
 Configuration Initialized
========================================

Profile: ${PROFILE}

Next steps:
 1. Review and customize configurations in etc/
 2. Set sensitive values via environment variables
 3. For crypto compliance, set STELLAOPS_CRYPTO_PROFILE

Quick start:
 docker compose up -d

Documentation:
 docs/operations/configuration-guide.md

EOF
}
# Main
main() {
create_directories
copy_sample_files
copy_env_profile
update_gitignore
validate_config
print_summary
}
main "$@"

View File

@@ -0,0 +1,330 @@
#!/usr/bin/env bash
#
# Migrate legacy configuration structure to consolidated etc/
#
# This script migrates:
#   - certificates/ -> etc/certificates/
#   - config/       -> etc/crypto/ and etc/env/
#   - policies/     -> etc/policy/
#   - etc/rootpack/ -> etc/crypto/profiles/
#
# Usage:
#   ./devops/scripts/migrate-config.sh [--dry-run]
#
set -euo pipefail
# Resolve the repository root relative to this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# --dry-run: log intended actions without executing them (see run_cmd).
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Leveled loggers used throughout the script.
log_info() { echo -e "${BLUE}[INFO]${NC} $*"; }
log_ok() { echo -e "${GREEN}[OK]${NC} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
log_error() { echo -e "${RED}[ERROR]${NC} $*"; }
log_dry() { echo -e "${YELLOW}[DRY-RUN]${NC} $*"; }
# Execute or log command
# Run the given command for real, or merely log it in dry-run mode.
run_cmd() {
    if [[ "${DRY_RUN}" != true ]]; then
        "$@"
        return
    fi
    log_dry "$*"
}
# Create backup
# Create a timestamped tar.gz backup of the legacy config directories
# before anything is moved.
create_backup() {
    local backup_file="${ROOT_DIR}/config-backup-$(date +%Y%m%d-%H%M%S).tar.gz"
    log_info "Creating backup: ${backup_file}"
    if [[ "${DRY_RUN}" == true ]]; then
        log_dry "Would create backup of: certificates/ config/ policies/ etc/"
        return
    fi
    local dirs_to_backup=()
    [[ -d "${ROOT_DIR}/certificates" ]] && dirs_to_backup+=("certificates")
    [[ -d "${ROOT_DIR}/config" ]] && dirs_to_backup+=("config")
    [[ -d "${ROOT_DIR}/policies" ]] && dirs_to_backup+=("policies")
    [[ -d "${ROOT_DIR}/etc" ]] && dirs_to_backup+=("etc")
    if [[ ${#dirs_to_backup[@]} -gt 0 ]]; then
        # BUG FIX: previously this did a bare 'cd "${ROOT_DIR}"', which
        # permanently changed the script's working directory as a side
        # effect. 'tar -C' scopes the directory change to tar itself.
        tar -czvf "${backup_file}" -C "${ROOT_DIR}" "${dirs_to_backup[@]}"
        log_ok "Backup created: ${backup_file}"
    else
        log_warn "No directories to backup"
    fi
}
# Create new directory structure
# Lay out the target etc/ tree for the migration.
create_directories() {
    log_info "Creating new directory structure..."
    local target_dirs=(
        "etc/certificates/trust-roots"
        "etc/certificates/signing"
        "etc/crypto/profiles/cn"
        "etc/crypto/profiles/eu"
        "etc/crypto/profiles/kr"
        "etc/crypto/profiles/ru"
        "etc/crypto/profiles/us-fips"
        "etc/env"
        "etc/policy/packs"
        "etc/policy/schemas"
    )
    local d
    for d in "${target_dirs[@]}"; do
        run_cmd mkdir -p "${ROOT_DIR}/${d}"
    done
    log_ok "Directory structure created"
}
# Migrate certificates/
# Migrate certificates/ into etc/certificates/{trust-roots,signing}.
migrate_certificates() {
    local src_dir="${ROOT_DIR}/certificates"
    if [[ ! -d "${src_dir}" ]]; then
        log_info "No certificates/ directory found, skipping"
        return
    fi
    log_info "Migrating certificates/..."
    # BUG FIX: the original appended '2>/dev/null' to the 'for f in ...'
    # word lists; a redirection is not valid there and is a bash syntax
    # error, so the script failed to parse at all. Unmatched globs expand
    # to themselves and are filtered by the '[[ -f ]]' guard instead.
    # Trust roots (CA bundles)
    for f in "${src_dir}"/*-bundle*.pem "${src_dir}"/*-root*.pem "${src_dir}"/*_bundle*.pem "${src_dir}"/*_root*.pem; do
        [[ -f "$f" ]] || continue
        run_cmd mv "$f" "${ROOT_DIR}/etc/certificates/trust-roots/"
        log_ok "Moved: $(basename "$f") -> etc/certificates/trust-roots/"
    done
    # Signing keys
    for f in "${src_dir}"/*-signing-*.pem "${src_dir}"/*_signing_*.pem; do
        [[ -f "$f" ]] || continue
        run_cmd mv "$f" "${ROOT_DIR}/etc/certificates/signing/"
        log_ok "Moved: $(basename "$f") -> etc/certificates/signing/"
    done
    # Any remaining .pem/.cer files default to trust-roots
    for f in "${src_dir}"/*.pem "${src_dir}"/*.cer; do
        [[ -f "$f" ]] || continue
        run_cmd mv "$f" "${ROOT_DIR}/etc/certificates/trust-roots/"
        log_ok "Moved: $(basename "$f") -> etc/certificates/trust-roots/"
    done
    # Remove the source directory once it is empty
    if [[ -d "${src_dir}" ]] && [[ -z "$(ls -A "${src_dir}")" ]]; then
        run_cmd rmdir "${src_dir}"
        log_ok "Removed empty: certificates/"
    fi
}
# Migrate config/
# Migrate config/ env samples into etc/crypto/profiles/.
migrate_config_dir() {
    local src_dir="${ROOT_DIR}/config"
    if [[ ! -d "${src_dir}" ]]; then
        log_info "No config/ directory found, skipping"
        return
    fi
    log_info "Migrating config/..."
    # Map legacy env sample files to their regional crypto profile location.
    declare -A env_mapping=(
        [".env.fips.example"]="us-fips/env.sample"
        [".env.eidas.example"]="eu/env.sample"
        [".env.ru-free.example"]="ru/env.sample"
        [".env.ru-paid.example"]="ru/env-paid.sample"
        [".env.sm.example"]="cn/env.sample"
        [".env.kcmvp.example"]="kr/env.sample"
    )
    for src_name in "${!env_mapping[@]}"; do
        local src_file="${src_dir}/env/${src_name}"
        local dst_file="${ROOT_DIR}/etc/crypto/profiles/${env_mapping[$src_name]}"
        if [[ -f "${src_file}" ]]; then
            run_cmd mkdir -p "$(dirname "${dst_file}")"
            run_cmd mv "${src_file}" "${dst_file}"
            log_ok "Moved: ${src_name} -> etc/crypto/profiles/${env_mapping[$src_name]}"
        fi
    done
    # Remove crypto-profiles.sample.json (superseded)
    if [[ -f "${src_dir}/crypto-profiles.sample.json" ]]; then
        run_cmd rm "${src_dir}/crypto-profiles.sample.json"
        log_ok "Removed: config/crypto-profiles.sample.json (superseded by etc/crypto/)"
    fi
    # BUG FIX: the cleanup used bare '[[ ... ]] && cmd' lists as the last
    # statements; when a guard was false the FUNCTION returned non-zero
    # and, under 'set -e', aborted the whole script. Explicit 'if' blocks
    # avoid that.
    if [[ -d "${src_dir}/env" ]] && [[ -z "$(ls -A "${src_dir}/env" 2>/dev/null)" ]]; then
        run_cmd rmdir "${src_dir}/env"
    fi
    if [[ -d "${src_dir}" ]] && [[ -z "$(ls -A "${src_dir}" 2>/dev/null)" ]]; then
        run_cmd rmdir "${src_dir}"
    fi
}
# Migrate policies/
# Migrate policies/ into etc/policy/{packs,schemas}.
migrate_policies() {
    local src_dir="${ROOT_DIR}/policies"
    if [[ ! -d "${src_dir}" ]]; then
        log_info "No policies/ directory found, skipping"
        return
    fi
    log_info "Migrating policies/..."
    # BUG FIX: '2>/dev/null' is not valid inside a 'for ... in' word list
    # (bash syntax error, so the script failed to parse); unmatched globs
    # are filtered by the '[[ -f ]]' guard instead.
    for f in "${src_dir}"/*.yaml; do
        [[ -f "$f" ]] || continue
        run_cmd mv "$f" "${ROOT_DIR}/etc/policy/packs/"
        log_ok "Moved: $(basename "$f") -> etc/policy/packs/"
    done
    # Move schemas
    if [[ -d "${src_dir}/schemas" ]]; then
        for f in "${src_dir}/schemas"/*.json; do
            [[ -f "$f" ]] || continue
            run_cmd mv "$f" "${ROOT_DIR}/etc/policy/schemas/"
            log_ok "Moved: schemas/$(basename "$f") -> etc/policy/schemas/"
        done
        if [[ -z "$(ls -A "${src_dir}/schemas" 2>/dev/null)" ]]; then
            run_cmd rmdir "${src_dir}/schemas"
        fi
    fi
    # Keep module docs alongside the migrated policies.
    if [[ -f "${src_dir}/AGENTS.md" ]]; then
        run_cmd mv "${src_dir}/AGENTS.md" "${ROOT_DIR}/etc/policy/"
    fi
    # BUG FIX: a trailing '[[ ... ]] && cmd' list made the function return
    # non-zero when the directory was not empty, aborting under 'set -e'.
    if [[ -d "${src_dir}" ]] && [[ -z "$(ls -A "${src_dir}" 2>/dev/null)" ]]; then
        run_cmd rmdir "${src_dir}"
    fi
}
# Migrate etc/rootpack/ to etc/crypto/profiles/
# Migrate etc/rootpack/<region>/ into etc/crypto/profiles/<region>/.
migrate_rootpack() {
    local src_dir="${ROOT_DIR}/etc/rootpack"
    if [[ ! -d "${src_dir}" ]]; then
        log_info "No etc/rootpack/ directory found, skipping"
        return
    fi
    log_info "Migrating etc/rootpack/ to etc/crypto/profiles/..."
    for region_dir in "${src_dir}"/*; do
        [[ -d "${region_dir}" ]] || continue
        # Declare and assign separately so a command-substitution failure
        # would not be masked by 'local' (shellcheck SC2155).
        local region_name
        region_name=$(basename "${region_dir}")
        local target_dir="${ROOT_DIR}/etc/crypto/profiles/${region_name}"
        run_cmd mkdir -p "${target_dir}"
        for f in "${region_dir}"/*; do
            [[ -f "$f" ]] || continue
            run_cmd mv "$f" "${target_dir}/"
            log_ok "Moved: rootpack/${region_name}/$(basename "$f") -> etc/crypto/profiles/${region_name}/"
        done
        if [[ -z "$(ls -A "${region_dir}" 2>/dev/null)" ]]; then
            run_cmd rmdir "${region_dir}"
        fi
    done
    # BUG FIX: a trailing '[[ ... ]] && rmdir' list returned non-zero when
    # the directory still had content, aborting the script under 'set -e'.
    if [[ -d "${src_dir}" ]] && [[ -z "$(ls -A "${src_dir}" 2>/dev/null)" ]]; then
        run_cmd rmdir "${src_dir}"
    fi
}
# Validate migration
# Check the new layout exists and warn about leftover legacy directories.
validate_migration() {
    log_info "Validating migration..."
    local errors=0
    # The new structure must exist.
    local required=(
        "etc/certificates"
        "etc/crypto/profiles"
        "etc/policy"
    )
    for dir in "${required[@]}"; do
        if [[ ! -d "${ROOT_DIR}/${dir}" ]]; then
            log_error "Missing: ${dir}"
            # BUG FIX: '((errors++))' yields the PRE-increment value, so
            # the first increment (0) aborted the script under 'set -e'.
            errors=$((errors + 1))
        fi
    done
    # Legacy directories should be empty or gone (warn only).
    local legacy=(
        "certificates"
        "config"
        "policies"
        "etc/rootpack"
    )
    for dir in "${legacy[@]}"; do
        if [[ -d "${ROOT_DIR}/${dir}" ]] && [[ -n "$(ls -A "${ROOT_DIR}/${dir}" 2>/dev/null)" ]]; then
            log_warn "Legacy directory still has content: ${dir}"
        fi
    done
    if [[ ${errors} -gt 0 ]]; then
        log_error "Validation failed"
        return 1
    fi
    log_ok "Migration validated"
}
# Print summary
# Print summary: heredocs instead of echo chains; output is identical.
print_summary() {
    local headline
    if [[ "${DRY_RUN}" == true ]]; then
        headline=" Migration Dry Run Complete"
    else
        headline=" Migration Complete"
    fi
    cat <<EOF

========================================
${headline}
========================================

New structure:
 etc/certificates/ - Trust anchors and signing keys
 etc/crypto/profiles/ - Regional crypto profiles
 etc/policy/ - Policy engine configuration

EOF
    if [[ "${DRY_RUN}" == true ]]; then
        echo "Run without --dry-run to apply changes"
    else
        cat <<EOF
Next steps:
 1. Update Docker Compose volume mounts
 2. Update any hardcoded paths in scripts
 3. Restart services and validate

Rollback:
 tar -xzvf config-backup-*.tar.gz
EOF
    fi
    echo ""
}
# Main
main() {
if [[ "${DRY_RUN}" == true ]]; then
log_info "DRY RUN - no changes will be made"
fi
create_backup
create_directories
migrate_certificates
migrate_config_dir
migrate_policies
migrate_rootpack
validate_migration
print_summary
}
main "$@"

View File

@@ -0,0 +1,343 @@
#!/usr/bin/env python3
"""
Validate and report on test Category traits across the codebase.
Sprint: SPRINT_20251226_007_CICD
This script scans all test files in the codebase and reports:
1. Test files with Category traits
2. Test files missing Category traits
3. Coverage percentage by module
Usage:
python devops/scripts/validate-test-traits.py [--fix] [--module <name>]
Options:
--fix Attempt to add default Unit trait to tests without categories
--module Only process tests in the specified module
--verbose Show detailed output
--json Output as JSON for CI consumption
"""
import os
import re
import sys
import json
import argparse
from pathlib import Path
from dataclasses import dataclass, field
from typing import List, Dict, Set, Optional
# The closed set of Category trait values accepted by this validator.
VALID_CATEGORIES = {
    "Unit",
    "Integration",
    "Architecture",
    "Contract",
    "Security",
    "Golden",
    "Performance",
    "Benchmark",
    "AirGap",
    "Chaos",
    "Determinism",
    "Resilience",
    "Observability",
    "Property",
    "Snapshot",
    "Live",
}
# Patterns to identify test methods and classes
FACT_PATTERN = re.compile(r'\[Fact[^\]]*\]')
THEORY_PATTERN = re.compile(r'\[Theory[^\]]*\]')
# Match both string literals and TestCategories.Xxx constants
# Also match inline format like [Fact, Trait("Category", ...)]
# Group 1 captures the string-literal form, group 2 the constant form;
# exactly one of the two is non-empty per match.
TRAIT_CATEGORY_PATTERN = re.compile(
    r'Trait\s*\(\s*["\']Category["\']\s*,\s*(?:["\'](\w+)["\']|TestCategories\.(\w+))\s*\)'
)
# Heuristic for spotting C# test class declarations.
TEST_CLASS_PATTERN = re.compile(r'public\s+(?:sealed\s+)?class\s+\w+.*Tests?\b')
@dataclass
class TestFileAnalysis:
    """Per-file results of scanning one test source file."""
    # Path of the analyzed file (stored as str for easy JSON output).
    path: str
    # True when at least one [Fact] attribute was found.
    has_facts: bool = False
    # True when at least one [Theory] attribute was found.
    has_theories: bool = False
    # True when at least one Trait("Category", ...) was found.
    has_category_traits: bool = False
    # Distinct category names seen in the file.
    categories_found: Set[str] = field(default_factory=set)
    # Total number of [Fact] + [Theory] attributes.
    test_method_count: int = 0
    # Number of Trait("Category", ...) occurrences (a raw count, not a
    # per-method pairing).
    categorized_test_count: int = 0
def analyze_test_file(file_path: Path) -> TestFileAnalysis:
    """Scan one test file and record its test attributes and Category traits."""
    result = TestFileAnalysis(path=str(file_path))
    try:
        text = file_path.read_text(encoding='utf-8', errors='ignore')
    except Exception as e:
        print(f"Warning: Could not read {file_path}: {e}", file=sys.stderr)
        return result
    # Count [Fact] / [Theory] attributes.
    fact_hits = FACT_PATTERN.findall(text)
    theory_hits = THEORY_PATTERN.findall(text)
    result.has_facts = bool(fact_hits)
    result.has_theories = bool(theory_hits)
    result.test_method_count = len(fact_hits) + len(theory_hits)
    # Collect Category traits.
    trait_hits = TRAIT_CATEGORY_PATTERN.findall(text)
    if trait_hits:
        result.has_category_traits = True
        # Each hit is a (string-literal, constant) tuple; exactly one of
        # the two capture groups is non-empty.
        result.categories_found = {
            literal or constant
            for literal, constant in trait_hits
            if literal or constant
        }
        result.categorized_test_count = len(trait_hits)
    return result
def get_module_from_path(file_path: Path) -> str:
    """Return the path segment that follows the first 'src' component.

    '__'-prefixed folders such as __Tests or __Libraries are returned
    as-is; "Unknown" is returned when no src/<module> pair exists.
    """
    segments = file_path.parts
    # Only look at segments that have a successor.
    for idx, segment in enumerate(segments[:-1]):
        if segment == 'src':
            return segments[idx + 1]
    return "Unknown"
def find_test_files(root_path: Path, module_filter: Optional[str] = None) -> List[Path]:
    """Find all test .cs files under root_path.

    Args:
        root_path: Directory to search.
        module_filter: When given, keep only files whose module (per
            get_module_from_path) matches case-insensitively.

    Returns:
        Unique matching paths. BUG FIX: the previous version could return
        the same file several times when it matched more than one glob
        (e.g. 'FooTests/Bar.Tests.cs' matches both '**/*.Tests.cs' and
        '**/*Tests/*.cs'); a seen-set now deduplicates. The obj/bin skip
        is also component-wise so it works with Windows separators.
    """
    test_files: List[Path] = []
    seen = set()
    for pattern in ['**/*.Tests.cs', '**/*Test.cs', '**/*Tests/*.cs']:
        for file_path in root_path.glob(pattern):
            # Skip generated/vendored trees.
            parts = file_path.parts
            if 'obj' in parts or 'bin' in parts or 'node_modules' in parts:
                continue
            if file_path in seen:
                continue
            # Apply module filter if specified
            if module_filter:
                module = get_module_from_path(file_path)
                if module.lower() != module_filter.lower():
                    continue
            seen.add(file_path)
            test_files.append(file_path)
    return test_files
def generate_report(analyses: List[TestFileAnalysis], verbose: bool = False) -> Dict:
    """Generate a summary report from analyses.

    Args:
        analyses: Per-file scan results from analyze_test_file.
        verbose: When True, also include the per-module and global lists
            of files that are missing Category traits.

    Returns:
        Dict with 'summary' (overall counts, coverage percentage and the
        category name sets), 'by_module' (per-module counts), and
        'files_missing_traits'.
    """
    total_files = len(analyses)
    files_with_tests = [a for a in analyses if a.has_facts or a.has_theories]
    files_with_traits = [a for a in analyses if a.has_category_traits]
    files_missing_traits = [a for a in files_with_tests if not a.has_category_traits]
    # Group by module
    by_module: Dict[str, Dict] = {}
    for analysis in analyses:
        module = get_module_from_path(Path(analysis.path))
        if module not in by_module:
            by_module[module] = {
                'total': 0,
                'with_tests': 0,
                'with_traits': 0,
                'missing_traits': 0,
                'files_missing': []
            }
        by_module[module]['total'] += 1
        if analysis.has_facts or analysis.has_theories:
            by_module[module]['with_tests'] += 1
        if analysis.has_category_traits:
            by_module[module]['with_traits'] += 1
        else:
            # Only files that actually contain tests count as "missing".
            if analysis.has_facts or analysis.has_theories:
                by_module[module]['missing_traits'] += 1
                if verbose:
                    by_module[module]['files_missing'].append(analysis.path)
    # Coverage = files with traits / files with tests (guard div-by-zero).
    coverage = (len(files_with_traits) / len(files_with_tests) * 100) if files_with_tests else 0
    # Collect all categories found
    all_categories: Set[str] = set()
    for analysis in analyses:
        all_categories.update(analysis.categories_found)
    return {
        'summary': {
            'total_test_files': total_files,
            'files_with_tests': len(files_with_tests),
            'files_with_category_traits': len(files_with_traits),
            'files_missing_traits': len(files_missing_traits),
            'coverage_percent': round(coverage, 1),
            'categories_used': sorted(all_categories),
            'valid_categories': sorted(VALID_CATEGORIES),
        },
        'by_module': by_module,
        'files_missing_traits': [a.path for a in files_missing_traits] if verbose else []
    }
def add_default_trait(file_path: Path, default_category: str = "Unit") -> bool:
    """Add a default ``[Trait("Category", ...)]`` to test methods missing one.

    Operates on whole files: if the file already contains any Category trait
    (per ``TRAIT_CATEGORY_PATTERN``) it is left untouched so mixed files get
    manual review. Otherwise every bare ``[Fact]``/``[Theory]`` attribute
    gains a Trait line, and ``using StellaOps.TestKit;`` is inserted if absent.

    Args:
        file_path: Path to the C# test source file to rewrite in place.
        default_category: ``TestCategories`` member name to apply.

    Returns:
        True if the file was modified, False otherwise (including on error).
    """
    try:
        content = file_path.read_text(encoding='utf-8')
        original = content

        # Already has at least one Category trait somewhere: skip the file.
        if TRAIT_CATEGORY_PATTERN.search(content):
            return False

        # Ensure the TestCategories type is in scope before referencing it.
        if 'using StellaOps.TestKit;' not in content:
            using_pattern = re.compile(r'(using [^;]+;\s*\n)(?!using)')
            matches = list(using_pattern.finditer(content))
            if matches:
                # Insert directly after the last using directive.
                insert_pos = matches[-1].end()
                content = (content[:insert_pos]
                           + 'using StellaOps.TestKit;\n'
                           + content[insert_pos:])
            else:
                # No using block at all (e.g. implicit usings): prepend the
                # directive so TestCategories still resolves. Previously the
                # directive was silently dropped, leaving uncompilable code.
                content = 'using StellaOps.TestKit;\n' + content

        # Prefix each bare attribute with the trait; parameterized forms such
        # as [Fact(Skip = ...)] are intentionally not matched. The trailing
        # 4-space indent assumes conventional method-level indentation.
        trait_prefix = f'[Trait("Category", TestCategories.{default_category})]\n    '
        content = re.sub(r'(\[Fact\]|\[Theory\])', trait_prefix + r'\1', content)

        if content != original:
            file_path.write_text(content, encoding='utf-8')
            return True
        return False
    except Exception as e:
        # Best-effort tool: report the failure and let the caller continue.
        print(f"Error processing {file_path}: {e}", file=sys.stderr)
        return False
def _print_text_report(report: Dict, verbose: bool) -> None:
    """Render the coverage report as human-readable text on stdout."""
    summary = report['summary']
    print("\n" + "=" * 60)
    print("TEST CATEGORY TRAIT COVERAGE REPORT")
    print("=" * 60)
    print(f"Total test files: {summary['total_test_files']}")
    print(f"Files with test methods: {summary['files_with_tests']}")
    print(f"Files with Category trait: {summary['files_with_category_traits']}")
    print(f"Files missing traits: {summary['files_missing_traits']}")
    print(f"Coverage: {summary['coverage_percent']}%")
    print(f"\nCategories in use: {', '.join(summary['categories_used']) or 'None'}")
    print(f"Valid categories: {', '.join(summary['valid_categories'])}")
    # Per-module table, skipping modules that contain no test methods.
    print("\n" + "-" * 60)
    print("BY MODULE")
    print("-" * 60)
    print(f"{'Module':<25} {'With Tests':<12} {'With Traits':<12} {'Missing':<10}")
    print("-" * 60)
    for module, data in sorted(report['by_module'].items()):
        if data['with_tests'] > 0:
            print(f"{module:<25} {data['with_tests']:<12} {data['with_traits']:<12} {data['missing_traits']:<10}")
    # Detailed file list (first 50) only in verbose mode.
    if verbose and report['files_missing_traits']:
        print("\n" + "-" * 60)
        print("FILES MISSING CATEGORY TRAITS")
        print("-" * 60)
        for f in sorted(report['files_missing_traits'])[:50]:  # Limit to first 50
            print(f"  {f}")
        if len(report['files_missing_traits']) > 50:
            print(f"  ... and {len(report['files_missing_traits']) - 50} more")


def _fix_missing_traits(analyses, default_category: str, quiet: bool) -> int:
    """Add the default trait to every test-bearing file lacking one.

    Returns the number of files actually rewritten. Progress output is
    suppressed when *quiet* is True (JSON mode keeps stdout machine-readable).
    """
    files_to_fix = [Path(a.path) for a in analyses
                    if (a.has_facts or a.has_theories) and not a.has_category_traits]
    if not quiet:
        print(f"\n{'=' * 60}")
        print(f"FIXING {len(files_to_fix)} FILES WITH DEFAULT CATEGORY: {default_category}")
        print("=" * 60)
    fixed_count = 0
    for file_path in files_to_fix:
        if add_default_trait(file_path, default_category):
            fixed_count += 1
            if not quiet:
                print(f"  Fixed: {file_path}")
    if not quiet:
        print(f"\nFixed {fixed_count} files")
    return fixed_count


def main():
    """CLI entry point: scan test files, report coverage, optionally fix.

    Exits 1 when trait coverage is below 80% (CI gate), 0 otherwise.
    """
    parser = argparse.ArgumentParser(description='Validate test Category traits')
    parser.add_argument('--fix', action='store_true', help='Add default Unit trait to tests without categories')
    parser.add_argument('--module', type=str, help='Only process tests in the specified module')
    parser.add_argument('--verbose', '-v', action='store_true', help='Show detailed output')
    parser.add_argument('--json', action='store_true', help='Output as JSON')
    parser.add_argument('--category', type=str, default='Unit', help='Default category for --fix (default: Unit)')
    args = parser.parse_args()

    # Repo layout assumption: this script sits two directories below the
    # repository root, so <root>/src is three parents up from the file.
    script_path = Path(__file__).resolve()
    repo_root = script_path.parent.parent.parent
    src_path = repo_root / 'src'
    if not src_path.exists():
        print(f"Error: src directory not found at {src_path}", file=sys.stderr)
        sys.exit(1)

    test_files = find_test_files(src_path, args.module)
    if not args.json:
        print(f"Found {len(test_files)} test files to analyze...")

    analyses = [analyze_test_file(f) for f in test_files]
    report = generate_report(analyses, args.verbose)

    if args.json:
        print(json.dumps(report, indent=2))
    else:
        _print_text_report(report, args.verbose)

    if args.fix:
        _fix_missing_traits(analyses, args.category, quiet=args.json)

    # NOTE(review): the gate evaluates the report generated BEFORE any --fix
    # rewrites, so a run that just fixed everything can still exit 1;
    # re-run without --fix to verify. Preserved as-is for CI compatibility.
    if report['summary']['coverage_percent'] < 80:
        sys.exit(1)
    sys.exit(0)
# Run the CLI only when executed as a script; the process exit code
# (set inside main()) doubles as the CI coverage gate.
if __name__ == '__main__':
    main()