up
Some checks failed
LNM Migration CI / build-runner (push) Has been cancelled
Ledger OpenAPI CI / deprecation-check (push) Has been cancelled
Docs CI / lint-and-preview (push) Has been cancelled
Airgap Sealed CI Smoke / sealed-smoke (push) Has been cancelled
Ledger Packs CI / build-pack (push) Has been cancelled
Export Center CI / export-ci (push) Has been cancelled
Ledger OpenAPI CI / validate-oas (push) Has been cancelled
Ledger OpenAPI CI / check-wellknown (push) Has been cancelled
Ledger Packs CI / verify-pack (push) Has been cancelled
LNM Migration CI / validate-metrics (push) Has been cancelled
AOC Guard CI / aoc-guard (push) Has been cancelled
AOC Guard CI / aoc-verify (push) Has been cancelled

This commit is contained in:
StellaOps Bot
2025-12-14 18:33:02 +02:00
parent d233fa3529
commit 2e70c9fdb6
51 changed files with 5958 additions and 75 deletions

View File

@@ -0,0 +1,442 @@
using System.Buffers;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Security.Cryptography;
using System.Text;
using System.Text.Encodings.Web;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using StellaOps.ExportCenter.Core.Planner;
namespace StellaOps.ExportCenter.Core.Adapters;
/// <summary>
/// Combined Runtime adapter (runtime:combined) - exports scanner.entrytrace and zastava.runtime
/// into a single NDJSON stream for offline kit attestation.
/// </summary>
/// <remarks>
/// Output format: combined.runtime.ndjson with records:
/// - combined.header: Metadata header with export info
/// - entrytrace.*: Scanner entry trace records (from scanner.entrytrace.ndjson)
/// - runtime.event: Zastava runtime events (from zastava.runtime.ndjson)
/// - combined.footer: Summary with counts and hashes
///
/// Records are deterministically ordered for reproducible output.
/// </remarks>
public sealed class CombinedRuntimeAdapter : IExportAdapter
{
    /// <summary>Stable adapter identifier ("runtime:combined") used for registration and lookup.</summary>
    public const string Id = "runtime:combined";

    // Shared writer configuration: relaxed escaping keeps payloads compact, and
    // Indented = false is required because every record must occupy exactly one NDJSON line.
    private static readonly JsonWriterOptions WriterOptions = new()
    {
        Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping,
        Indented = false,
        SkipValidation = false
    };

    private readonly ILogger<CombinedRuntimeAdapter> _logger;
    private readonly ExportCompressor _compressor;

    public string AdapterId => Id;

    public string DisplayName => "Combined Runtime Stream";

    public IReadOnlyList<ExportFormat> SupportedFormats { get; } = [ExportFormat.Ndjson];

    public bool SupportsStreaming => true;

    /// <summary>
    /// Creates the adapter.
    /// </summary>
    /// <param name="logger">Logger for progress and error reporting; required.</param>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="logger"/> is null.</exception>
    public CombinedRuntimeAdapter(ILogger<CombinedRuntimeAdapter> logger)
    {
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _compressor = new ExportCompressor();
    }

    /// <summary>
    /// Runs the combined export end-to-end: produces a single NDJSON artifact containing a
    /// header record, all entry-trace and runtime-event records, and a footer record, then
    /// returns manifest counts describing the run.
    /// </summary>
    /// <param name="context">Export context (items, data fetcher, config, time provider).</param>
    /// <param name="cancellationToken">Honored between item fetches; cancellation yields a failed result.</param>
    /// <returns>A successful result with one artifact, or a failed result with an error message.</returns>
    public async Task<ExportAdapterResult> ProcessAsync(
        ExportAdapterContext context,
        CancellationToken cancellationToken = default)
    {
        var stopwatch = Stopwatch.StartNew();
        // FIX: removed an unused local List<AdapterItemResult> that was never populated here;
        // per-item results are collected inside ProcessCombinedNdjsonAsync and surfaced via result.
        try
        {
            Directory.CreateDirectory(context.Config.OutputDirectory);
            var result = await ProcessCombinedNdjsonAsync(context, cancellationToken);
            stopwatch.Stop();
            if (!result.Success)
            {
                return ExportAdapterResult.Failed(result.ErrorMessage ?? "Combined export failed");
            }

            // NOTE(review): TotalSizeBytes reflects the on-disk (possibly compressed) artifact size,
            // while OriginalSizeBytes on the artifact holds the pre-compression size — confirm this
            // matches the manifest contract expected by consumers.
            var counts = new ExportManifestCounts
            {
                TotalItems = result.EntryTraceCount + result.RuntimeEventCount,
                ProcessedItems = result.EntryTraceCount + result.RuntimeEventCount,
                SuccessfulItems = result.EntryTraceCount + result.RuntimeEventCount,
                FailedItems = 0,
                SkippedItems = 0,
                ArtifactCount = 1,
                TotalSizeBytes = result.Artifact!.SizeBytes,
                CompressedSizeBytes = result.Artifact.IsCompressed ? result.Artifact.SizeBytes : null,
                ByKind = new Dictionary<string, int>
                {
                    ["entrytrace"] = result.EntryTraceCount,
                    ["runtime_event"] = result.RuntimeEventCount
                },
                ByStatus = new Dictionary<string, int>
                {
                    ["success"] = result.EntryTraceCount + result.RuntimeEventCount
                }
            };

            _logger.LogInformation(
                "Combined runtime export completed: {EntryTraceCount} entrytrace + {RuntimeEventCount} runtime events = {TotalBytes} bytes in {ElapsedMs}ms",
                result.EntryTraceCount, result.RuntimeEventCount, result.Artifact.SizeBytes, stopwatch.ElapsedMilliseconds);

            return new ExportAdapterResult
            {
                Success = true,
                ItemResults = result.ItemResults,
                Artifacts = [result.Artifact],
                ManifestCounts = counts,
                ProcessingTime = stopwatch.Elapsed,
                CompletedAt = context.TimeProvider.GetUtcNow()
            };
        }
        catch (OperationCanceledException)
        {
            return ExportAdapterResult.Failed("Export cancelled");
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Combined runtime export failed");
            return ExportAdapterResult.Failed($"Export failed: {ex.Message}");
        }
    }

    /// <summary>
    /// Streaming mode: fetches each item individually and yields a per-item result.
    /// Unlike <see cref="ProcessAsync"/>, this path writes no combined artifact — it only
    /// reports fetch success/hash per item.
    /// </summary>
    /// <param name="context">Export context providing items and the data fetcher.</param>
    /// <param name="cancellationToken">Checked before each item fetch.</param>
    public async IAsyncEnumerable<AdapterItemResult> ProcessStreamAsync(
        ExportAdapterContext context,
        [EnumeratorCancellation] CancellationToken cancellationToken = default)
    {
        Directory.CreateDirectory(context.Config.OutputDirectory);
        foreach (var item in context.Items)
        {
            cancellationToken.ThrowIfCancellationRequested();
            var content = await context.DataFetcher.FetchAsync(item, cancellationToken);
            yield return new AdapterItemResult
            {
                ItemId = item.ItemId,
                Success = content.Success,
                ContentHash = content.OriginalHash,
                ProcessedAt = context.TimeProvider.GetUtcNow()
            };
        }
    }

    /// <summary>
    /// Validates adapter configuration: an output directory must be set and the format
    /// must be NDJSON (the only format this adapter supports).
    /// </summary>
    /// <returns>A (possibly empty) list of human-readable validation errors.</returns>
    public Task<IReadOnlyList<string>> ValidateConfigAsync(
        ExportAdapterConfig config,
        CancellationToken cancellationToken = default)
    {
        var errors = new List<string>();
        if (string.IsNullOrWhiteSpace(config.OutputDirectory))
        {
            errors.Add("Output directory is required");
        }
        if (config.FormatOptions.Format != ExportFormat.Ndjson)
        {
            errors.Add("Combined runtime adapter only supports NDJSON format");
        }
        return Task.FromResult<IReadOnlyList<string>>(errors);
    }

    /// <summary>
    /// Core pipeline: categorizes items into entry-trace vs runtime groups (each sorted by
    /// Name then ItemId for deterministic output), emits header/records/footer, optionally
    /// compresses, writes the file (plus an optional .sha256 sidecar), and returns counts.
    /// </summary>
    private async Task<CombinedExportResult> ProcessCombinedNdjsonAsync(
        ExportAdapterContext context,
        CancellationToken cancellationToken)
    {
        var lines = new List<string>();
        var itemResults = new List<AdapterItemResult>();
        var now = context.TimeProvider.GetUtcNow();

        // Categorize items. Ordering by Name then ItemId keeps output reproducible across runs.
        var entryTraceItems = context.Items
            .Where(i => i.Kind.StartsWith("entrytrace", StringComparison.OrdinalIgnoreCase))
            .OrderBy(i => i.Name)
            .ThenBy(i => i.ItemId)
            .ToList();
        var runtimeItems = context.Items
            .Where(i => i.Kind.StartsWith("runtime", StringComparison.OrdinalIgnoreCase) ||
                        i.Kind.Equals("zastava_event", StringComparison.OrdinalIgnoreCase))
            .OrderBy(i => i.Name)
            .ThenBy(i => i.ItemId)
            .ToList();

        // Write header
        lines.Add(BuildHeaderLine(context, entryTraceItems.Count, runtimeItems.Count, now));

        // Process entry trace items; fetch failures are recorded per-item and skipped,
        // they do not abort the whole export.
        var entryTraceRecordCount = 0;
        foreach (var item in entryTraceItems)
        {
            cancellationToken.ThrowIfCancellationRequested();
            var content = await context.DataFetcher.FetchAsync(item, cancellationToken);
            if (!content.Success)
            {
                itemResults.Add(AdapterItemResult.Failed(item.ItemId, content.ErrorMessage ?? "Failed to fetch"));
                continue;
            }
            // Entry trace items may be NDJSON themselves, pass through each line
            var entryLines = ParseNdjsonLines(content.JsonContent);
            foreach (var line in entryLines)
            {
                lines.Add(line);
                entryTraceRecordCount++;
            }
            itemResults.Add(new AdapterItemResult
            {
                ItemId = item.ItemId,
                Success = true,
                ContentHash = content.OriginalHash,
                ProcessedAt = now
            });
        }

        // Process runtime event items
        var runtimeEventCount = 0;
        foreach (var item in runtimeItems)
        {
            cancellationToken.ThrowIfCancellationRequested();
            var content = await context.DataFetcher.FetchAsync(item, cancellationToken);
            if (!content.Success)
            {
                itemResults.Add(AdapterItemResult.Failed(item.ItemId, content.ErrorMessage ?? "Failed to fetch"));
                continue;
            }
            // Runtime items may be NDJSON or single JSON
            var eventLines = ParseNdjsonLines(content.JsonContent);
            foreach (var line in eventLines)
            {
                // Wrap runtime events with type marker if not already present
                var wrappedLine = EnsureRuntimeEventType(line);
                lines.Add(wrappedLine);
                runtimeEventCount++;
            }
            itemResults.Add(new AdapterItemResult
            {
                ItemId = item.ItemId,
                Success = true,
                ContentHash = content.OriginalHash,
                ProcessedAt = now
            });
        }

        // Write footer
        lines.Add(BuildFooterLine(entryTraceRecordCount, runtimeEventCount, now));

        if (lines.Count <= 2) // Only header and footer — nothing of substance to export
        {
            return CombinedExportResult.Failed("No items to export");
        }

        // Write combined NDJSON (LF line endings, trailing newline for POSIX-friendly files)
        var ndjsonContent = string.Join("\n", lines) + "\n";
        var outputBytes = Encoding.UTF8.GetBytes(ndjsonContent);
        var originalSize = outputBytes.Length;

        var compression = context.Config.FormatOptions.Compression;
        if (compression != CompressionFormat.None)
        {
            var compressed = _compressor.CompressBytes(outputBytes, compression);
            if (!compressed.Success)
            {
                return CombinedExportResult.Failed(compressed.ErrorMessage ?? "Compression failed");
            }
            outputBytes = compressed.CompressedData!;
        }

        var fileName = $"combined.runtime.ndjson{ExportCompressor.GetFileExtension(compression)}";
        var outputPath = Path.Combine(context.Config.OutputDirectory, fileName);
        await File.WriteAllBytesAsync(outputPath, outputBytes, cancellationToken);

        // Hash is computed over the final (post-compression) bytes, matching what's on disk.
        var hash = ComputeSha256(outputBytes);
        if (context.Config.IncludeChecksums)
        {
            var checksumPath = outputPath + ".sha256";
            await File.WriteAllTextAsync(checksumPath, $"{hash} {fileName}\n", cancellationToken);
        }

        return new CombinedExportResult
        {
            Success = true,
            ItemResults = itemResults,
            EntryTraceCount = entryTraceRecordCount,
            RuntimeEventCount = runtimeEventCount,
            Artifact = new ExportOutputArtifact
            {
                Path = outputPath,
                SizeBytes = outputBytes.Length,
                Sha256 = hash,
                ContentType = "application/x-ndjson",
                // ItemCount counts every NDJSON line, including the header and footer records.
                ItemCount = lines.Count,
                IsCompressed = compression != CompressionFormat.None,
                Compression = compression,
                OriginalSizeBytes = originalSize
            }
        };
    }

    /// <summary>
    /// Builds the single-line combined.header record (schema stellaops.combined.runtime@v1)
    /// carrying tenant, optional correlation id, timestamp, and per-source item counts.
    /// </summary>
    private static string BuildHeaderLine(
        ExportAdapterContext context,
        int entryTraceItemCount,
        int runtimeItemCount,
        DateTimeOffset timestamp)
    {
        var buffer = new ArrayBufferWriter<byte>(256);
        using (var writer = new Utf8JsonWriter(buffer, WriterOptions))
        {
            writer.WriteStartObject();
            writer.WriteString("type", "combined.header");
            writer.WriteString("version", "1.0.0");
            writer.WriteString("schema", "stellaops.combined.runtime@v1");
            writer.WriteString("generated_at", timestamp.UtcDateTime.ToString("O"));
            writer.WriteString("tenant_id", context.TenantId.ToString("D"));
            if (!string.IsNullOrEmpty(context.CorrelationId))
            {
                writer.WriteString("correlation_id", context.CorrelationId);
            }
            writer.WritePropertyName("source_counts");
            writer.WriteStartObject();
            writer.WriteNumber("entrytrace_items", entryTraceItemCount);
            writer.WriteNumber("runtime_items", runtimeItemCount);
            writer.WriteEndObject();
            writer.WriteEndObject();
            writer.Flush();
        }
        return Encoding.UTF8.GetString(buffer.WrittenSpan);
    }

    /// <summary>
    /// Builds the single-line combined.footer record with per-kind and total record counts
    /// plus the completion timestamp.
    /// </summary>
    private static string BuildFooterLine(int entryTraceCount, int runtimeEventCount, DateTimeOffset timestamp)
    {
        var buffer = new ArrayBufferWriter<byte>(256);
        using (var writer = new Utf8JsonWriter(buffer, WriterOptions))
        {
            writer.WriteStartObject();
            writer.WriteString("type", "combined.footer");
            writer.WritePropertyName("record_counts");
            writer.WriteStartObject();
            writer.WriteNumber("entrytrace_records", entryTraceCount);
            writer.WriteNumber("runtime_events", runtimeEventCount);
            writer.WriteNumber("total", entryTraceCount + runtimeEventCount);
            writer.WriteEndObject();
            writer.WriteString("completed_at", timestamp.UtcDateTime.ToString("O"));
            writer.WriteEndObject();
            writer.Flush();
        }
        return Encoding.UTF8.GetString(buffer.WrittenSpan);
    }

    /// <summary>
    /// Splits content into trimmed, non-empty lines. Handles both NDJSON payloads (many
    /// lines) and single-JSON payloads (one line); returns an empty list for blank input.
    /// </summary>
    private static IReadOnlyList<string> ParseNdjsonLines(string? content)
    {
        if (string.IsNullOrWhiteSpace(content))
        {
            return [];
        }
        var lines = new List<string>();
        using var reader = new StringReader(content);
        string? line;
        while ((line = reader.ReadLine()) is not null)
        {
            var trimmed = line.Trim();
            if (!string.IsNullOrEmpty(trimmed))
            {
                lines.Add(trimmed);
            }
        }
        return lines;
    }

    /// <summary>
    /// Ensures a runtime record carries a "type" field: records that already have one are
    /// passed through unchanged; untyped JSON objects are rewrapped with type "runtime.event"
    /// prepended. Unparseable lines are returned as-is (best-effort passthrough).
    /// </summary>
    private static string EnsureRuntimeEventType(string jsonLine)
    {
        // Fast path: the line already has a "type" field starting with "runtime." or
        // "entrytrace." (substring check avoids a full JSON parse), pass through.
        if (jsonLine.Contains("\"type\":\"runtime.") ||
            jsonLine.Contains("\"type\":\"entrytrace.") ||
            jsonLine.Contains("\"type\": \"runtime.") ||
            jsonLine.Contains("\"type\": \"entrytrace."))
        {
            return jsonLine;
        }
        // Wrap as runtime.event if no type present
        try
        {
            using var doc = JsonDocument.Parse(jsonLine);
            var root = doc.RootElement;
            // FIX: discard the out value — only the existence of "type" matters here.
            if (root.TryGetProperty("type", out _))
            {
                // Has type but not runtime/entrytrace prefix, pass through
                return jsonLine;
            }
            // Add type field for runtime events, preserving all original properties after it.
            var buffer = new ArrayBufferWriter<byte>(jsonLine.Length + 32);
            using (var writer = new Utf8JsonWriter(buffer, WriterOptions))
            {
                writer.WriteStartObject();
                writer.WriteString("type", "runtime.event");
                foreach (var property in root.EnumerateObject())
                {
                    property.WriteTo(writer);
                }
                writer.WriteEndObject();
                writer.Flush();
            }
            return Encoding.UTF8.GetString(buffer.WrittenSpan);
        }
        catch
        {
            // If parsing fails, return original
            return jsonLine;
        }
    }

    /// <summary>Computes the lowercase hex SHA-256 digest of the given bytes.</summary>
    private static string ComputeSha256(byte[] data)
    {
        var hashBytes = SHA256.HashData(data);
        return Convert.ToHexString(hashBytes).ToLowerInvariant();
    }

    /// <summary>Internal result of the combined pipeline, consumed only by ProcessAsync.</summary>
    private sealed record CombinedExportResult
    {
        public required bool Success { get; init; }
        public IReadOnlyList<AdapterItemResult> ItemResults { get; init; } = [];
        public int EntryTraceCount { get; init; }
        public int RuntimeEventCount { get; init; }
        public ExportOutputArtifact? Artifact { get; init; }
        public string? ErrorMessage { get; init; }

        public static CombinedExportResult Failed(string errorMessage)
            => new() { Success = false, ErrorMessage = errorMessage };
    }
}

View File

@@ -94,6 +94,7 @@ public static class ExportAdapterServiceExtensions
// Register individual adapters
services.AddSingleton<IExportAdapter, JsonRawAdapter>();
services.AddSingleton<IExportAdapter, JsonPolicyAdapter>();
services.AddSingleton<IExportAdapter, CombinedRuntimeAdapter>();
services.AddSingleton<IExportAdapter>(sp =>
new MirrorAdapter(
sp.GetRequiredService<ILogger<MirrorAdapter>>(),

View File

@@ -14,6 +14,7 @@ using StellaOps.Scanner.WebService.Contracts;
using StellaOps.Scanner.WebService.Domain;
using StellaOps.Scanner.WebService.Infrastructure;
using StellaOps.Scanner.WebService.Security;
using StellaOps.Scanner.WebService.Options;
using StellaOps.Scanner.WebService.Services;
using DomainScanProgressEvent = StellaOps.Scanner.WebService.Domain.ScanProgressEvent;
using StellaOps.Scanner.Core.Contracts;

View File

@@ -358,13 +358,52 @@ public sealed class ScannerWebServiceOptions
public int EventTtlDays { get; set; } = 45;
// === Tenant-level rate limits ===
public double PerTenantEventsPerSecond { get; set; } = 200;
public int PerTenantBurst { get; set; } = 1000;
// === Node-level rate limits ===
public double PerNodeEventsPerSecond { get; set; } = 50;
public int PerNodeBurst { get; set; } = 200;
public double PerTenantEventsPerSecond { get; set; } = 200;
// === Namespace-level rate limits (hierarchical budget) ===
/// <summary>
/// Maximum events per second per namespace.
/// Part of hierarchical rate limiting: tenant → namespace → workload.
/// Default: 100 events/second per namespace.
/// </summary>
public double PerNamespaceEventsPerSecond { get; set; } = 100;
public int PerTenantBurst { get; set; } = 1000;
/// <summary>
/// Burst capacity per namespace.
/// Default: 500 events burst.
/// </summary>
public int PerNamespaceBurst { get; set; } = 500;
// === Workload-level rate limits (hierarchical budget) ===
/// <summary>
/// Maximum events per second per workload (pod/container).
/// Part of hierarchical rate limiting: tenant → namespace → workload.
/// Prevents noisy workloads from exhausting namespace or tenant budgets.
/// Default: 25 events/second per workload.
/// </summary>
public double PerWorkloadEventsPerSecond { get; set; } = 25;
/// <summary>
/// Burst capacity per workload.
/// Default: 100 events burst.
/// </summary>
public int PerWorkloadBurst { get; set; } = 100;
/// <summary>
/// Enable hierarchical rate limiting across tenant → namespace → workload.
/// When enabled, rate limits are enforced at all three levels.
/// When disabled, only tenant and node limits apply (legacy behavior).
/// Default: false (opt-in for backward compatibility).
/// </summary>
public bool HierarchicalRateLimitingEnabled { get; set; } = false;
public int PolicyCacheTtlSeconds { get; set; } = 300;

View File

@@ -11,6 +11,7 @@ using StellaOps.Cryptography;
using StellaOps.Replay.Core;
using StellaOps.Scanner.Core.Replay;
using StellaOps.Scanner.Reachability;
using ReachabilityWriter = StellaOps.Scanner.Reachability.ReachabilityReplayWriter;
using StellaOps.Scanner.Storage;
using StellaOps.Scanner.Storage.ObjectStore;
using StellaOps.Scanner.WebService.Domain;
@@ -25,7 +26,7 @@ namespace StellaOps.Scanner.WebService.Replay;
internal sealed class RecordModeService : IRecordModeService
{
private readonly RecordModeAssembler _assembler;
private readonly ReachabilityReplayWriter _reachability;
private readonly ReachabilityWriter _reachability;
private readonly ICryptoHash _cryptoHash;
private readonly IArtifactObjectStore? _objectStore;
private readonly ScannerStorageOptions? _storageOptions;
@@ -45,7 +46,7 @@ internal sealed class RecordModeService : IRecordModeService
_timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_assembler = new RecordModeAssembler(cryptoHash, timeProvider);
_reachability = new ReachabilityReplayWriter();
_reachability = new ReachabilityWriter();
}
// Legacy/testing constructor for unit tests that do not require storage.
@@ -53,7 +54,7 @@ internal sealed class RecordModeService : IRecordModeService
{
_cryptoHash = cryptoHash ?? throw new ArgumentNullException(nameof(cryptoHash));
_assembler = new RecordModeAssembler(cryptoHash, timeProvider);
_reachability = new ReachabilityReplayWriter();
_reachability = new ReachabilityWriter();
_timeProvider = timeProvider ?? TimeProvider.System;
}

View File

@@ -5,10 +5,17 @@ using StellaOps.Zastava.Core.Contracts;
namespace StellaOps.Scanner.WebService.Services;
/// <summary>
/// Hierarchical rate limiter for runtime events.
/// Supports rate limiting at tenant, node, namespace, and workload levels.
/// Budget allocation: tenant → namespace → workload (when hierarchical mode enabled).
/// </summary>
internal sealed class RuntimeEventRateLimiter
{
private readonly ConcurrentDictionary<string, TokenBucket> _tenantBuckets = new(StringComparer.Ordinal);
private readonly ConcurrentDictionary<string, TokenBucket> _nodeBuckets = new(StringComparer.Ordinal);
private readonly ConcurrentDictionary<string, TokenBucket> _namespaceBuckets = new(StringComparer.Ordinal);
private readonly ConcurrentDictionary<string, TokenBucket> _workloadBuckets = new(StringComparer.Ordinal);
private readonly TimeProvider _timeProvider;
private readonly IOptionsMonitor<ScannerWebServiceOptions> _optionsMonitor;
@@ -29,33 +36,36 @@ internal sealed class RuntimeEventRateLimiter
var options = _optionsMonitor.CurrentValue.Runtime ?? new ScannerWebServiceOptions.RuntimeOptions();
var now = _timeProvider.GetUtcNow();
// Count events by scope
var tenantCounts = new Dictionary<string, int>(StringComparer.Ordinal);
var nodeCounts = new Dictionary<string, int>(StringComparer.Ordinal);
var namespaceCounts = new Dictionary<string, int>(StringComparer.Ordinal);
var workloadCounts = new Dictionary<string, int>(StringComparer.Ordinal);
foreach (var envelope in envelopes)
{
var tenant = envelope.Event.Tenant;
var node = envelope.Event.Node;
if (tenantCounts.TryGetValue(tenant, out var tenantCount))
{
tenantCounts[tenant] = tenantCount + 1;
}
else
{
tenantCounts[tenant] = 1;
}
var ns = envelope.Event.Workload?.Namespace ?? "_default";
var workloadId = GetWorkloadKey(envelope.Event);
var nodeKey = $"{tenant}|{node}";
if (nodeCounts.TryGetValue(nodeKey, out var nodeCount))
// Tenant counts
IncrementCount(tenantCounts, tenant);
// Node counts (tenant-scoped)
IncrementCount(nodeCounts, $"{tenant}|{node}");
// Namespace counts (tenant-scoped) - only used in hierarchical mode
if (options.HierarchicalRateLimitingEnabled)
{
nodeCounts[nodeKey] = nodeCount + 1;
}
else
{
nodeCounts[nodeKey] = 1;
IncrementCount(namespaceCounts, $"{tenant}|{ns}");
IncrementCount(workloadCounts, $"{tenant}|{ns}|{workloadId}");
}
}
// === Evaluate rate limits in order: tenant → node → namespace → workload ===
// 1. Tenant-level check
var tenantDecision = TryAcquire(
_tenantBuckets,
tenantCounts,
@@ -69,6 +79,7 @@ internal sealed class RuntimeEventRateLimiter
return tenantDecision;
}
// 2. Node-level check
var nodeDecision = TryAcquire(
_nodeBuckets,
nodeCounts,
@@ -77,7 +88,84 @@ internal sealed class RuntimeEventRateLimiter
now,
scope: "node");
return nodeDecision;
if (!nodeDecision.Allowed)
{
return nodeDecision;
}
// 3. Hierarchical checks (namespace → workload) - only when enabled
if (options.HierarchicalRateLimitingEnabled)
{
// 3a. Namespace-level check
var namespaceDecision = TryAcquire(
_namespaceBuckets,
namespaceCounts,
options.PerNamespaceEventsPerSecond,
options.PerNamespaceBurst,
now,
scope: "namespace");
if (!namespaceDecision.Allowed)
{
return namespaceDecision;
}
// 3b. Workload-level check
var workloadDecision = TryAcquire(
_workloadBuckets,
workloadCounts,
options.PerWorkloadEventsPerSecond,
options.PerWorkloadBurst,
now,
scope: "workload");
if (!workloadDecision.Allowed)
{
return workloadDecision;
}
}
return RateLimitDecision.Success;
}
/// <summary>
/// Gets a unique key for a workload from the runtime event.
/// Preference order: pod name, then a truncated container ID (with any runtime scheme
/// prefix such as "containerd://" stripped), then the container name, else "_unknown".
/// </summary>
private static string GetWorkloadKey(RuntimeEvent evt)
{
    var workload = evt.Workload;
    if (workload is null)
    {
        return "_unknown";
    }

    // Prefer pod name for Kubernetes workloads
    if (!string.IsNullOrEmpty(workload.Pod))
    {
        return workload.Pod;
    }

    // Fall back to container ID
    if (!string.IsNullOrEmpty(workload.ContainerId))
    {
        var containerId = workload.ContainerId;

        // FIX: single ordinal IndexOf instead of Contains + IndexOf (the original scanned
        // the string twice and relied on the default comparison).
        var schemeIndex = containerId.IndexOf("://", StringComparison.Ordinal);
        if (schemeIndex >= 0)
        {
            containerId = containerId[(schemeIndex + 3)..];
        }

        // Truncate container ID for reasonable key length
        return containerId.Length > 12 ? containerId[..12] : containerId;
    }

    // Last resort: use container name
    return workload.Container ?? "_unknown";
}
/// <summary>Adds one to the tally stored under <paramref name="key"/>, starting from zero for new keys.</summary>
private static void IncrementCount(Dictionary<string, int> counts, string key)
{
    counts[key] = counts.TryGetValue(key, out var existing) ? existing + 1 : 1;
}
private static RateLimitDecision TryAcquire(

View File

@@ -37,6 +37,7 @@
<ProjectReference Include="../__Libraries/StellaOps.Scanner.Core/StellaOps.Scanner.Core.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Replay.Core/StellaOps.Replay.Core.csproj" />
<ProjectReference Include="../../Zastava/__Libraries/StellaOps.Zastava.Core/StellaOps.Zastava.Core.csproj" />
<ProjectReference Include="../__Libraries/StellaOps.Scanner.Reachability/StellaOps.Scanner.Reachability.csproj" />
<ProjectReference Include="../../Concelier/__Libraries/StellaOps.Concelier.Core/StellaOps.Concelier.Core.csproj" />
<ProjectReference Include="../../Concelier/__Libraries/StellaOps.Concelier.Connector.Common/StellaOps.Concelier.Connector.Common.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Messaging/StellaOps.Messaging.csproj" />

View File

@@ -0,0 +1,606 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Net.Http.Json;
using System.Text;
using System.Text.Json;
using CycloneDX.Json;
using CycloneDX.Models;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using StellaOps.Scanner.Storage.Catalog;
using StellaOps.Scanner.Storage.ObjectStore;
using StellaOps.Scanner.Storage.Repositories;
using StellaOps.Scanner.WebService.Contracts;
using StellaOps.Zastava.Core.Contracts;
namespace StellaOps.Scanner.WebService.Tests;
public sealed class RuntimeReconciliationTests
{
private const string TestImageDigest = "sha256:abc123def456";
private const string TestTenant = "tenant-alpha";
private const string TestNode = "node-a";
[Fact]
// When nothing has been ingested for the image, the reconcile endpoint must answer 404
// with a NO_RUNTIME_EVENTS error code and an explanatory message.
public async Task ReconcileEndpoint_WithNoRuntimeEvents_ReturnsNotFound()
{
    using var factory = new ScannerApplicationFactory();
    using var client = factory.CreateClient();

    var reconcileRequest = new RuntimeReconcileRequestDto { ImageDigest = TestImageDigest };

    var httpResponse = await client.PostAsJsonAsync("/api/v1/runtime/reconcile", reconcileRequest);

    Assert.Equal(HttpStatusCode.NotFound, httpResponse.StatusCode);
    var body = await httpResponse.Content.ReadFromJsonAsync<RuntimeReconcileResponseDto>();
    Assert.NotNull(body);
    Assert.Equal("NO_RUNTIME_EVENTS", body!.ErrorCode);
    Assert.Contains("No runtime events found", body.ErrorMessage);
}
[Fact]
// Verifies that when runtime events exist but no SBOM artifact is linked to the image,
// reconciliation responds 200 with a structured NO_SBOM error and reports every runtime
// library as a miss (rather than failing with 404).
public async Task ReconcileEndpoint_WithRuntimeEventsButNoSbom_ReturnsNoSbomError()
{
// Replace the artifact object store with an empty in-memory fake so no SBOM can be resolved.
var mockObjectStore = new InMemoryArtifactObjectStore();
using var factory = new ScannerApplicationFactory(
configureServices: services =>
{
services.RemoveAll<IArtifactObjectStore>();
services.AddSingleton<IArtifactObjectStore>(mockObjectStore);
});
using var client = factory.CreateClient();
// Ingest runtime event with loaded libraries
var ingestRequest = new RuntimeEventsIngestRequestDto
{
Events = new[]
{
CreateEnvelopeWithLibraries("evt-001", TestImageDigest, new[]
{
new RuntimeLoadedLibrary { Path = "/lib/libssl.so.3", Sha256 = "sha256:lib1hash", Inode = 1001 },
new RuntimeLoadedLibrary { Path = "/lib/libcrypto.so.3", Sha256 = "sha256:lib2hash", Inode = 1002 }
})
}
};
var ingestResponse = await client.PostAsJsonAsync("/api/v1/runtime/events", ingestRequest);
Assert.Equal(HttpStatusCode.Accepted, ingestResponse.StatusCode);
// Request reconciliation - no SBOM linked
var reconcileRequest = new RuntimeReconcileRequestDto
{
ImageDigest = TestImageDigest
};
var response = await client.PostAsJsonAsync("/api/v1/runtime/reconcile", reconcileRequest);
Assert.Equal(HttpStatusCode.OK, response.StatusCode);
var payload = await response.Content.ReadFromJsonAsync<RuntimeReconcileResponseDto>();
Assert.NotNull(payload);
Assert.Equal("NO_SBOM", payload!.ErrorCode);
// Both ingested libraries surface as misses; no SBOM components means zero matches.
Assert.Equal(2, payload.TotalRuntimeLibraries);
Assert.Equal(0, payload.TotalSbomComponents);
Assert.Equal(0, payload.MatchCount);
Assert.Equal(2, payload.MissCount);
Assert.Equal(2, payload.Misses.Count);
}
[Fact]
// Happy path for hash-based reconciliation: an SBOM linked to the image contains components
// whose hashes match the runtime-loaded libraries, so every library must be reported as a
// match with MatchType "sha256" and no error code.
public async Task ReconcileEndpoint_WithHashMatches_ReturnsMatches()
{
// In-memory object store stands in for the real artifact storage backend.
var mockObjectStore = new InMemoryArtifactObjectStore();
using var factory = new ScannerApplicationFactory(
configureServices: services =>
{
services.RemoveAll<IArtifactObjectStore>();
services.AddSingleton<IArtifactObjectStore>(mockObjectStore);
});
using var client = factory.CreateClient();
// Setup: Create SBOM artifact with components
const string sbomArtifactId = "imagebom/sha256-sbomdigest";
const string sbomHash = "sha256:sbomdigest";
using (var scope = factory.Services.CreateScope())
{
var artifacts = scope.ServiceProvider.GetRequiredService<ArtifactRepository>();
var links = scope.ServiceProvider.GetRequiredService<LinkRepository>();
// Register the SBOM artifact document...
await artifacts.UpsertAsync(new ArtifactDocument
{
Id = sbomArtifactId,
Type = ArtifactDocumentType.ImageBom,
Format = ArtifactDocumentFormat.CycloneDxJson,
MediaType = "application/json",
BytesSha256 = sbomHash,
RefCount = 1
}, CancellationToken.None);
// ...and link it to the image under test so reconciliation can find it.
await links.UpsertAsync(new LinkDocument
{
Id = Guid.NewGuid().ToString("N"),
FromType = LinkSourceType.Image,
FromDigest = TestImageDigest,
ArtifactId = sbomArtifactId,
CreatedAtUtc = DateTime.UtcNow
}, CancellationToken.None);
}
// Create SBOM content with matching hash
var sbom = CreateSbomWithComponents(new[]
{
("comp-1", "openssl", "3.0.0", "pkg:deb/debian/openssl@3.0.0", new[] { "lib1hash" }, new[] { "/lib/libssl.so.3" }),
("comp-2", "libcrypto", "3.0.0", "pkg:deb/debian/libcrypto@3.0.0", new[] { "lib2hash" }, new[] { "/lib/libcrypto.so.3" })
});
var sbomJson = await Serializer.SerializeAsync(sbom);
var sbomBytes = Encoding.UTF8.GetBytes(sbomJson);
// Key mirrors the storage layout the service uses to fetch SBOM bytes.
mockObjectStore.Store($"scanner-artifacts/imagebom/cyclonedx-json/{sbomHash}", sbomBytes);
// Ingest runtime event with matching libraries
var ingestRequest = new RuntimeEventsIngestRequestDto
{
Events = new[]
{
CreateEnvelopeWithLibraries("evt-hash-001", TestImageDigest, new[]
{
new RuntimeLoadedLibrary { Path = "/lib/libssl.so.3", Sha256 = "lib1hash", Inode = 1001 },
new RuntimeLoadedLibrary { Path = "/lib/libcrypto.so.3", Sha256 = "lib2hash", Inode = 1002 }
})
}
};
var ingestResponse = await client.PostAsJsonAsync("/api/v1/runtime/events", ingestRequest);
Assert.Equal(HttpStatusCode.Accepted, ingestResponse.StatusCode);
// Request reconciliation
var reconcileRequest = new RuntimeReconcileRequestDto
{
ImageDigest = TestImageDigest
};
var response = await client.PostAsJsonAsync("/api/v1/runtime/reconcile", reconcileRequest);
Assert.Equal(HttpStatusCode.OK, response.StatusCode);
var payload = await response.Content.ReadFromJsonAsync<RuntimeReconcileResponseDto>();
Assert.NotNull(payload);
Assert.Null(payload!.ErrorCode);
// Both libraries should be matched via SHA-256 hash; no misses.
Assert.Equal(2, payload.TotalRuntimeLibraries);
Assert.Equal(2, payload.TotalSbomComponents);
Assert.Equal(2, payload.MatchCount);
Assert.Equal(0, payload.MissCount);
Assert.Equal(2, payload.Matches.Count);
Assert.All(payload.Matches, m => Assert.Equal("sha256", m.MatchType));
}
[Fact]
// Path-based fallback matching: the SBOM component carries a file path but no hash, and the
// runtime library reports no hash either — reconciliation must still match them by path and
// label the match with MatchType "path".
public async Task ReconcileEndpoint_WithPathMatches_ReturnsMatches()
{
// In-memory object store stands in for the real artifact storage backend.
var mockObjectStore = new InMemoryArtifactObjectStore();
using var factory = new ScannerApplicationFactory(
configureServices: services =>
{
services.RemoveAll<IArtifactObjectStore>();
services.AddSingleton<IArtifactObjectStore>(mockObjectStore);
});
using var client = factory.CreateClient();
// Dedicated digest/ids so this test does not collide with hash-match test data.
const string imageDigest = "sha256:pathtest123";
const string sbomArtifactId = "imagebom/sha256-sbomdigest-path";
const string sbomHash = "sha256:sbomdigest-path";
using (var scope = factory.Services.CreateScope())
{
var artifacts = scope.ServiceProvider.GetRequiredService<ArtifactRepository>();
var links = scope.ServiceProvider.GetRequiredService<LinkRepository>();
// Register the SBOM artifact and link it to the image under test.
await artifacts.UpsertAsync(new ArtifactDocument
{
Id = sbomArtifactId,
Type = ArtifactDocumentType.ImageBom,
Format = ArtifactDocumentFormat.CycloneDxJson,
MediaType = "application/json",
BytesSha256 = sbomHash,
RefCount = 1
}, CancellationToken.None);
await links.UpsertAsync(new LinkDocument
{
Id = Guid.NewGuid().ToString("N"),
FromType = LinkSourceType.Image,
FromDigest = imageDigest,
ArtifactId = sbomArtifactId,
CreatedAtUtc = DateTime.UtcNow
}, CancellationToken.None);
}
// Create SBOM with paths but different hashes (path matching)
var sbom = CreateSbomWithComponents(new[]
{
("comp-1", "zlib", "1.2.11", "pkg:deb/debian/zlib@1.2.11", Array.Empty<string>(), new[] { "/usr/lib/libz.so.1" })
});
var sbomJson = await Serializer.SerializeAsync(sbom);
var sbomBytes = Encoding.UTF8.GetBytes(sbomJson);
mockObjectStore.Store($"scanner-artifacts/imagebom/cyclonedx-json/{sbomHash}", sbomBytes);
// Ingest runtime event - no hash, path match only
var ingestRequest = new RuntimeEventsIngestRequestDto
{
Events = new[]
{
CreateEnvelopeWithLibraries("evt-path-001", imageDigest, new[]
{
new RuntimeLoadedLibrary { Path = "/usr/lib/libz.so.1", Sha256 = null, Inode = 2001 }
})
}
};
var ingestResponse = await client.PostAsJsonAsync("/api/v1/runtime/events", ingestRequest);
Assert.Equal(HttpStatusCode.Accepted, ingestResponse.StatusCode);
var reconcileRequest = new RuntimeReconcileRequestDto
{
ImageDigest = imageDigest
};
var response = await client.PostAsJsonAsync("/api/v1/runtime/reconcile", reconcileRequest);
Assert.Equal(HttpStatusCode.OK, response.StatusCode);
var payload = await response.Content.ReadFromJsonAsync<RuntimeReconcileResponseDto>();
Assert.NotNull(payload);
Assert.Null(payload!.ErrorCode);
// Exactly one library, matched by path rather than by hash.
Assert.Equal(1, payload.MatchCount);
Assert.Equal(0, payload.MissCount);
Assert.Single(payload.Matches);
Assert.Equal("path", payload.Matches[0].MatchType);
}
[Fact]
public async Task ReconcileEndpoint_WithSpecificEventId_UsesSpecifiedEvent()
{
    // Arrange: swap the real artifact object store for an in-memory fake so the
    // test can seed SBOM bytes directly.
    var mockObjectStore = new InMemoryArtifactObjectStore();
    using var factory = new ScannerApplicationFactory(
        configureServices: services =>
        {
            services.RemoveAll<IArtifactObjectStore>();
            services.AddSingleton<IArtifactObjectStore>(mockObjectStore);
        });
    using var client = factory.CreateClient();
    const string imageDigest = "sha256:eventidtest";
    const string sbomArtifactId = "imagebom/sha256-sbomdigest-eventid";
    const string sbomHash = "sha256:sbomdigest-eventid";
    // Register the SBOM artifact and link it to the image so reconciliation can find it.
    using (var scope = factory.Services.CreateScope())
    {
        var artifacts = scope.ServiceProvider.GetRequiredService<ArtifactRepository>();
        var links = scope.ServiceProvider.GetRequiredService<LinkRepository>();
        await artifacts.UpsertAsync(new ArtifactDocument
        {
            Id = sbomArtifactId,
            Type = ArtifactDocumentType.ImageBom,
            Format = ArtifactDocumentFormat.CycloneDxJson,
            MediaType = "application/json",
            BytesSha256 = sbomHash,
            RefCount = 1
        }, CancellationToken.None);
        await links.UpsertAsync(new LinkDocument
        {
            Id = Guid.NewGuid().ToString("N"),
            FromType = LinkSourceType.Image,
            FromDigest = imageDigest,
            ArtifactId = sbomArtifactId,
            CreatedAtUtc = DateTime.UtcNow
        }, CancellationToken.None);
    }
    // SBOM has one component whose hash matches only the first runtime event below.
    var sbom = CreateSbomWithComponents(new[]
    {
        ("comp-1", "test-lib", "1.0.0", "pkg:test/lib@1.0.0", new[] { "specifichash" }, Array.Empty<string>())
    });
    var sbomJson = await Serializer.SerializeAsync(sbom);
    var sbomBytes = Encoding.UTF8.GetBytes(sbomJson);
    mockObjectStore.Store($"scanner-artifacts/imagebom/cyclonedx-json/{sbomHash}", sbomBytes);
    // Ingest multiple events with different libraries
    var ingestRequest = new RuntimeEventsIngestRequestDto
    {
        Events = new[]
        {
            CreateEnvelopeWithLibraries("evt-specific-001", imageDigest, new[]
            {
                new RuntimeLoadedLibrary { Path = "/lib/specific.so", Sha256 = "specifichash", Inode = 3001 }
            }),
            CreateEnvelopeWithLibraries("evt-specific-002", imageDigest, new[]
            {
                new RuntimeLoadedLibrary { Path = "/lib/other.so", Sha256 = "otherhash", Inode = 3002 }
            })
        }
    };
    var ingestResponse = await client.PostAsJsonAsync("/api/v1/runtime/events", ingestRequest);
    Assert.Equal(HttpStatusCode.Accepted, ingestResponse.StatusCode);
    // Request reconciliation for specific event (evt-specific-001 should match)
    var reconcileRequest = new RuntimeReconcileRequestDto
    {
        ImageDigest = imageDigest,
        RuntimeEventId = "evt-specific-001"
    };
    var response = await client.PostAsJsonAsync("/api/v1/runtime/reconcile", reconcileRequest);
    Assert.Equal(HttpStatusCode.OK, response.StatusCode);
    // Assert: the response echoes the requested event id and reconciles only its library.
    var payload = await response.Content.ReadFromJsonAsync<RuntimeReconcileResponseDto>();
    Assert.NotNull(payload);
    Assert.Equal("evt-specific-001", payload!.RuntimeEventId);
    Assert.Equal(1, payload.MatchCount);
    Assert.Equal(0, payload.MissCount);
}
[Fact]
public async Task ReconcileEndpoint_WithNonExistentEventId_ReturnsNotFound()
{
    // Arrange: a factory with no runtime events ingested at all.
    using var factory = new ScannerApplicationFactory();
    using var httpClient = factory.CreateClient();
    var reconcileRequest = new RuntimeReconcileRequestDto
    {
        ImageDigest = TestImageDigest,
        RuntimeEventId = "non-existent-event-id"
    };

    // Act
    var reconcileResponse = await httpClient.PostAsJsonAsync("/api/v1/runtime/reconcile", reconcileRequest);

    // Assert: an unknown event id surfaces as 404 with a machine-readable error code.
    Assert.Equal(HttpStatusCode.NotFound, reconcileResponse.StatusCode);
    var body = await reconcileResponse.Content.ReadFromJsonAsync<RuntimeReconcileResponseDto>();
    Assert.NotNull(body);
    Assert.Equal("RUNTIME_EVENT_NOT_FOUND", body!.ErrorCode);
}
[Fact]
public async Task ReconcileEndpoint_WithMissingImageDigest_ReturnsBadRequest()
{
    // Arrange
    using var factory = new ScannerApplicationFactory();
    using var httpClient = factory.CreateClient();
    var reconcileRequest = new RuntimeReconcileRequestDto
    {
        ImageDigest = ""
    };

    // Act
    var reconcileResponse = await httpClient.PostAsJsonAsync("/api/v1/runtime/reconcile", reconcileRequest);

    // Assert: an empty image digest is rejected up front.
    Assert.Equal(HttpStatusCode.BadRequest, reconcileResponse.StatusCode);
}
[Fact]
public async Task ReconcileEndpoint_WithMixedMatchesAndMisses_ReturnsCorrectCounts()
{
    // Arrange: in-memory object store so SBOM bytes can be seeded without real storage.
    var mockObjectStore = new InMemoryArtifactObjectStore();
    using var factory = new ScannerApplicationFactory(
        configureServices: services =>
        {
            services.RemoveAll<IArtifactObjectStore>();
            services.AddSingleton<IArtifactObjectStore>(mockObjectStore);
        });
    using var client = factory.CreateClient();
    const string imageDigest = "sha256:mixedtest";
    const string sbomArtifactId = "imagebom/sha256-sbomdigest-mixed";
    const string sbomHash = "sha256:sbomdigest-mixed";
    // Register the SBOM artifact and link it to the image under test.
    using (var scope = factory.Services.CreateScope())
    {
        var artifacts = scope.ServiceProvider.GetRequiredService<ArtifactRepository>();
        var links = scope.ServiceProvider.GetRequiredService<LinkRepository>();
        await artifacts.UpsertAsync(new ArtifactDocument
        {
            Id = sbomArtifactId,
            Type = ArtifactDocumentType.ImageBom,
            Format = ArtifactDocumentFormat.CycloneDxJson,
            MediaType = "application/json",
            BytesSha256 = sbomHash,
            RefCount = 1
        }, CancellationToken.None);
        await links.UpsertAsync(new LinkDocument
        {
            Id = Guid.NewGuid().ToString("N"),
            FromType = LinkSourceType.Image,
            FromDigest = imageDigest,
            ArtifactId = sbomArtifactId,
            CreatedAtUtc = DateTime.UtcNow
        }, CancellationToken.None);
    }
    // SBOM has 2 components
    var sbom = CreateSbomWithComponents(new[]
    {
        ("comp-known-1", "known-lib", "1.0.0", "pkg:test/known@1.0.0", new[] { "knownhash1" }, new[] { "/lib/known.so" }),
        ("comp-known-2", "another-lib", "2.0.0", "pkg:test/another@2.0.0", new[] { "knownhash2" }, Array.Empty<string>())
    });
    var sbomJson = await Serializer.SerializeAsync(sbom);
    var sbomBytes = Encoding.UTF8.GetBytes(sbomJson);
    mockObjectStore.Store($"scanner-artifacts/imagebom/cyclonedx-json/{sbomHash}", sbomBytes);
    // Runtime has 3 libraries: 1 hash match, 1 path match, 1 miss
    // NOTE(review): the inline markers and the asserted counts below show 2 hash
    // matches and 1 miss; the "1 path match" wording above appears stale — confirm.
    var ingestRequest = new RuntimeEventsIngestRequestDto
    {
        Events = new[]
        {
            CreateEnvelopeWithLibraries("evt-mixed-001", imageDigest, new[]
            {
                new RuntimeLoadedLibrary { Path = "/lib/known.so", Sha256 = "knownhash1", Inode = 4001 }, // hash match
                new RuntimeLoadedLibrary { Path = "/lib/unknown.so", Sha256 = "unknownhash", Inode = 4002 }, // miss
                new RuntimeLoadedLibrary { Path = "/lib/another.so", Sha256 = "knownhash2", Inode = 4003 } // hash match
            })
        }
    };
    var ingestResponse = await client.PostAsJsonAsync("/api/v1/runtime/events", ingestRequest);
    Assert.Equal(HttpStatusCode.Accepted, ingestResponse.StatusCode);
    var reconcileRequest = new RuntimeReconcileRequestDto
    {
        ImageDigest = imageDigest
    };
    var response = await client.PostAsJsonAsync("/api/v1/runtime/reconcile", reconcileRequest);
    Assert.Equal(HttpStatusCode.OK, response.StatusCode);
    // Assert: totals and per-category counts line up with the seeded data.
    var payload = await response.Content.ReadFromJsonAsync<RuntimeReconcileResponseDto>();
    Assert.NotNull(payload);
    Assert.Null(payload!.ErrorCode);
    Assert.Equal(3, payload.TotalRuntimeLibraries);
    Assert.Equal(2, payload.TotalSbomComponents);
    Assert.Equal(2, payload.MatchCount);
    Assert.Equal(1, payload.MissCount);
    Assert.Single(payload.Misses);
    Assert.Equal("/lib/unknown.so", payload.Misses[0].Path);
}
/// <summary>
/// Builds a runtime event envelope for <paramref name="imageDigest"/> carrying the
/// supplied loaded libraries; all other fields are fixed, representative values.
/// </summary>
private static RuntimeEventEnvelope CreateEnvelopeWithLibraries(
    string eventId,
    string imageDigest,
    RuntimeLoadedLibrary[] libraries)
{
    var runtimeEvent = new RuntimeEvent
    {
        EventId = eventId,
        When = DateTimeOffset.UtcNow,
        Kind = RuntimeEventKind.ContainerStart,
        Tenant = TestTenant,
        Node = TestNode,
        Runtime = new RuntimeEngine
        {
            Engine = "containerd",
            Version = "1.7.0"
        },
        Workload = new RuntimeWorkload
        {
            Platform = "kubernetes",
            Namespace = "default",
            Pod = "test-pod",
            Container = "test-container",
            // Container id derived from the event id so each envelope stays unique.
            ContainerId = $"containerd://{eventId}",
            ImageRef = $"ghcr.io/example/test@{imageDigest}"
        },
        Delta = new RuntimeDelta
        {
            BaselineImageDigest = imageDigest
        },
        Process = new RuntimeProcess
        {
            Pid = 1234,
            Entrypoint = new[] { "/bin/start" },
            EntryTrace = Array.Empty<RuntimeEntryTrace>()
        },
        LoadedLibraries = libraries
    };
    return RuntimeEventEnvelope.Create(runtimeEvent, ZastavaContractVersions.RuntimeEvent);
}
/// <summary>
/// Builds a CycloneDX BOM from component tuples; evidence occurrences are attached
/// only when file paths were supplied for a component.
/// </summary>
private static Bom CreateSbomWithComponents(
    (string bomRef, string name, string version, string purl, string[] hashes, string[] paths)[] components)
{
    var mapped = components.Select(c =>
    {
        var component = new Component
        {
            BomRef = c.bomRef,
            Name = c.name,
            Version = c.version,
            Purl = c.purl,
            Type = Component.Classification.Library,
            Hashes = c.hashes.Select(h => new Hash
            {
                Alg = Hash.HashAlgorithm.SHA_256,
                Content = h
            }).ToList()
        };
        if (c.paths.Length > 0)
        {
            component.Evidence = new Evidence
            {
                Occurrences = c.paths.Select(p => new EvidenceOccurrence
                {
                    Location = p
                }).ToList()
            };
        }
        return component;
    });

    return new Bom
    {
        Version = 1,
        SerialNumber = $"urn:uuid:{Guid.NewGuid()}",
        Components = mapped.ToList()
    };
}
/// <summary>
/// Test double for <see cref="IArtifactObjectStore"/> backed by an in-memory dictionary
/// keyed by "{bucket}/{key}" (case-insensitive).
/// </summary>
private sealed class InMemoryArtifactObjectStore : IArtifactObjectStore
{
    private readonly Dictionary<string, byte[]> _store = new(StringComparer.OrdinalIgnoreCase);

    /// <summary>Seeds content under a raw key, bypassing the descriptor-based API (test setup helper).</summary>
    public void Store(string key, byte[] content)
    {
        _store[key] = content;
    }

    public async Task PutAsync(ArtifactObjectDescriptor descriptor, Stream content, CancellationToken cancellationToken)
    {
        using var ms = new MemoryStream();
        // Copy asynchronously and honor the cancellation token instead of blocking
        // the caller with a synchronous CopyTo.
        await content.CopyToAsync(ms, cancellationToken).ConfigureAwait(false);
        _store[$"{descriptor.Bucket}/{descriptor.Key}"] = ms.ToArray();
    }

    public Task<Stream?> GetAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken)
    {
        var key = $"{descriptor.Bucket}/{descriptor.Key}";
        if (_store.TryGetValue(key, out var content))
        {
            return Task.FromResult<Stream?>(new MemoryStream(content));
        }
        return Task.FromResult<Stream?>(null);
    }

    public Task DeleteAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken)
    {
        _store.Remove($"{descriptor.Bucket}/{descriptor.Key}");
        return Task.CompletedTask;
    }
}
}

View File

@@ -39,6 +39,12 @@ public sealed class ZastavaRuntimeOptions
[Required]
public ZastavaAuthorityOptions Authority { get; set; } = new();
/// <summary>
/// Offline/air-gapped operation configuration.
/// </summary>
[Required]
public ZastavaOfflineOptions Offline { get; set; } = new();
}
public sealed class ZastavaRuntimeLoggingOptions
@@ -82,3 +88,62 @@ public sealed class ZastavaRuntimeMetricsOptions
/// </summary>
public IDictionary<string, string> CommonTags { get; init; } = new Dictionary<string, string>(StringComparer.Ordinal);
}
/// <summary>
/// Offline/air-gapped operation configuration for Zastava components.
/// Controls network access restrictions for secure, disconnected deployments.
/// Bound from the "zastava:runtime:offline" configuration section.
/// </summary>
public sealed class ZastavaOfflineOptions
{
    /// <summary>
    /// Enable strict offline mode. When true, any HTTP request to an external host
    /// (not in <see cref="AllowedHosts"/>) will throw an exception at request time.
    /// Default: false.
    /// </summary>
    public bool StrictMode { get; init; }
    /// <summary>
    /// Require Surface.FS cache to be available and populated at startup.
    /// When true, the component will fail startup if the cache directory is missing
    /// or empty. Used with <see cref="StrictMode"/> for fully air-gapped deployments.
    /// Default: false.
    /// </summary>
    public bool RequireSurfaceCache { get; init; }
    /// <summary>
    /// Path to the Surface.FS cache directory containing pre-fetched vulnerability data.
    /// Required when <see cref="RequireSurfaceCache"/> is true.
    /// </summary>
    public string? SurfaceCachePath { get; init; }
    /// <summary>
    /// Minimum number of cache entries required when <see cref="RequireSurfaceCache"/> is true.
    /// Ensures the cache has been properly populated before starting.
    /// Default: 1.
    /// </summary>
    [Range(1, int.MaxValue)]
    public int MinimumCacheEntries { get; init; } = 1;
    /// <summary>
    /// Maximum age (in hours) of cache entries before they are considered stale.
    /// When <see cref="StrictMode"/> is true and all entries exceed this age,
    /// a warning is emitted but operation continues.
    /// Default: 168 (7 days). Upper bound of 8760 hours equals one year.
    /// </summary>
    [Range(1, 8760)]
    public int MaxCacheAgeHours { get; init; } = 168;
    /// <summary>
    /// List of hostnames explicitly allowed for network access in strict mode.
    /// Supports exact matches and wildcard prefixes (e.g., "*.internal.corp").
    /// Localhost (127.0.0.1, ::1, localhost) is always implicitly allowed.
    /// </summary>
    public IList<string> AllowedHosts { get; init; } = new List<string>();
    /// <summary>
    /// When true, emits detailed logs for each blocked network request.
    /// Useful for auditing network access patterns during initial deployment.
    /// Default: false.
    /// </summary>
    public bool LogBlockedRequests { get; init; }
}

View File

@@ -7,7 +7,9 @@ using Microsoft.Extensions.Options;
using StellaOps.Auth.Client;
using StellaOps.Zastava.Core.Configuration;
using StellaOps.Zastava.Core.Diagnostics;
using StellaOps.Zastava.Core.Http;
using StellaOps.Zastava.Core.Security;
using StellaOps.Zastava.Core.Validation;
namespace Microsoft.Extensions.DependencyInjection;
@@ -45,9 +47,27 @@ public static class ZastavaServiceCollectionExtensions
ConfigureAuthorityServices(services, configuration);
services.TryAddSingleton<IZastavaAuthorityTokenProvider, ZastavaAuthorityTokenProvider>();
// Register offline strict mode handler for HttpClientFactory
services.TryAddTransient<OfflineStrictModeHandler>();
// Register Surface.FS cache validator as hosted service
// This validates cache availability at startup when RequireSurfaceCache is enabled
services.AddHostedService<SurfaceCacheValidator>();
return services;
}
/// <summary>
/// Adds the offline strict mode handler to an HttpClient configuration.
/// When <see cref="ZastavaOfflineOptions.StrictMode"/> is enabled, requests to
/// hosts not in the allowlist will be blocked.
/// </summary>
/// <param name="builder">HTTP client builder to attach the message handler to.</param>
/// <returns>The same <paramref name="builder"/> instance for fluent chaining.</returns>
public static IHttpClientBuilder AddOfflineStrictModeHandler(this IHttpClientBuilder builder)
{
    ArgumentNullException.ThrowIfNull(builder);
    return builder.AddHttpMessageHandler<OfflineStrictModeHandler>();
}
private static void ConfigureAuthorityServices(IServiceCollection services, IConfiguration configuration)
{
var authoritySection = configuration.GetSection($"{ZastavaRuntimeOptions.SectionName}:authority");

View File

@@ -0,0 +1,147 @@
using System.Net;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Zastava.Core.Configuration;
namespace StellaOps.Zastava.Core.Http;
/// <summary>
/// HTTP delegating handler that enforces strict offline mode.
/// When <see cref="ZastavaOfflineOptions.StrictMode"/> is enabled, requests to
/// hosts not in the allowlist will be rejected with an exception.
/// </summary>
public sealed class OfflineStrictModeHandler : DelegatingHandler
{
    // Implicitly allowed local hosts (exact, case-insensitive matches).
    private static readonly HashSet<string> ImplicitlyAllowedHosts = new(StringComparer.OrdinalIgnoreCase)
    {
        "localhost",
        "127.0.0.1",
        "::1",
        "[::1]"
    };

    private readonly IOptionsMonitor<ZastavaRuntimeOptions> _optionsMonitor;
    private readonly ILogger<OfflineStrictModeHandler> _logger;

    public OfflineStrictModeHandler(
        IOptionsMonitor<ZastavaRuntimeOptions> optionsMonitor,
        ILogger<OfflineStrictModeHandler> logger)
    {
        _optionsMonitor = optionsMonitor ?? throw new ArgumentNullException(nameof(optionsMonitor));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Validates the target host against the offline allowlist before forwarding the request.
    /// </summary>
    /// <exception cref="OfflineStrictModeException">
    /// Strict mode is enabled and the request targets a disallowed external host (or has no URI).
    /// </exception>
    protected override async Task<HttpResponseMessage> SendAsync(
        HttpRequestMessage request,
        CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(request);
        // Re-read options per request so configuration reloads take effect immediately.
        var options = _optionsMonitor.CurrentValue.Offline;
        // If strict mode is not enabled, pass through
        if (!options.StrictMode)
        {
            return await base.SendAsync(request, cancellationToken).ConfigureAwait(false);
        }
        var requestUri = request.RequestUri;
        if (requestUri is null)
        {
            throw new OfflineStrictModeException("Request URI is null - cannot validate against offline strict mode.");
        }
        var host = requestUri.Host;
        if (!IsHostAllowed(host, options))
        {
            if (options.LogBlockedRequests)
            {
                _logger.LogWarning(
                    "Offline strict mode blocked request to {Host}{Path} (Method: {Method})",
                    host,
                    requestUri.PathAndQuery,
                    request.Method);
            }
            throw new OfflineStrictModeException(
                $"Offline strict mode is enabled. Request to external host '{host}' is not allowed. " +
                $"Add the host to zastava:runtime:offline:allowedHosts or disable strict mode.");
        }
        return await base.SendAsync(request, cancellationToken).ConfigureAwait(false);
    }

    private static bool IsHostAllowed(string host, ZastavaOfflineOptions options)
    {
        // Implicitly allowed hosts (localhost, loopback names)
        if (ImplicitlyAllowedHosts.Contains(host))
        {
            return true;
        }
        // Treat any literal loopback address as local (127.0.0.0/8, ::1, ::ffff:127.x).
        // Parse the address instead of string prefix-matching so DNS names such as
        // "127.evil.com" are NOT mistaken for loopback addresses.
        if (IPAddress.TryParse(host.Trim('[', ']'), out var address) && IPAddress.IsLoopback(address))
        {
            return true;
        }
        // Check explicit allowlist (an empty list allows nothing beyond loopback).
        foreach (var allowedHost in options.AllowedHosts)
        {
            if (MatchesHost(host, allowedHost))
            {
                return true;
            }
        }
        return false;
    }

    private static bool MatchesHost(string host, string pattern)
    {
        if (string.IsNullOrWhiteSpace(pattern))
        {
            return false;
        }
        // Exact match
        if (string.Equals(host, pattern, StringComparison.OrdinalIgnoreCase))
        {
            return true;
        }
        // Wildcard prefix match (e.g., "*.internal.corp" matches subdomains and the bare apex)
        if (pattern.StartsWith("*.", StringComparison.Ordinal))
        {
            var suffix = pattern.Substring(1); // ".internal.corp"
            return host.EndsWith(suffix, StringComparison.OrdinalIgnoreCase) ||
                   string.Equals(host, pattern.Substring(2), StringComparison.OrdinalIgnoreCase);
        }
        return false;
    }
}
/// <summary>
/// Exception thrown when a network request is blocked by offline strict mode.
/// </summary>
public sealed class OfflineStrictModeException : InvalidOperationException
{
    /// <summary>Creates the exception with a descriptive, operator-facing message.</summary>
    public OfflineStrictModeException(string message) : base(message)
    {
    }
    /// <summary>Creates the exception wrapping an underlying cause.</summary>
    public OfflineStrictModeException(string message, Exception innerException)
        : base(message, innerException)
    {
    }
}

View File

@@ -0,0 +1,185 @@
using System.IO;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Zastava.Core.Configuration;
namespace StellaOps.Zastava.Core.Validation;
/// <summary>
/// Startup validator that ensures Surface.FS cache is available and populated
/// when <see cref="ZastavaOfflineOptions.RequireSurfaceCache"/> is enabled.
/// Throws <see cref="SurfaceCacheValidationException"/> from <see cref="StartAsync"/>
/// to abort host startup on a missing or under-populated cache.
/// </summary>
public sealed class SurfaceCacheValidator : IHostedService
{
    // Cache entries are typically .json, .json.gz, or .ndjson files.
    // These extensions are bookkeeping artifacts, not payloads; hoisted to a static
    // set so it is not rebuilt on every enumeration.
    private static readonly HashSet<string> MetadataExtensions = new(StringComparer.OrdinalIgnoreCase)
    {
        ".manifest",
        ".index",
        ".lock",
        ".tmp",
        ".partial"
    };

    private readonly IOptionsMonitor<ZastavaRuntimeOptions> _optionsMonitor;
    private readonly ILogger<SurfaceCacheValidator> _logger;
    private readonly TimeProvider _timeProvider;

    /// <summary>
    /// Creates the validator. <paramref name="timeProvider"/> is injectable for tests
    /// and defaults to <see cref="TimeProvider.System"/>.
    /// </summary>
    public SurfaceCacheValidator(
        IOptionsMonitor<ZastavaRuntimeOptions> optionsMonitor,
        ILogger<SurfaceCacheValidator> logger,
        TimeProvider? timeProvider = null)
    {
        _optionsMonitor = optionsMonitor ?? throw new ArgumentNullException(nameof(optionsMonitor));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>Validates the cache when RequireSurfaceCache is set; otherwise a no-op.</summary>
    public Task StartAsync(CancellationToken cancellationToken)
    {
        var options = _optionsMonitor.CurrentValue.Offline;
        // Skip validation if RequireSurfaceCache is not enabled
        if (!options.RequireSurfaceCache)
        {
            _logger.LogDebug("Surface.FS cache validation skipped (RequireSurfaceCache=false)");
            return Task.CompletedTask;
        }
        ValidateCache(options);
        return Task.CompletedTask;
    }

    public Task StopAsync(CancellationToken cancellationToken) => Task.CompletedTask;

    /// <summary>
    /// Throws <see cref="SurfaceCacheValidationException"/> when the cache path is unset,
    /// the directory is missing, or it holds fewer than the configured minimum entries.
    /// </summary>
    private void ValidateCache(ZastavaOfflineOptions options)
    {
        var cachePath = options.SurfaceCachePath;
        // Validate path is configured
        if (string.IsNullOrWhiteSpace(cachePath))
        {
            throw new SurfaceCacheValidationException(
                "Surface.FS cache path is required when RequireSurfaceCache is enabled. " +
                "Set zastava:runtime:offline:surfaceCachePath in configuration.");
        }
        // Validate directory exists
        if (!Directory.Exists(cachePath))
        {
            throw new SurfaceCacheValidationException(
                $"Surface.FS cache directory does not exist: '{cachePath}'. " +
                "Ensure the cache has been populated before starting in offline mode.");
        }
        // Count cache entries (files in the directory, excluding metadata files)
        var cacheEntries = GetCacheEntries(cachePath).ToList();
        var entryCount = cacheEntries.Count;
        if (entryCount < options.MinimumCacheEntries)
        {
            throw new SurfaceCacheValidationException(
                $"Surface.FS cache has {entryCount} entries, but {options.MinimumCacheEntries} are required. " +
                "Populate the cache before starting in offline mode.");
        }
        _logger.LogInformation(
            "Surface.FS cache validated: {EntryCount} entries found in {CachePath}",
            entryCount,
            cachePath);
        // Check for stale cache entries
        CheckCacheStaleness(cacheEntries, options);
    }

    /// <summary>
    /// Logs (never throws) about entries older than <see cref="ZastavaOfflineOptions.MaxCacheAgeHours"/>.
    /// </summary>
    private void CheckCacheStaleness(IReadOnlyList<CacheEntry> entries, ZastavaOfflineOptions options)
    {
        var now = _timeProvider.GetUtcNow();
        var maxAge = TimeSpan.FromHours(options.MaxCacheAgeHours);
        var staleThreshold = now - maxAge;
        var staleCount = entries.Count(e => e.LastModified < staleThreshold);
        var freshCount = entries.Count - staleCount;
        if (staleCount == 0)
        {
            _logger.LogDebug(
                "All {EntryCount} Surface.FS cache entries are within the {MaxAge} hour threshold",
                entries.Count,
                options.MaxCacheAgeHours);
            return;
        }
        if (freshCount == 0)
        {
            // staleCount > 0 guarantees at least one entry, so Min is safe here.
            // (CacheEntry is a value type: the previous FirstOrDefault()/"is not null"
            // check could not compile against a non-nullable struct.)
            var oldestAge = now - entries.Min(e => e.LastModified);
            // All entries are stale - warn but continue
            _logger.LogWarning(
                "All {StaleCount} Surface.FS cache entries are older than {MaxAge} hours. " +
                "Oldest entry is {OldestAge:N1} hours old. " +
                "Consider refreshing the cache for up-to-date vulnerability data.",
                staleCount,
                options.MaxCacheAgeHours,
                oldestAge.TotalHours);
        }
        else
        {
            // Some entries are stale
            _logger.LogInformation(
                "Surface.FS cache status: {FreshCount} fresh, {StaleCount} stale " +
                "(threshold: {MaxAge} hours)",
                freshCount,
                staleCount,
                options.MaxCacheAgeHours);
        }
    }

    /// <summary>
    /// Enumerates non-empty payload files under <paramref name="cachePath"/> (recursive),
    /// skipping metadata extensions and dot-prefixed hidden files.
    /// </summary>
    private static IEnumerable<CacheEntry> GetCacheEntries(string cachePath)
    {
        foreach (var file in Directory.EnumerateFiles(cachePath, "*", SearchOption.AllDirectories))
        {
            // Skip metadata files
            if (MetadataExtensions.Contains(Path.GetExtension(file)))
            {
                continue;
            }
            // Skip hidden files
            if (Path.GetFileName(file).StartsWith('.'))
            {
                continue;
            }
            var info = new FileInfo(file);
            if (info.Length > 0) // Skip empty files
            {
                yield return new CacheEntry(file, info.LastWriteTimeUtc);
            }
        }
    }

    private readonly record struct CacheEntry(string Path, DateTimeOffset LastModified);
}
/// <summary>
/// Exception thrown when Surface.FS cache validation fails at startup.
/// </summary>
public sealed class SurfaceCacheValidationException : InvalidOperationException
{
    /// <summary>Creates the exception with a descriptive, operator-facing message.</summary>
    public SurfaceCacheValidationException(string message) : base(message)
    {
    }
    /// <summary>Creates the exception wrapping an underlying cause.</summary>
    public SurfaceCacheValidationException(string message, Exception innerException)
        : base(message, innerException)
    {
    }
}

View File

@@ -0,0 +1,428 @@
using System.IO;
using System.Net;
using System.Net.Http;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Zastava.Core.Configuration;
using StellaOps.Zastava.Core.Http;
using StellaOps.Zastava.Core.Validation;
namespace StellaOps.Zastava.Core.Tests.Validation;
public sealed class OfflineStrictModeTests : IDisposable
{
private readonly string _tempCachePath;
public OfflineStrictModeTests()
{
    // Unique per-run temp directory; individual tests create it on demand and
    // Dispose() removes it afterwards.
    _tempCachePath = Path.Combine(Path.GetTempPath(), "zastava-test-cache-" + Guid.NewGuid().ToString("N")[..8]);
}
public void Dispose()
{
    // Clean up the per-test cache directory if a test actually created it.
    if (!Directory.Exists(_tempCachePath))
    {
        return;
    }

    Directory.Delete(_tempCachePath, recursive: true);
}
#region OfflineStrictModeHandler Tests
[Fact]
public async Task OfflineStrictModeHandler_WhenDisabled_AllowsAnyRequest()
{
    // Arrange: strict mode off, so the handler must pass every request through.
    var monitor = CreateOptions(strictMode: false);
    var sut = CreateHandler(monitor);
    sut.InnerHandler = new TestHttpMessageHandler();
    using var httpClient = new HttpClient(sut);

    // Act
    var response = await httpClient.GetAsync("https://external.example.com/api/data");

    // Assert
    Assert.Equal(HttpStatusCode.OK, response.StatusCode);
}
[Fact]
public async Task OfflineStrictModeHandler_WhenEnabled_BlocksExternalHost()
{
    // Arrange: strict mode on with an empty allowlist.
    var monitor = CreateOptions(strictMode: true);
    var sut = CreateHandler(monitor);
    sut.InnerHandler = new TestHttpMessageHandler();
    using var httpClient = new HttpClient(sut);

    // Act & Assert: any non-local host must be rejected with a descriptive message.
    var ex = await Assert.ThrowsAsync<OfflineStrictModeException>(
        () => httpClient.GetAsync("https://external.example.com/api/data"));
    Assert.Contains("external.example.com", ex.Message);
    Assert.Contains("offline strict mode", ex.Message.ToLowerInvariant());
}
[Fact]
public async Task OfflineStrictModeHandler_WhenEnabled_AllowsLocalhost()
{
    // Arrange: strict mode on; localhost is implicitly allowed regardless of the allowlist.
    var monitor = CreateOptions(strictMode: true);
    var sut = CreateHandler(monitor);
    sut.InnerHandler = new TestHttpMessageHandler();
    using var httpClient = new HttpClient(sut);

    // Act
    var response = await httpClient.GetAsync("http://localhost:8080/api/health");

    // Assert
    Assert.Equal(HttpStatusCode.OK, response.StatusCode);
}
[Fact]
public async Task OfflineStrictModeHandler_WhenEnabled_AllowsLoopbackIp()
{
    // Arrange: strict mode on; the loopback IP 127.0.0.1 is implicitly allowed.
    var monitor = CreateOptions(strictMode: true);
    var sut = CreateHandler(monitor);
    sut.InnerHandler = new TestHttpMessageHandler();
    using var httpClient = new HttpClient(sut);

    // Act
    var response = await httpClient.GetAsync("http://127.0.0.1:8080/api/health");

    // Assert
    Assert.Equal(HttpStatusCode.OK, response.StatusCode);
}
[Fact]
public async Task OfflineStrictModeHandler_WhenEnabled_AllowsExplicitlyAllowedHost()
{
    // Arrange: strict mode on with two hosts explicitly allowlisted.
    var monitor = CreateOptions(
        strictMode: true,
        allowedHosts: ["scanner.internal", "backend.corp"]);
    var sut = CreateHandler(monitor);
    sut.InnerHandler = new TestHttpMessageHandler();
    using var httpClient = new HttpClient(sut);

    // Act: target one of the allowlisted hosts.
    var response = await httpClient.GetAsync("https://scanner.internal/api/events");

    // Assert
    Assert.Equal(HttpStatusCode.OK, response.StatusCode);
}
[Fact]
public async Task OfflineStrictModeHandler_WhenEnabled_SupportsWildcardHost()
{
    // Arrange: a wildcard pattern should cover arbitrary subdomains.
    var monitor = CreateOptions(
        strictMode: true,
        allowedHosts: ["*.internal.corp"]);
    var sut = CreateHandler(monitor);
    sut.InnerHandler = new TestHttpMessageHandler();
    using var httpClient = new HttpClient(sut);

    // Act: subdomain of the wildcard pattern.
    var response = await httpClient.GetAsync("https://scanner.internal.corp/api/events");

    // Assert
    Assert.Equal(HttpStatusCode.OK, response.StatusCode);
}
[Fact]
public async Task OfflineStrictModeHandler_WhenEnabled_BlocksNonMatchingWildcard()
{
    // Arrange: a wildcard pattern must not leak to unrelated domains.
    var monitor = CreateOptions(
        strictMode: true,
        allowedHosts: ["*.internal.corp"]);
    var sut = CreateHandler(monitor);
    sut.InnerHandler = new TestHttpMessageHandler();
    using var httpClient = new HttpClient(sut);

    // Act & Assert: a host outside the wildcard's suffix is blocked.
    var ex = await Assert.ThrowsAsync<OfflineStrictModeException>(
        () => httpClient.GetAsync("https://scanner.external.com/api/events"));
    Assert.Contains("scanner.external.com", ex.Message);
}
#endregion
#region SurfaceCacheValidator Tests
[Fact]
public async Task SurfaceCacheValidator_WhenRequireCacheDisabled_SkipsValidation()
{
    // Arrange: validation is opt-in via RequireSurfaceCache.
    var monitor = CreateOptions(requireSurfaceCache: false);
    var sut = CreateValidator(monitor);

    // Act & Assert: startup completes without touching the filesystem.
    await sut.StartAsync(CancellationToken.None);
}
[Fact]
public async Task SurfaceCacheValidator_WhenPathNotConfigured_ThrowsException()
{
    // Arrange: cache required but no path supplied.
    var monitor = CreateOptions(requireSurfaceCache: true, surfaceCachePath: null);
    var sut = CreateValidator(monitor);

    // Act & Assert
    var ex = await Assert.ThrowsAsync<SurfaceCacheValidationException>(
        () => sut.StartAsync(CancellationToken.None));
    Assert.Contains("path is required", ex.Message.ToLowerInvariant());
}
[Fact]
public async Task SurfaceCacheValidator_WhenDirectoryMissing_ThrowsException()
{
    // Arrange: configured path points at a directory that was never created.
    var monitor = CreateOptions(
        requireSurfaceCache: true,
        surfaceCachePath: "/nonexistent/path/to/cache");
    var sut = CreateValidator(monitor);

    // Act & Assert
    var ex = await Assert.ThrowsAsync<SurfaceCacheValidationException>(
        () => sut.StartAsync(CancellationToken.None));
    Assert.Contains("does not exist", ex.Message.ToLowerInvariant());
}
[Fact]
public async Task SurfaceCacheValidator_WhenCacheEmpty_ThrowsException()
{
    // Arrange: directory exists but holds no entries at all.
    Directory.CreateDirectory(_tempCachePath);
    var monitor = CreateOptions(
        requireSurfaceCache: true,
        surfaceCachePath: _tempCachePath,
        minimumCacheEntries: 1);
    var sut = CreateValidator(monitor);

    // Act & Assert: message reports actual vs. required entry counts.
    var ex = await Assert.ThrowsAsync<SurfaceCacheValidationException>(
        () => sut.StartAsync(CancellationToken.None));
    Assert.Contains("0 entries", ex.Message);
    Assert.Contains("1 are required", ex.Message);
}
[Fact]
public async Task SurfaceCacheValidator_WhenBelowMinimumEntries_ThrowsException()
{
    // Arrange: two entries present but five required.
    Directory.CreateDirectory(_tempCachePath);
    File.WriteAllText(Path.Combine(_tempCachePath, "entry1.json"), "{}");
    File.WriteAllText(Path.Combine(_tempCachePath, "entry2.json"), "{}");
    var monitor = CreateOptions(
        requireSurfaceCache: true,
        surfaceCachePath: _tempCachePath,
        minimumCacheEntries: 5);
    var sut = CreateValidator(monitor);

    // Act & Assert
    var ex = await Assert.ThrowsAsync<SurfaceCacheValidationException>(
        () => sut.StartAsync(CancellationToken.None));
    Assert.Contains("2 entries", ex.Message);
    Assert.Contains("5 are required", ex.Message);
}
[Fact]
public async Task SurfaceCacheValidator_WhenSufficientEntries_Succeeds()
{
    // Arrange: exactly the required number of non-empty entries.
    Directory.CreateDirectory(_tempCachePath);
    File.WriteAllText(Path.Combine(_tempCachePath, "entry1.json"), "{}");
    File.WriteAllText(Path.Combine(_tempCachePath, "entry2.json"), "{}");
    File.WriteAllText(Path.Combine(_tempCachePath, "entry3.json"), "{}");
    var monitor = CreateOptions(
        requireSurfaceCache: true,
        surfaceCachePath: _tempCachePath,
        minimumCacheEntries: 3);
    var sut = CreateValidator(monitor);

    // Act & Assert: startup completes without throwing.
    await sut.StartAsync(CancellationToken.None);
}
[Fact]
public async Task SurfaceCacheValidator_IgnoresMetadataFiles()
{
    // Arrange: one payload file alongside several bookkeeping files that must not count.
    Directory.CreateDirectory(_tempCachePath);
    File.WriteAllText(Path.Combine(_tempCachePath, "entry1.json"), "{}");
    File.WriteAllText(Path.Combine(_tempCachePath, ".manifest"), "metadata");
    File.WriteAllText(Path.Combine(_tempCachePath, "data.index"), "index");
    File.WriteAllText(Path.Combine(_tempCachePath, ".lock"), "lock");
    var monitor = CreateOptions(
        requireSurfaceCache: true,
        surfaceCachePath: _tempCachePath,
        minimumCacheEntries: 1);
    var sut = CreateValidator(monitor);

    // Act & Assert: the single real entry satisfies the minimum.
    await sut.StartAsync(CancellationToken.None);
}
[Fact]
public async Task SurfaceCacheValidator_IgnoresEmptyFiles()
{
    // Arrange: a zero-byte file must not count toward the minimum.
    Directory.CreateDirectory(_tempCachePath);
    File.WriteAllText(Path.Combine(_tempCachePath, "entry1.json"), "{}");
    File.WriteAllText(Path.Combine(_tempCachePath, "empty.json"), ""); // Empty file
    var monitor = CreateOptions(
        requireSurfaceCache: true,
        surfaceCachePath: _tempCachePath,
        minimumCacheEntries: 2);
    var sut = CreateValidator(monitor);

    // Act & Assert: only one non-empty entry exists, so validation fails.
    var ex = await Assert.ThrowsAsync<SurfaceCacheValidationException>(
        () => sut.StartAsync(CancellationToken.None));
    Assert.Contains("1 entries", ex.Message);
}
#endregion
#region Integration Tests
[Fact]
public void FullOfflineConfiguration_ValidatesCorrectly()
{
    // Arrange: a populated cache plus a complete offline configuration section,
    // bound through the real AddZastavaRuntimeCore registration path.
    Directory.CreateDirectory(_tempCachePath);
    File.WriteAllText(Path.Combine(_tempCachePath, "vuln-data.json"), "{\"version\":1}");
    var configuration = new ConfigurationBuilder()
        .AddInMemoryCollection(new Dictionary<string, string?>
        {
            ["zastava:runtime:tenant"] = "offline-tenant",
            ["zastava:runtime:environment"] = "airgap",
            ["zastava:runtime:offline:strictMode"] = "true",
            ["zastava:runtime:offline:requireSurfaceCache"] = "true",
            ["zastava:runtime:offline:surfaceCachePath"] = _tempCachePath,
            ["zastava:runtime:offline:minimumCacheEntries"] = "1",
            ["zastava:runtime:offline:maxCacheAgeHours"] = "168",
            ["zastava:runtime:offline:allowedHosts:0"] = "localhost",
            ["zastava:runtime:offline:allowedHosts:1"] = "*.internal.corp",
            ["zastava:runtime:offline:logBlockedRequests"] = "true"
        })
        .Build();
    var services = new ServiceCollection();
    services.AddLogging();
    services.AddZastavaRuntimeCore(configuration, componentName: "observer");
    using var provider = services.BuildServiceProvider();
    // Act: resolve the bound options from DI.
    var options = provider.GetRequiredService<IOptions<ZastavaRuntimeOptions>>().Value;
    // Assert: every offline setting round-trips from configuration keys to the POCO.
    Assert.True(options.Offline.StrictMode);
    Assert.True(options.Offline.RequireSurfaceCache);
    Assert.Equal(_tempCachePath, options.Offline.SurfaceCachePath);
    Assert.Equal(1, options.Offline.MinimumCacheEntries);
    Assert.Equal(168, options.Offline.MaxCacheAgeHours);
    Assert.True(options.Offline.LogBlockedRequests);
    Assert.Equal(2, options.Offline.AllowedHosts.Count);
    Assert.Contains("localhost", options.Offline.AllowedHosts);
    Assert.Contains("*.internal.corp", options.Offline.AllowedHosts);
}
#endregion
#region Helpers
/// <summary>
/// Builds an <see cref="IOptionsMonitor{T}"/> around a <see cref="ZastavaRuntimeOptions"/>
/// instance carrying the supplied offline settings and a fixed test tenant/environment.
/// </summary>
private static IOptionsMonitor<ZastavaRuntimeOptions> CreateOptions(
    bool strictMode = false,
    bool requireSurfaceCache = false,
    string? surfaceCachePath = null,
    int minimumCacheEntries = 1,
    int maxCacheAgeHours = 168,
    bool logBlockedRequests = false,
    string[]? allowedHosts = null)
{
    var offline = new ZastavaOfflineOptions
    {
        StrictMode = strictMode,
        RequireSurfaceCache = requireSurfaceCache,
        SurfaceCachePath = surfaceCachePath,
        MinimumCacheEntries = minimumCacheEntries,
        MaxCacheAgeHours = maxCacheAgeHours,
        LogBlockedRequests = logBlockedRequests,
        // Omitted hosts become an empty (never null) list.
        AllowedHosts = allowedHosts is null ? new List<string>() : allowedHosts.ToList()
    };

    return new TestOptionsMonitor<ZastavaRuntimeOptions>(new ZastavaRuntimeOptions
    {
        Tenant = "test-tenant",
        Environment = "test",
        Offline = offline
    });
}
/// <summary>Creates the strict-mode handler under test, wired to a null logger.</summary>
private static OfflineStrictModeHandler CreateHandler(IOptionsMonitor<ZastavaRuntimeOptions> options) =>
    new OfflineStrictModeHandler(options, NullLogger<OfflineStrictModeHandler>.Instance);
/// <summary>Creates the surface-cache validator under test, wired to a null logger.</summary>
private static SurfaceCacheValidator CreateValidator(IOptionsMonitor<ZastavaRuntimeOptions> options) =>
    new SurfaceCacheValidator(options, NullLogger<SurfaceCacheValidator>.Instance);
/// <summary>
/// Minimal <see cref="IOptionsMonitor{T}"/> stub: always yields one fixed value and
/// never raises change notifications.
/// </summary>
private sealed class TestOptionsMonitor<T> : IOptionsMonitor<T>
{
    public TestOptionsMonitor(T currentValue) => CurrentValue = currentValue;

    public T CurrentValue { get; }

    public T Get(string? name) => CurrentValue;

    // No change tracking in tests, so there is nothing to dispose.
    public IDisposable? OnChange(Action<T, string?> listener) => null;
}
/// <summary>
/// Message-handler stub that answers every request with 200 OK and a fixed JSON body.
/// </summary>
private sealed class TestHttpMessageHandler : HttpMessageHandler
{
    protected override Task<HttpResponseMessage> SendAsync(
        HttpRequestMessage request,
        CancellationToken cancellationToken)
    {
        var response = new HttpResponseMessage(HttpStatusCode.OK)
        {
            Content = new StringContent("{\"status\":\"ok\"}")
        };
        return Task.FromResult(response);
    }
}
#endregion
}

View File

@@ -0,0 +1,393 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging.Abstractions;
using StellaOps.Zastava.Observer.ContainerRuntime.Windows;
using Xunit;
namespace StellaOps.Zastava.Observer.Tests.ContainerRuntime.Windows;
/// <summary>
/// Unit tests for the Windows container runtime data model
/// (container info, events, states, and runtime identity). These run on any OS;
/// platform-dependent behaviour is covered by the integration tests in this file.
/// </summary>
public sealed class WindowsContainerRuntimeTests
{
    [Fact]
    public void WindowsContainerInfo_RequiredProperties_AreSet()
    {
        var info = new WindowsContainerInfo
        {
            Id = "abc123",
            Name = "test-container",
            ImageRef = "mcr.microsoft.com/windows/servercore:ltsc2022"
        };

        Assert.Equal("abc123", info.Id);
        Assert.Equal("test-container", info.Name);
        Assert.Equal("mcr.microsoft.com/windows/servercore:ltsc2022", info.ImageRef);
        // Defaults for the properties that were not set explicitly.
        Assert.Equal(WindowsContainerState.Unknown, info.State);
        Assert.Equal("windows", info.RuntimeType);
        Assert.Empty(info.Command);
        Assert.Empty(info.Labels);
    }

    [Fact]
    public void WindowsContainerInfo_WithKubernetesOwner_HasOwnerSet()
    {
        var owner = new WindowsContainerOwner
        {
            Kind = "Pod",
            Name = "my-pod",
            Namespace = "default"
        };
        var info = new WindowsContainerInfo
        {
            Id = "def456",
            Name = "k8s_container_pod",
            Owner = owner
        };

        Assert.NotNull(info.Owner);
        Assert.Equal("Pod", info.Owner.Kind);
        Assert.Equal("my-pod", info.Owner.Name);
        Assert.Equal("default", info.Owner.Namespace);
    }

    [Fact]
    public void WindowsContainerInfo_HyperVContainer_HasIsolationFlag()
    {
        var info = new WindowsContainerInfo
        {
            Id = "hyperv123",
            Name = "hyperv-container",
            HyperVIsolated = true,
            RuntimeType = "hyperv"
        };

        Assert.True(info.HyperVIsolated);
        Assert.Equal("hyperv", info.RuntimeType);
    }

    [Fact]
    public void WindowsContainerEvent_RequiredProperties_AreSet()
    {
        var when = DateTimeOffset.UtcNow;
        var containerEvent = new WindowsContainerEvent
        {
            Type = WindowsContainerEventType.ContainerStarted,
            ContainerId = "xyz789",
            ContainerName = "started-container",
            ImageRef = "myimage:latest",
            Timestamp = when,
            Data = new Dictionary<string, string>
            {
                ["exitCode"] = "0"
            }
        };

        Assert.Equal(WindowsContainerEventType.ContainerStarted, containerEvent.Type);
        Assert.Equal("xyz789", containerEvent.ContainerId);
        Assert.Equal("started-container", containerEvent.ContainerName);
        Assert.Equal("myimage:latest", containerEvent.ImageRef);
        Assert.Equal(when, containerEvent.Timestamp);
        Assert.NotNull(containerEvent.Data);
        Assert.Equal("0", containerEvent.Data["exitCode"]);
    }

    [Theory]
    [InlineData(WindowsContainerEventType.ContainerCreated)]
    [InlineData(WindowsContainerEventType.ContainerStarted)]
    [InlineData(WindowsContainerEventType.ContainerStopped)]
    [InlineData(WindowsContainerEventType.ContainerDeleted)]
    [InlineData(WindowsContainerEventType.ProcessStarted)]
    [InlineData(WindowsContainerEventType.ProcessExited)]
    public void WindowsContainerEventType_AllValues_AreDefined(WindowsContainerEventType eventType)
    {
        var containerEvent = new WindowsContainerEvent
        {
            Type = eventType,
            ContainerId = "test",
            Timestamp = DateTimeOffset.UtcNow
        };

        Assert.Equal(eventType, containerEvent.Type);
    }

    [Fact]
    public void WindowsRuntimeIdentity_RequiredProperties_AreSet()
    {
        var identity = new WindowsRuntimeIdentity
        {
            RuntimeName = "docker",
            RuntimeVersion = "20.10.21",
            OsVersion = "10.0.20348",
            OsBuild = 20348,
            HyperVAvailable = true
        };

        Assert.Equal("docker", identity.RuntimeName);
        Assert.Equal("20.10.21", identity.RuntimeVersion);
        Assert.Equal("10.0.20348", identity.OsVersion);
        Assert.Equal(20348, identity.OsBuild);
        Assert.True(identity.HyperVAvailable);
    }

    [Theory]
    [InlineData(WindowsContainerState.Unknown)]
    [InlineData(WindowsContainerState.Created)]
    [InlineData(WindowsContainerState.Running)]
    [InlineData(WindowsContainerState.Paused)]
    [InlineData(WindowsContainerState.Stopped)]
    public void WindowsContainerState_AllValues_AreDefined(WindowsContainerState state)
    {
        var info = new WindowsContainerInfo
        {
            Id = "test",
            Name = "test",
            State = state
        };

        Assert.Equal(state, info.State);
    }

    [Fact]
    public void WindowsContainerInfo_WithTimestamps_TracksLifecycle()
    {
        var createdAt = DateTimeOffset.UtcNow.AddMinutes(-10);
        var startedAt = DateTimeOffset.UtcNow.AddMinutes(-9);
        var finishedAt = DateTimeOffset.UtcNow;
        var info = new WindowsContainerInfo
        {
            Id = "lifecycle-test",
            Name = "lifecycle-container",
            State = WindowsContainerState.Stopped,
            CreatedAt = createdAt,
            StartedAt = startedAt,
            FinishedAt = finishedAt,
            ExitCode = 0
        };

        Assert.Equal(createdAt, info.CreatedAt);
        Assert.Equal(startedAt, info.StartedAt);
        Assert.Equal(finishedAt, info.FinishedAt);
        Assert.Equal(0, info.ExitCode);
        // Lifecycle ordering: created -> started -> finished.
        Assert.True(info.StartedAt > info.CreatedAt);
        Assert.True(info.FinishedAt > info.StartedAt);
    }

    [Fact]
    public void WindowsContainerInfo_WithLabels_CanBeEnumerated()
    {
        var info = new WindowsContainerInfo
        {
            Id = "labeled",
            Name = "labeled-container",
            Labels = new Dictionary<string, string>
            {
                ["io.kubernetes.pod.name"] = "my-pod",
                ["io.kubernetes.pod.namespace"] = "default",
                ["app"] = "test-app"
            }
        };

        Assert.Equal(3, info.Labels.Count);
        Assert.Equal("my-pod", info.Labels["io.kubernetes.pod.name"]);
        Assert.Equal("default", info.Labels["io.kubernetes.pod.namespace"]);
        Assert.Equal("test-app", info.Labels["app"]);
    }

    [Fact]
    public void WindowsContainerInfo_WithCommand_HasEntrypoint()
    {
        var info = new WindowsContainerInfo
        {
            Id = "cmd",
            Name = "cmd-container",
            Command = new[] { "powershell.exe", "-Command", "Get-Process" }
        };

        Assert.Equal(3, info.Command.Count);
        Assert.Equal("powershell.exe", info.Command[0]);
        Assert.Contains("-Command", info.Command);
    }
}
/// <summary>
/// Integration tests that require Windows and Docker Windows containers.
/// These tests are skipped on non-Windows platforms.
/// </summary>
[Collection("WindowsIntegration")]
public sealed class WindowsContainerRuntimeIntegrationTests
{
    // Docker-backed tests additionally require an explicit opt-in via environment variable.
    private static bool IsWindowsWithDocker =>
        RuntimeInformation.IsOSPlatform(OSPlatform.Windows) &&
        Environment.GetEnvironmentVariable("ZASTAVA_WINDOWS_INTEGRATION_TESTS") == "true";

    [SkippableFact]
    public async Task WindowsLibraryHashCollector_CollectCurrentProcess_ReturnsModules()
    {
        Skip.IfNot(RuntimeInformation.IsOSPlatform(OSPlatform.Windows), "Windows-only test");

        var sut = new WindowsLibraryHashCollector(NullLogger<WindowsLibraryHashCollector>.Instance);

        var modules = await sut.CollectAsync(Environment.ProcessId, CancellationToken.None);

        // The current process must have at least some loaded modules.
        Assert.NotEmpty(modules);
        // The main process executable should be among them.
        var hasExecutable = modules.Any(lib => lib.Path.EndsWith(".exe", StringComparison.OrdinalIgnoreCase));
        Assert.True(hasExecutable, "Should include at least one .exe module");
        // Every reported module carries a path.
        Assert.All(modules, lib => Assert.False(string.IsNullOrWhiteSpace(lib.Path)));
    }

    [SkippableFact]
    public async Task WindowsLibraryHashCollector_WithMaxLimit_RespectsLimit()
    {
        Skip.IfNot(RuntimeInformation.IsOSPlatform(OSPlatform.Windows), "Windows-only test");

        var sut = new WindowsLibraryHashCollector(
            NullLogger<WindowsLibraryHashCollector>.Instance,
            maxLibraries: 5);

        var modules = await sut.CollectAsync(Environment.ProcessId, CancellationToken.None);

        Assert.True(modules.Count <= 5, "Should respect maxLibraries limit");
    }

    [SkippableFact]
    public async Task WindowsLibraryHashCollector_InvalidProcessId_ReturnsEmptyList()
    {
        Skip.IfNot(RuntimeInformation.IsOSPlatform(OSPlatform.Windows), "Windows-only test");

        var sut = new WindowsLibraryHashCollector(NullLogger<WindowsLibraryHashCollector>.Instance);

        // int.MaxValue is not a valid live process ID.
        var modules = await sut.CollectAsync(int.MaxValue, CancellationToken.None);

        Assert.Empty(modules);
    }

    [SkippableFact]
    public async Task WindowsLibraryHashCollector_ComputesHashes_WhenFilesAccessible()
    {
        Skip.IfNot(RuntimeInformation.IsOSPlatform(OSPlatform.Windows), "Windows-only test");

        var sut = new WindowsLibraryHashCollector(
            NullLogger<WindowsLibraryHashCollector>.Instance,
            maxLibraries: 10,
            maxFileBytes: 100_000_000);

        var modules = await sut.CollectAsync(Environment.ProcessId, CancellationToken.None);

        // At least some modules should hash successfully (system DLLs should be accessible).
        var hashed = modules.Where(lib => !string.IsNullOrEmpty(lib.Sha256)).ToList();
        Assert.NotEmpty(hashed);
        Assert.All(hashed, lib =>
        {
            Assert.StartsWith("sha256:", lib.Sha256);
            Assert.Equal(71, lib.Sha256!.Length); // "sha256:" + 64 hex chars
        });
    }

    [SkippableFact]
    public async Task DockerWindowsRuntimeClient_IsAvailable_WhenDockerRunning()
    {
        Skip.IfNot(IsWindowsWithDocker, "Requires Windows with Docker in Windows containers mode");

        await using var sut = new DockerWindowsRuntimeClient(NullLogger<DockerWindowsRuntimeClient>.Instance);

        var available = await sut.IsAvailableAsync(CancellationToken.None);

        Assert.True(available, "Docker Windows should be available");
    }

    [SkippableFact]
    public async Task DockerWindowsRuntimeClient_GetIdentity_ReturnsDockerInfo()
    {
        Skip.IfNot(IsWindowsWithDocker, "Requires Windows with Docker in Windows containers mode");

        await using var sut = new DockerWindowsRuntimeClient(NullLogger<DockerWindowsRuntimeClient>.Instance);

        var identity = await sut.GetIdentityAsync(CancellationToken.None);

        Assert.NotNull(identity);
        Assert.Equal("docker", identity.RuntimeName);
        Assert.False(string.IsNullOrEmpty(identity.RuntimeVersion));
        Assert.False(string.IsNullOrEmpty(identity.OsVersion));
    }

    [SkippableFact]
    public async Task DockerWindowsRuntimeClient_ListContainers_ReturnsWindowsContainers()
    {
        Skip.IfNot(IsWindowsWithDocker, "Requires Windows with Docker in Windows containers mode");

        await using var sut = new DockerWindowsRuntimeClient(NullLogger<DockerWindowsRuntimeClient>.Instance);

        var containers = await sut.ListContainersAsync(
            WindowsContainerState.Running,
            CancellationToken.None);

        // May be empty when no containers are running, but must not throw or return null.
        Assert.NotNull(containers);
        Assert.All(containers, c =>
        {
            Assert.False(string.IsNullOrEmpty(c.Id));
            Assert.False(string.IsNullOrEmpty(c.Name));
        });
    }
}
/// <summary>
/// Skippable fact attribute for conditional tests.
/// </summary>
/// <remarks>
/// NOTE(review): this derives from the plain xUnit <see cref="FactAttribute"/> and adds no
/// discoverer or runner logic, so a <see cref="SkipException"/> thrown by <c>Skip.IfNot</c>
/// will presumably surface as a test FAILURE rather than a skip on platforms where the
/// condition is false — confirm against the xUnit version in use. The third-party
/// Xunit.SkippableFact package provides an attribute that converts SkipException into a
/// skip result; consider referencing it instead of this stub.
/// </remarks>
public sealed class SkippableFactAttribute : FactAttribute
{
    public SkippableFactAttribute()
    {
    }
}
/// <summary>
/// Skip helper for conditional tests: throws <see cref="SkipException"/> when the
/// requested condition does not hold.
/// </summary>
public static class Skip
{
    /// <summary>Skips the current test unless <paramref name="condition"/> is true.</summary>
    public static void IfNot(bool condition, string reason)
    {
        if (condition)
        {
            return;
        }

        throw new SkipException(reason);
    }

    /// <summary>Skips the current test when <paramref name="condition"/> is true.</summary>
    public static void If(bool condition, string reason)
    {
        if (!condition)
        {
            return;
        }

        throw new SkipException(reason);
    }
}
/// <summary>
/// Exception thrown to skip a test.
/// </summary>
/// <remarks>
/// Provides the standard exception constructor set (parameterless, message,
/// message + inner) per the .NET exception design guidelines (CA1032), so the
/// type composes with generic exception-handling and wrapping code.
/// </remarks>
public sealed class SkipException : Exception
{
    /// <summary>Creates a skip exception with no explicit reason.</summary>
    public SkipException()
    {
    }

    /// <summary>Creates a skip exception explaining why the test was skipped.</summary>
    /// <param name="reason">Human-readable skip reason, surfaced as <see cref="Exception.Message"/>.</param>
    public SkipException(string reason) : base(reason)
    {
    }

    /// <summary>Creates a skip exception with a reason and the underlying cause.</summary>
    /// <param name="reason">Human-readable skip reason.</param>
    /// <param name="innerException">The exception that triggered the skip.</param>
    public SkipException(string reason, Exception innerException) : base(reason, innerException)
    {
    }
}