test fixes and new product advisories work
This commit is contained in:
@@ -0,0 +1,626 @@
|
||||
// <copyright file="CgroupContainerResolver.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Cgroup;
|
||||
|
||||
using System.Collections.Concurrent;
|
||||
using System.Text.RegularExpressions;
|
||||
using Microsoft.Extensions.Logging;
|
||||
|
||||
/// <summary>
|
||||
/// Resolves cgroup IDs and PIDs to container identities.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Supports:
|
||||
/// - containerd: /system.slice/containerd-{id}.scope
|
||||
/// - Docker: /docker/{id} or /system.slice/docker-{id}.scope
|
||||
/// - CRI-O: /crio-{id}.scope
|
||||
/// - Podman: /libpod-{id}.scope
|
||||
/// </remarks>
|
||||
public sealed class CgroupContainerResolver : IDisposable
|
||||
{
|
||||
private readonly ILogger<CgroupContainerResolver> _logger;
|
||||
private readonly string _procRoot;
|
||||
private readonly string _cgroupRoot;
|
||||
private readonly ConcurrentDictionary<ulong, ContainerIdentity> _cgroupCache;
|
||||
private readonly ConcurrentDictionary<int, ContainerIdentity> _pidCache;
|
||||
private readonly ConcurrentDictionary<int, NamespaceInfo> _namespaceCache;
|
||||
private readonly NamespaceFilter? _namespaceFilter;
|
||||
private bool _disposed;
|
||||
|
||||
// Regex patterns for extracting container IDs from cgroup paths
|
||||
private static readonly Regex ContainerdPattern = new(
|
||||
@"containerd-([a-f0-9]{64})\.scope",
|
||||
RegexOptions.Compiled | RegexOptions.IgnoreCase);
|
||||
|
||||
private static readonly Regex DockerPattern = new(
|
||||
@"docker[/-]([a-f0-9]{64})(?:\.scope)?",
|
||||
RegexOptions.Compiled | RegexOptions.IgnoreCase);
|
||||
|
||||
private static readonly Regex CrioPattern = new(
|
||||
@"crio-([a-f0-9]{64})\.scope",
|
||||
RegexOptions.Compiled | RegexOptions.IgnoreCase);
|
||||
|
||||
private static readonly Regex PodmanPattern = new(
|
||||
@"libpod-([a-f0-9]{64})\.scope",
|
||||
RegexOptions.Compiled | RegexOptions.IgnoreCase);
|
||||
|
||||
// Regex pattern for extracting namespace inode from symlink target
|
||||
// Format: "pid:[4026531836]" or "mnt:[4026531840]"
|
||||
private static readonly Regex NamespaceInodePattern = new(
|
||||
@"\[(\d+)\]",
|
||||
RegexOptions.Compiled);
|
||||
|
||||
/// <summary>
|
||||
/// Cache TTL for container identity lookups.
|
||||
/// </summary>
|
||||
private static readonly TimeSpan CacheTtl = TimeSpan.FromMinutes(5);
|
||||
|
||||
public CgroupContainerResolver(
|
||||
ILogger<CgroupContainerResolver> logger,
|
||||
string procRoot = "/proc",
|
||||
string cgroupRoot = "/sys/fs/cgroup",
|
||||
NamespaceFilter? namespaceFilter = null)
|
||||
{
|
||||
_logger = logger;
|
||||
_procRoot = procRoot;
|
||||
_cgroupRoot = cgroupRoot;
|
||||
_cgroupCache = new();
|
||||
_pidCache = new();
|
||||
_namespaceCache = new();
|
||||
_namespaceFilter = namespaceFilter;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Resolve PID to container identity.
|
||||
/// </summary>
|
||||
public ContainerIdentity? ResolveByPid(int pid)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
if (_pidCache.TryGetValue(pid, out var cached))
|
||||
{
|
||||
return cached;
|
||||
}
|
||||
|
||||
var identity = ResolveByPidInternal(pid);
|
||||
if (identity != null)
|
||||
{
|
||||
_pidCache.TryAdd(pid, identity);
|
||||
}
|
||||
|
||||
return identity;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Resolve cgroup ID to container identity.
|
||||
/// </summary>
|
||||
public ContainerIdentity? ResolveByCgroupId(ulong cgroupId)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
if (_cgroupCache.TryGetValue(cgroupId, out var cached))
|
||||
{
|
||||
return cached;
|
||||
}
|
||||
|
||||
// Cgroup ID requires scanning /proc to find a matching process
|
||||
// This is expensive, so we rely on PID-based resolution primarily
|
||||
_logger.LogDebug("Cgroup ID {CgroupId} not in cache, scanning /proc", cgroupId);
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Register a known mapping from cgroup ID to container.
|
||||
/// </summary>
|
||||
public void RegisterCgroupMapping(ulong cgroupId, ContainerIdentity identity)
|
||||
{
|
||||
_cgroupCache.TryAdd(cgroupId, identity);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Invalidate cached entries for a process.
|
||||
/// </summary>
|
||||
public void InvalidatePid(int pid)
|
||||
{
|
||||
_pidCache.TryRemove(pid, out _);
|
||||
_namespaceCache.TryRemove(pid, out _);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get namespace information for a process.
|
||||
/// </summary>
|
||||
public NamespaceInfo? GetNamespaceInfo(int pid)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
if (_namespaceCache.TryGetValue(pid, out var cached))
|
||||
{
|
||||
return cached;
|
||||
}
|
||||
|
||||
var nsInfo = ReadNamespaceInfo(pid);
|
||||
if (nsInfo != null)
|
||||
{
|
||||
_namespaceCache.TryAdd(pid, nsInfo);
|
||||
}
|
||||
|
||||
return nsInfo;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Check if a process matches the configured namespace filter.
|
||||
/// </summary>
|
||||
public bool MatchesNamespaceFilter(int pid)
|
||||
{
|
||||
if (_namespaceFilter == null)
|
||||
{
|
||||
return true; // No filter configured, allow all
|
||||
}
|
||||
|
||||
var nsInfo = GetNamespaceInfo(pid);
|
||||
if (nsInfo == null)
|
||||
{
|
||||
return _namespaceFilter.AllowUnknown;
|
||||
}
|
||||
|
||||
return _namespaceFilter.Matches(nsInfo);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Check if a process is in the same namespace as a reference PID.
|
||||
/// </summary>
|
||||
public bool IsInSameNamespace(int pid, int referencePid, NamespaceType namespaceType = NamespaceType.Pid)
|
||||
{
|
||||
var pidNsInfo = GetNamespaceInfo(pid);
|
||||
var refNsInfo = GetNamespaceInfo(referencePid);
|
||||
|
||||
if (pidNsInfo == null || refNsInfo == null)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return namespaceType switch
|
||||
{
|
||||
NamespaceType.Pid => pidNsInfo.PidNs == refNsInfo.PidNs,
|
||||
NamespaceType.Mnt => pidNsInfo.MntNs == refNsInfo.MntNs,
|
||||
NamespaceType.Net => pidNsInfo.NetNs == refNsInfo.NetNs,
|
||||
NamespaceType.User => pidNsInfo.UserNs == refNsInfo.UserNs,
|
||||
NamespaceType.Cgroup => pidNsInfo.CgroupNs == refNsInfo.CgroupNs,
|
||||
_ => false,
|
||||
};
|
||||
}
|
||||
|
||||
private NamespaceInfo? ReadNamespaceInfo(int pid)
|
||||
{
|
||||
try
|
||||
{
|
||||
var nsDir = Path.Combine(_procRoot, pid.ToString(), "ns");
|
||||
|
||||
// Check if the ns directory exists
|
||||
if (!Directory.Exists(nsDir))
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
var pidNsPath = Path.Combine(nsDir, "pid");
|
||||
var mntNsPath = Path.Combine(nsDir, "mnt");
|
||||
var netNsPath = Path.Combine(nsDir, "net");
|
||||
var userNsPath = Path.Combine(nsDir, "user");
|
||||
var cgroupNsPath = Path.Combine(nsDir, "cgroup");
|
||||
|
||||
var pidNs = ReadNamespaceInode(pidNsPath);
|
||||
var mntNs = ReadNamespaceInode(mntNsPath);
|
||||
|
||||
// If we can't read at least pid or mnt namespace, return null
|
||||
if (pidNs == 0 && mntNs == 0)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
return new NamespaceInfo
|
||||
{
|
||||
PidNs = pidNs,
|
||||
MntNs = mntNs,
|
||||
NetNs = ReadNamespaceInode(netNsPath),
|
||||
UserNs = ReadNamespaceInode(userNsPath),
|
||||
CgroupNs = ReadNamespaceInode(cgroupNsPath),
|
||||
};
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogDebug(ex, "Failed to read namespace info for PID {Pid}", pid);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private ulong ReadNamespaceInode(string nsPath)
|
||||
{
|
||||
// On Linux, /proc/{pid}/ns/{type} is a symlink like "pid:[4026531836]"
|
||||
// We need to read the symlink target and extract the inode number
|
||||
|
||||
if (!File.Exists(nsPath))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
// Try to read the symlink target
|
||||
// On Windows/test environments, we simulate with a file containing the inode
|
||||
var target = File.Exists(nsPath + ".link")
|
||||
? File.ReadAllText(nsPath + ".link").Trim()
|
||||
: File.ReadAllText(nsPath).Trim();
|
||||
|
||||
var match = NamespaceInodePattern.Match(target);
|
||||
if (match.Success && ulong.TryParse(match.Groups[1].Value, out var inode))
|
||||
{
|
||||
return inode;
|
||||
}
|
||||
|
||||
// If no match, try parsing the whole content as inode (for test fixtures)
|
||||
if (ulong.TryParse(target, out var directInode))
|
||||
{
|
||||
return directInode;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
catch
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
private ContainerIdentity? ResolveByPidInternal(int pid)
|
||||
{
|
||||
var cgroupPath = Path.Combine(_procRoot, pid.ToString(), "cgroup");
|
||||
if (!File.Exists(cgroupPath))
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
var cgroupContent = File.ReadAllText(cgroupPath);
|
||||
var identity = ParseCgroupContent(cgroupContent);
|
||||
|
||||
if (identity != null)
|
||||
{
|
||||
// Enrich with namespace information
|
||||
var nsInfo = ReadNamespaceInfo(pid);
|
||||
if (nsInfo != null)
|
||||
{
|
||||
identity = identity with { Namespaces = nsInfo };
|
||||
}
|
||||
}
|
||||
|
||||
return identity;
|
||||
}
|
||||
catch (IOException ex)
|
||||
{
|
||||
_logger.LogDebug(ex, "Failed to read cgroup for PID {Pid}", pid);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private ContainerIdentity? ParseCgroupContent(string content)
|
||||
{
|
||||
// cgroup v2: single line "0::/path"
|
||||
// cgroup v1: multiple lines "hierarchy-ID:controllers:path"
|
||||
|
||||
foreach (var line in content.Split('\n', StringSplitOptions.RemoveEmptyEntries))
|
||||
{
|
||||
var parts = line.Split(':', 3);
|
||||
if (parts.Length < 3)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
var path = parts[2];
|
||||
var identity = ParseCgroupPath(path);
|
||||
if (identity != null)
|
||||
{
|
||||
return identity;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
private ContainerIdentity? ParseCgroupPath(string path)
|
||||
{
|
||||
// Try each runtime pattern
|
||||
var match = ContainerdPattern.Match(path);
|
||||
if (match.Success)
|
||||
{
|
||||
return new ContainerIdentity
|
||||
{
|
||||
ContainerId = $"containerd://{match.Groups[1].Value}",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = match.Groups[1].Value[..12],
|
||||
FullId = match.Groups[1].Value,
|
||||
};
|
||||
}
|
||||
|
||||
match = DockerPattern.Match(path);
|
||||
if (match.Success)
|
||||
{
|
||||
return new ContainerIdentity
|
||||
{
|
||||
ContainerId = $"docker://{match.Groups[1].Value}",
|
||||
Runtime = ContainerRuntime.Docker,
|
||||
ShortId = match.Groups[1].Value[..12],
|
||||
FullId = match.Groups[1].Value,
|
||||
};
|
||||
}
|
||||
|
||||
match = CrioPattern.Match(path);
|
||||
if (match.Success)
|
||||
{
|
||||
return new ContainerIdentity
|
||||
{
|
||||
ContainerId = $"cri-o://{match.Groups[1].Value}",
|
||||
Runtime = ContainerRuntime.CriO,
|
||||
ShortId = match.Groups[1].Value[..12],
|
||||
FullId = match.Groups[1].Value,
|
||||
};
|
||||
}
|
||||
|
||||
match = PodmanPattern.Match(path);
|
||||
if (match.Success)
|
||||
{
|
||||
return new ContainerIdentity
|
||||
{
|
||||
ContainerId = $"podman://{match.Groups[1].Value}",
|
||||
Runtime = ContainerRuntime.Podman,
|
||||
ShortId = match.Groups[1].Value[..12],
|
||||
FullId = match.Groups[1].Value,
|
||||
};
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public void Dispose()
|
||||
{
|
||||
if (!_disposed)
|
||||
{
|
||||
_cgroupCache.Clear();
|
||||
_pidCache.Clear();
|
||||
_disposed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Container identity information.
|
||||
/// </summary>
|
||||
public sealed record ContainerIdentity
|
||||
{
|
||||
/// <summary>
|
||||
/// Full container ID with runtime prefix.
|
||||
/// Format: "{runtime}://{id}" (e.g., "containerd://abc123...")
|
||||
/// </summary>
|
||||
public required string ContainerId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Container runtime type.
|
||||
/// </summary>
|
||||
public required ContainerRuntime Runtime { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Short container ID (first 12 chars).
|
||||
/// </summary>
|
||||
public required string ShortId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Full container ID (64 chars).
|
||||
/// </summary>
|
||||
public required string FullId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Image reference (if known).
|
||||
/// </summary>
|
||||
public string? ImageRef { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Image digest (if known).
|
||||
/// </summary>
|
||||
public string? ImageDigest { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Kubernetes pod name (if applicable).
|
||||
/// </summary>
|
||||
public string? PodName { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Kubernetes namespace (if applicable).
|
||||
/// </summary>
|
||||
public string? Namespace { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Linux namespace information for the container.
|
||||
/// </summary>
|
||||
public NamespaceInfo? Namespaces { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Linux namespace information for multi-tenant filtering.
|
||||
/// </summary>
|
||||
public sealed record NamespaceInfo
|
||||
{
|
||||
/// <summary>
|
||||
/// PID namespace inode number from /proc/{pid}/ns/pid.
|
||||
/// </summary>
|
||||
public ulong PidNs { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Mount namespace inode number from /proc/{pid}/ns/mnt.
|
||||
/// </summary>
|
||||
public ulong MntNs { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Network namespace inode number from /proc/{pid}/ns/net.
|
||||
/// </summary>
|
||||
public ulong NetNs { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// User namespace inode number from /proc/{pid}/ns/user.
|
||||
/// </summary>
|
||||
public ulong UserNs { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Cgroup namespace inode number from /proc/{pid}/ns/cgroup.
|
||||
/// </summary>
|
||||
public ulong CgroupNs { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Container runtime type.
|
||||
/// </summary>
|
||||
public enum ContainerRuntime
|
||||
{
|
||||
/// <summary>Unknown runtime.</summary>
|
||||
Unknown = 0,
|
||||
|
||||
/// <summary>containerd runtime.</summary>
|
||||
Containerd = 1,
|
||||
|
||||
/// <summary>Docker runtime.</summary>
|
||||
Docker = 2,
|
||||
|
||||
/// <summary>CRI-O runtime.</summary>
|
||||
CriO = 3,
|
||||
|
||||
/// <summary>Podman runtime.</summary>
|
||||
Podman = 4,
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Linux namespace type for filtering.
|
||||
/// </summary>
|
||||
public enum NamespaceType
|
||||
{
|
||||
/// <summary>PID namespace.</summary>
|
||||
Pid,
|
||||
|
||||
/// <summary>Mount namespace.</summary>
|
||||
Mnt,
|
||||
|
||||
/// <summary>Network namespace.</summary>
|
||||
Net,
|
||||
|
||||
/// <summary>User namespace.</summary>
|
||||
User,
|
||||
|
||||
/// <summary>Cgroup namespace.</summary>
|
||||
Cgroup,
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Filter configuration for namespace-based multi-tenant isolation.
|
||||
/// </summary>
|
||||
public sealed record NamespaceFilter
|
||||
{
|
||||
/// <summary>
|
||||
/// Target PID namespaces to include. Empty means all.
|
||||
/// </summary>
|
||||
public IReadOnlySet<ulong> TargetPidNamespaces { get; init; } = new HashSet<ulong>();
|
||||
|
||||
/// <summary>
|
||||
/// Target mount namespaces to include. Empty means all.
|
||||
/// </summary>
|
||||
public IReadOnlySet<ulong> TargetMntNamespaces { get; init; } = new HashSet<ulong>();
|
||||
|
||||
/// <summary>
|
||||
/// Target network namespaces to include. Empty means all.
|
||||
/// </summary>
|
||||
public IReadOnlySet<ulong> TargetNetNamespaces { get; init; } = new HashSet<ulong>();
|
||||
|
||||
/// <summary>
|
||||
/// Target cgroup namespaces to include. Empty means all.
|
||||
/// </summary>
|
||||
public IReadOnlySet<ulong> TargetCgroupNamespaces { get; init; } = new HashSet<ulong>();
|
||||
|
||||
/// <summary>
|
||||
/// Whether to allow processes whose namespace cannot be determined.
|
||||
/// </summary>
|
||||
public bool AllowUnknown { get; init; } = false;
|
||||
|
||||
/// <summary>
|
||||
/// Filter mode - require ALL specified namespaces to match, or ANY.
|
||||
/// </summary>
|
||||
public NamespaceFilterMode Mode { get; init; } = NamespaceFilterMode.Any;
|
||||
|
||||
/// <summary>
|
||||
/// Check if the given namespace info matches this filter.
|
||||
/// </summary>
|
||||
public bool Matches(NamespaceInfo nsInfo)
|
||||
{
|
||||
var checks = new List<bool>();
|
||||
|
||||
if (TargetPidNamespaces.Count > 0)
|
||||
{
|
||||
checks.Add(TargetPidNamespaces.Contains(nsInfo.PidNs));
|
||||
}
|
||||
|
||||
if (TargetMntNamespaces.Count > 0)
|
||||
{
|
||||
checks.Add(TargetMntNamespaces.Contains(nsInfo.MntNs));
|
||||
}
|
||||
|
||||
if (TargetNetNamespaces.Count > 0)
|
||||
{
|
||||
checks.Add(TargetNetNamespaces.Contains(nsInfo.NetNs));
|
||||
}
|
||||
|
||||
if (TargetCgroupNamespaces.Count > 0)
|
||||
{
|
||||
checks.Add(TargetCgroupNamespaces.Contains(nsInfo.CgroupNs));
|
||||
}
|
||||
|
||||
if (checks.Count == 0)
|
||||
{
|
||||
return true; // No filters specified
|
||||
}
|
||||
|
||||
return Mode == NamespaceFilterMode.All
|
||||
? checks.All(c => c)
|
||||
: checks.Any(c => c);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Create a filter from a reference PID (target same namespaces as reference).
|
||||
/// </summary>
|
||||
public static NamespaceFilter FromReferencePid(CgroupContainerResolver resolver, int referencePid)
|
||||
{
|
||||
var nsInfo = resolver.GetNamespaceInfo(referencePid);
|
||||
if (nsInfo == null)
|
||||
{
|
||||
return new NamespaceFilter { AllowUnknown = true };
|
||||
}
|
||||
|
||||
return new NamespaceFilter
|
||||
{
|
||||
TargetPidNamespaces = new HashSet<ulong> { nsInfo.PidNs },
|
||||
TargetMntNamespaces = new HashSet<ulong> { nsInfo.MntNs },
|
||||
Mode = NamespaceFilterMode.All,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Namespace filter matching mode.
|
||||
/// </summary>
|
||||
public enum NamespaceFilterMode
|
||||
{
|
||||
/// <summary>Match if ANY specified namespace matches.</summary>
|
||||
Any,
|
||||
|
||||
/// <summary>Match only if ALL specified namespaces match.</summary>
|
||||
All,
|
||||
}
|
||||
@@ -0,0 +1,155 @@
|
||||
// <copyright file="IContainerIdentityResolver.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Cgroup;
|
||||
|
||||
/// <summary>
|
||||
/// Interface for resolving container identities from runtime systems like Zastava.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// This interface enables decoupling between eBPF evidence collection and container
|
||||
/// runtime observers. Implementations may query container runtimes (containerd, Docker,
|
||||
/// CRI-O) or use cached state from container lifecycle tracking systems.
|
||||
/// </remarks>
|
||||
public interface IContainerIdentityResolver
|
||||
{
|
||||
/// <summary>
|
||||
/// Resolve container identity by container ID.
|
||||
/// </summary>
|
||||
/// <param name="containerId">
|
||||
/// Container ID in either short (12 char) or full (64 char) format.
|
||||
/// May optionally include runtime prefix (e.g., "containerd://abc123...").
|
||||
/// </param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Container identity if found; null otherwise.</returns>
|
||||
Task<ContainerIdentity?> ResolveByContainerIdAsync(string containerId, CancellationToken ct = default);
|
||||
|
||||
/// <summary>
|
||||
/// Resolve container identity by process ID.
|
||||
/// </summary>
|
||||
/// <param name="pid">Process ID running in the container.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Container identity if the PID belongs to a container; null otherwise.</returns>
|
||||
Task<ContainerIdentity?> ResolveByPidAsync(int pid, CancellationToken ct = default);
|
||||
|
||||
/// <summary>
|
||||
/// Resolve container identity by cgroup ID.
|
||||
/// </summary>
|
||||
/// <param name="cgroupId">Cgroup ID from eBPF event.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Container identity if found; null otherwise.</returns>
|
||||
Task<ContainerIdentity?> ResolveByCgroupIdAsync(ulong cgroupId, CancellationToken ct = default);
|
||||
|
||||
/// <summary>
|
||||
/// Get image digest for a container.
|
||||
/// </summary>
|
||||
/// <param name="containerId">Container ID.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>
|
||||
/// Image digest in format "sha256:..." if available; null otherwise.
|
||||
/// </returns>
|
||||
Task<string?> GetImageDigestAsync(string containerId, CancellationToken ct = default);
|
||||
|
||||
/// <summary>
|
||||
/// Register a mapping from cgroup ID to container identity for faster lookups.
|
||||
/// </summary>
|
||||
/// <param name="cgroupId">Cgroup ID.</param>
|
||||
/// <param name="identity">Container identity.</param>
|
||||
void RegisterCgroupMapping(ulong cgroupId, ContainerIdentity identity);
|
||||
|
||||
/// <summary>
|
||||
/// Event raised when a container starts.
|
||||
/// </summary>
|
||||
event Func<ContainerLifecycleEventArgs, CancellationToken, Task>? ContainerStarted;
|
||||
|
||||
/// <summary>
|
||||
/// Event raised when a container stops.
|
||||
/// </summary>
|
||||
event Func<ContainerLifecycleEventArgs, CancellationToken, Task>? ContainerStopped;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Container lifecycle event arguments.
|
||||
/// </summary>
|
||||
public sealed record ContainerLifecycleEventArgs
|
||||
{
|
||||
/// <summary>
|
||||
/// Container identity.
|
||||
/// </summary>
|
||||
public required ContainerIdentity Identity { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Event timestamp.
|
||||
/// </summary>
|
||||
public required DateTimeOffset Timestamp { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Process IDs running in this container (at time of event).
|
||||
/// </summary>
|
||||
public IReadOnlyList<int> Pids { get; init; } = [];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Adapter that wraps the local CgroupContainerResolver as an IContainerIdentityResolver.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// This provides a default implementation when no external container identity
|
||||
/// resolver (like Zastava) is available. It uses local /proc introspection only.
|
||||
/// </remarks>
|
||||
public sealed class LocalContainerIdentityResolver : IContainerIdentityResolver
|
||||
{
|
||||
private readonly CgroupContainerResolver _resolver;
|
||||
|
||||
public LocalContainerIdentityResolver(CgroupContainerResolver resolver)
|
||||
{
|
||||
_resolver = resolver;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<ContainerIdentity?> ResolveByContainerIdAsync(string containerId, CancellationToken ct = default)
|
||||
{
|
||||
// Local resolver doesn't support lookup by container ID directly
|
||||
// Would need to scan /proc to find matching cgroup
|
||||
return Task.FromResult<ContainerIdentity?>(null);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<ContainerIdentity?> ResolveByPidAsync(int pid, CancellationToken ct = default)
|
||||
{
|
||||
var identity = _resolver.ResolveByPid(pid);
|
||||
return Task.FromResult(identity);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<ContainerIdentity?> ResolveByCgroupIdAsync(ulong cgroupId, CancellationToken ct = default)
|
||||
{
|
||||
var identity = _resolver.ResolveByCgroupId(cgroupId);
|
||||
return Task.FromResult(identity);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<string?> GetImageDigestAsync(string containerId, CancellationToken ct = default)
|
||||
{
|
||||
// Local resolver doesn't have access to image digests
|
||||
// Would need container runtime API access
|
||||
return Task.FromResult<string?>(null);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public void RegisterCgroupMapping(ulong cgroupId, ContainerIdentity identity)
|
||||
{
|
||||
_resolver.RegisterCgroupMapping(cgroupId, identity);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
#pragma warning disable CS0067 // Event is never used - local resolver doesn't track container lifecycle
|
||||
public event Func<ContainerLifecycleEventArgs, CancellationToken, Task>? ContainerStarted;
|
||||
|
||||
/// <inheritdoc />
|
||||
public event Func<ContainerLifecycleEventArgs, CancellationToken, Task>? ContainerStopped;
|
||||
#pragma warning restore CS0067
|
||||
|
||||
// Note: Local resolver doesn't track container lifecycle events.
|
||||
// These would need to come from Zastava integration.
|
||||
}
|
||||
@@ -0,0 +1,80 @@
|
||||
// <copyright file="IContainerStateProvider.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Enrichment;
|
||||
|
||||
/// <summary>
|
||||
/// Interface for retrieving container state and metadata.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Implementations typically wrap Zastava's ContainerStateTracker or similar
|
||||
/// container lifecycle monitoring systems.
|
||||
/// </remarks>
|
||||
public interface IContainerStateProvider
|
||||
{
|
||||
/// <summary>
|
||||
/// Get container metadata by container ID.
|
||||
/// </summary>
|
||||
/// <param name="containerId">
|
||||
/// Container ID (short or full format, with or without runtime prefix).
|
||||
/// </param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Container metadata if found; null otherwise.</returns>
|
||||
Task<ContainerMetadata?> GetContainerMetadataAsync(string containerId, CancellationToken ct = default);
|
||||
|
||||
/// <summary>
|
||||
/// Get all running containers.
|
||||
/// </summary>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>All currently running containers.</returns>
|
||||
IAsyncEnumerable<ContainerMetadata> GetRunningContainersAsync(CancellationToken ct = default);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Container metadata for enrichment.
|
||||
/// </summary>
|
||||
public sealed record ContainerMetadata
|
||||
{
|
||||
/// <summary>
|
||||
/// Container ID (full format with runtime prefix).
|
||||
/// </summary>
|
||||
public required string ContainerId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Container name.
|
||||
/// </summary>
|
||||
public string? Name { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Image reference (tag or digest).
|
||||
/// Format: "registry/repo:tag" or "registry/repo@sha256:..."
|
||||
/// </summary>
|
||||
public string? ImageRef { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Resolved image digest.
|
||||
/// Format: "sha256:..."
|
||||
/// </summary>
|
||||
public string? ImageDigest { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Container labels.
|
||||
/// </summary>
|
||||
public IReadOnlyDictionary<string, string> Labels { get; init; } = new Dictionary<string, string>();
|
||||
|
||||
/// <summary>
|
||||
/// Container start time.
|
||||
/// </summary>
|
||||
public DateTimeOffset? StartedAt { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Process IDs running in this container (if known).
|
||||
/// </summary>
|
||||
public IReadOnlyList<int> Pids { get; init; } = [];
|
||||
|
||||
/// <summary>
|
||||
/// SBOM component PURLs for this container's image (if available).
|
||||
/// </summary>
|
||||
public IReadOnlyList<string> ComponentPurls { get; init; } = [];
|
||||
}
|
||||
@@ -0,0 +1,175 @@
|
||||
// <copyright file="IImageDigestResolver.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Enrichment;
|
||||
|
||||
/// <summary>
|
||||
/// Interface for resolving image references to digests.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Implementations may use local manifest caches or registry APIs.
|
||||
/// </remarks>
|
||||
public interface IImageDigestResolver
|
||||
{
|
||||
/// <summary>
|
||||
/// Resolve an image reference to its digest.
|
||||
/// </summary>
|
||||
/// <param name="imageRef">
|
||||
/// Image reference. May be:
|
||||
/// - Full reference with tag: "registry.io/repo/image:tag"
|
||||
/// - Full reference with digest: "registry.io/repo/image@sha256:..."
|
||||
/// - Short reference: "image:tag" or "repo/image:tag"
|
||||
/// </param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>
|
||||
/// Image digest in format "sha256:..." if resolved; null otherwise.
|
||||
/// If imageRef is already a digest reference, returns the digest portion.
|
||||
/// </returns>
|
||||
Task<string?> ResolveDigestAsync(string imageRef, CancellationToken ct = default);
|
||||
|
||||
/// <summary>
|
||||
/// Batch resolve multiple image references.
|
||||
/// </summary>
|
||||
/// <param name="imageRefs">Image references to resolve.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Dictionary mapping image refs to digests (null for unresolved).</returns>
|
||||
Task<IReadOnlyDictionary<string, string?>> ResolveDigestBatchAsync(
|
||||
IEnumerable<string> imageRefs,
|
||||
CancellationToken ct = default);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Caching decorator for IImageDigestResolver.
|
||||
/// </summary>
|
||||
public sealed class CachingImageDigestResolver : IImageDigestResolver
|
||||
{
|
||||
private readonly IImageDigestResolver _inner;
|
||||
private readonly Dictionary<string, (string? Digest, DateTimeOffset CachedAt)> _cache;
|
||||
private readonly TimeSpan _cacheTtl;
|
||||
private readonly object _lock = new();
|
||||
|
||||
public CachingImageDigestResolver(IImageDigestResolver inner, TimeSpan? cacheTtl = null)
|
||||
{
|
||||
_inner = inner;
|
||||
_cache = new(StringComparer.OrdinalIgnoreCase);
|
||||
_cacheTtl = cacheTtl ?? TimeSpan.FromMinutes(5);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<string?> ResolveDigestAsync(string imageRef, CancellationToken ct = default)
|
||||
{
|
||||
// Check cache
|
||||
lock (_lock)
|
||||
{
|
||||
if (_cache.TryGetValue(imageRef, out var entry))
|
||||
{
|
||||
if (DateTimeOffset.UtcNow - entry.CachedAt < _cacheTtl)
|
||||
{
|
||||
return entry.Digest;
|
||||
}
|
||||
|
||||
_cache.Remove(imageRef);
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve and cache
|
||||
var digest = await _inner.ResolveDigestAsync(imageRef, ct).ConfigureAwait(false);
|
||||
|
||||
lock (_lock)
|
||||
{
|
||||
_cache[imageRef] = (digest, DateTimeOffset.UtcNow);
|
||||
}
|
||||
|
||||
return digest;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<IReadOnlyDictionary<string, string?>> ResolveDigestBatchAsync(
|
||||
IEnumerable<string> imageRefs,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
var results = new Dictionary<string, string?>(StringComparer.OrdinalIgnoreCase);
|
||||
var toResolve = new List<string>();
|
||||
|
||||
// Check cache for each
|
||||
lock (_lock)
|
||||
{
|
||||
foreach (var imageRef in imageRefs)
|
||||
{
|
||||
if (_cache.TryGetValue(imageRef, out var entry) &&
|
||||
DateTimeOffset.UtcNow - entry.CachedAt < _cacheTtl)
|
||||
{
|
||||
results[imageRef] = entry.Digest;
|
||||
}
|
||||
else
|
||||
{
|
||||
toResolve.Add(imageRef);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve missing
|
||||
if (toResolve.Count > 0)
|
||||
{
|
||||
var resolved = await _inner.ResolveDigestBatchAsync(toResolve, ct).ConfigureAwait(false);
|
||||
|
||||
lock (_lock)
|
||||
{
|
||||
foreach (var (imageRef, digest) in resolved)
|
||||
{
|
||||
results[imageRef] = digest;
|
||||
_cache[imageRef] = (digest, DateTimeOffset.UtcNow);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Simple digest resolver that extracts digests from digest references
|
||||
/// and returns null for tag references (requires registry access).
|
||||
/// </summary>
|
||||
public sealed class LocalImageDigestResolver : IImageDigestResolver
|
||||
{
|
||||
/// <inheritdoc />
|
||||
public Task<string?> ResolveDigestAsync(string imageRef, CancellationToken ct = default)
|
||||
{
|
||||
if (string.IsNullOrEmpty(imageRef))
|
||||
{
|
||||
return Task.FromResult<string?>(null);
|
||||
}
|
||||
|
||||
// Check if already a digest reference
|
||||
var atIndex = imageRef.IndexOf('@');
|
||||
if (atIndex > 0 && atIndex < imageRef.Length - 1)
|
||||
{
|
||||
var digest = imageRef[(atIndex + 1)..];
|
||||
if (digest.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase) ||
|
||||
digest.StartsWith("sha512:", StringComparison.OrdinalIgnoreCase))
|
||||
{
|
||||
return Task.FromResult<string?>(digest);
|
||||
}
|
||||
}
|
||||
|
||||
// Tag reference - cannot resolve without registry access
|
||||
return Task.FromResult<string?>(null);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<IReadOnlyDictionary<string, string?>> ResolveDigestBatchAsync(
|
||||
IEnumerable<string> imageRefs,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
var results = new Dictionary<string, string?>(StringComparer.OrdinalIgnoreCase);
|
||||
|
||||
foreach (var imageRef in imageRefs)
|
||||
{
|
||||
results[imageRef] = await ResolveDigestAsync(imageRef, ct).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,113 @@
|
||||
// <copyright file="ISbomComponentProvider.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Enrichment;
|
||||
|
||||
/// <summary>
|
||||
/// Interface for retrieving SBOM component PURLs by image digest.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Implementations typically query the SBOM service to get component metadata.
|
||||
/// The returned PURLs can be used to correlate runtime evidence with static SBOM data.
|
||||
/// </remarks>
|
||||
public interface ISbomComponentProvider
|
||||
{
|
||||
/// <summary>
|
||||
/// Get top-level component PURLs for an image.
|
||||
/// </summary>
|
||||
/// <param name="imageDigest">Image digest (format: "sha256:...").</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>List of component PURLs; empty list if SBOM not found or no components.</returns>
|
||||
Task<IReadOnlyList<string>> GetComponentPurlsAsync(string imageDigest, CancellationToken ct = default);
|
||||
|
||||
/// <summary>
|
||||
/// Check if SBOM data is available for an image.
|
||||
/// </summary>
|
||||
/// <param name="imageDigest">Image digest (format: "sha256:...").</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>True if SBOM exists for this image.</returns>
|
||||
Task<bool> HasSbomAsync(string imageDigest, CancellationToken ct = default);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Null implementation that returns empty results.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Use this when SBOM service integration is not available.
|
||||
/// </remarks>
|
||||
public sealed class NullSbomComponentProvider : ISbomComponentProvider
|
||||
{
|
||||
/// <summary>
|
||||
/// Singleton instance.
|
||||
/// </summary>
|
||||
public static readonly NullSbomComponentProvider Instance = new();
|
||||
|
||||
private NullSbomComponentProvider()
|
||||
{
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<IReadOnlyList<string>> GetComponentPurlsAsync(string imageDigest, CancellationToken ct = default)
|
||||
{
|
||||
return Task.FromResult<IReadOnlyList<string>>([]);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<bool> HasSbomAsync(string imageDigest, CancellationToken ct = default)
|
||||
{
|
||||
return Task.FromResult(false);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Caching decorator for ISbomComponentProvider.
|
||||
/// </summary>
|
||||
public sealed class CachingSbomComponentProvider : ISbomComponentProvider
|
||||
{
|
||||
private readonly ISbomComponentProvider _inner;
|
||||
private readonly Dictionary<string, (IReadOnlyList<string> Purls, DateTimeOffset CachedAt)> _cache;
|
||||
private readonly TimeSpan _cacheTtl;
|
||||
private readonly object _lock = new();
|
||||
|
||||
public CachingSbomComponentProvider(ISbomComponentProvider inner, TimeSpan? cacheTtl = null)
|
||||
{
|
||||
_inner = inner;
|
||||
_cache = new(StringComparer.OrdinalIgnoreCase);
|
||||
_cacheTtl = cacheTtl ?? TimeSpan.FromMinutes(10);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<IReadOnlyList<string>> GetComponentPurlsAsync(string imageDigest, CancellationToken ct = default)
|
||||
{
|
||||
// Check cache
|
||||
lock (_lock)
|
||||
{
|
||||
if (_cache.TryGetValue(imageDigest, out var entry))
|
||||
{
|
||||
if (DateTimeOffset.UtcNow - entry.CachedAt < _cacheTtl)
|
||||
{
|
||||
return entry.Purls;
|
||||
}
|
||||
|
||||
_cache.Remove(imageDigest);
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve and cache
|
||||
var purls = await _inner.GetComponentPurlsAsync(imageDigest, ct).ConfigureAwait(false);
|
||||
|
||||
lock (_lock)
|
||||
{
|
||||
_cache[imageDigest] = (purls, DateTimeOffset.UtcNow);
|
||||
}
|
||||
|
||||
return purls;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<bool> HasSbomAsync(string imageDigest, CancellationToken ct = default)
|
||||
{
|
||||
return _inner.HasSbomAsync(imageDigest, ct);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,263 @@
|
||||
// <copyright file="RuntimeEventEnricher.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Enrichment;
|
||||
|
||||
using System.Collections.Concurrent;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Signals.Ebpf.Cgroup;
|
||||
using StellaOps.Signals.Ebpf.Schema;
|
||||
|
||||
/// <summary>
|
||||
/// Enriches raw eBPF events with container and image metadata.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Data flow:
|
||||
/// <code>
|
||||
/// Raw eBPF Event (pid, cgroup_id)
|
||||
/// ↓
|
||||
/// Cgroup Resolver (cgroup_id → container_id)
|
||||
/// ↓
|
||||
/// Container State (container_id → image_ref)
|
||||
/// ↓
|
||||
/// Image Digest Resolver (image_ref → image_digest)
|
||||
/// ↓
|
||||
/// Enriched Event (+ container_id, image_digest)
|
||||
/// </code>
|
||||
/// </remarks>
|
||||
public sealed class RuntimeEventEnricher : IDisposable
|
||||
{
|
||||
private readonly ILogger<RuntimeEventEnricher> _logger;
|
||||
private readonly IContainerIdentityResolver _identityResolver;
|
||||
private readonly IContainerStateProvider? _stateProvider;
|
||||
private readonly IImageDigestResolver? _digestResolver;
|
||||
private readonly ISbomComponentProvider? _sbomProvider;
|
||||
private readonly ConcurrentDictionary<string, EnrichmentCache> _enrichmentCache;
|
||||
private readonly TimeSpan _cacheTtl;
|
||||
private bool _disposed;
|
||||
|
||||
public RuntimeEventEnricher(
|
||||
ILogger<RuntimeEventEnricher> logger,
|
||||
IContainerIdentityResolver identityResolver,
|
||||
IContainerStateProvider? stateProvider = null,
|
||||
IImageDigestResolver? digestResolver = null,
|
||||
ISbomComponentProvider? sbomProvider = null,
|
||||
TimeSpan? cacheTtl = null)
|
||||
{
|
||||
_logger = logger;
|
||||
_identityResolver = identityResolver;
|
||||
_stateProvider = stateProvider;
|
||||
_digestResolver = digestResolver;
|
||||
_sbomProvider = sbomProvider;
|
||||
_enrichmentCache = new();
|
||||
_cacheTtl = cacheTtl ?? TimeSpan.FromMinutes(5);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Enrich a runtime evidence record with container and image metadata.
|
||||
/// </summary>
|
||||
/// <param name="record">Raw evidence record.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Enriched record with container_id and image_digest populated.</returns>
|
||||
public async Task<RuntimeEvidenceRecord> EnrichAsync(
|
||||
RuntimeEvidenceRecord record,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
// Already enriched?
|
||||
if (!string.IsNullOrEmpty(record.ContainerId) && !string.IsNullOrEmpty(record.ImageDigest))
|
||||
{
|
||||
return record;
|
||||
}
|
||||
|
||||
// Try to resolve container identity
|
||||
ContainerIdentity? identity = null;
|
||||
string? containerId = record.ContainerId;
|
||||
string? imageDigest = record.ImageDigest;
|
||||
|
||||
if (string.IsNullOrEmpty(containerId))
|
||||
{
|
||||
// Try cgroup ID first, then PID
|
||||
if (record.CgroupId > 0)
|
||||
{
|
||||
identity = await _identityResolver.ResolveByCgroupIdAsync(record.CgroupId, ct)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
if (identity == null && record.Pid > 0)
|
||||
{
|
||||
identity = await _identityResolver.ResolveByPidAsync(record.Pid, ct)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
if (identity != null)
|
||||
{
|
||||
containerId = identity.ContainerId;
|
||||
|
||||
// Register mapping for future lookups
|
||||
if (record.CgroupId > 0)
|
||||
{
|
||||
_identityResolver.RegisterCgroupMapping(record.CgroupId, identity);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try to get image digest
|
||||
if (string.IsNullOrEmpty(imageDigest) && !string.IsNullOrEmpty(containerId))
|
||||
{
|
||||
var cached = await GetCachedEnrichmentAsync(containerId, ct).ConfigureAwait(false);
|
||||
imageDigest = cached?.ImageDigest;
|
||||
}
|
||||
|
||||
// Build enriched record
|
||||
return record with
|
||||
{
|
||||
ContainerId = containerId ?? FormatUnknownContainer(record.CgroupId),
|
||||
ImageDigest = imageDigest,
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Enrich multiple records in batch (more efficient for shared containers).
|
||||
/// </summary>
|
||||
/// <param name="records">Raw evidence records.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Enriched records.</returns>
|
||||
public async IAsyncEnumerable<RuntimeEvidenceRecord> EnrichBatchAsync(
|
||||
IAsyncEnumerable<RuntimeEvidenceRecord> records,
|
||||
[System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken ct = default)
|
||||
{
|
||||
await foreach (var record in records.WithCancellation(ct))
|
||||
{
|
||||
yield return await EnrichAsync(record, ct).ConfigureAwait(false);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Pre-warm the enrichment cache for a container.
|
||||
/// </summary>
|
||||
/// <param name="containerId">Container ID.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
public async Task PrewarmCacheAsync(string containerId, CancellationToken ct = default)
|
||||
{
|
||||
_ = await GetCachedEnrichmentAsync(containerId, ct, forceRefresh: true).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Invalidate cached enrichment data for a container.
|
||||
/// </summary>
|
||||
/// <param name="containerId">Container ID.</param>
|
||||
public void InvalidateCache(string containerId)
|
||||
{
|
||||
_enrichmentCache.TryRemove(containerId, out _);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get cached or fresh enrichment data for a container.
|
||||
/// </summary>
|
||||
private async Task<EnrichmentCache?> GetCachedEnrichmentAsync(
|
||||
string containerId,
|
||||
CancellationToken ct,
|
||||
bool forceRefresh = false)
|
||||
{
|
||||
// Check cache
|
||||
if (!forceRefresh && _enrichmentCache.TryGetValue(containerId, out var cached))
|
||||
{
|
||||
if (DateTimeOffset.UtcNow - cached.CachedAt < _cacheTtl)
|
||||
{
|
||||
return cached;
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch fresh data
|
||||
EnrichmentCache? enrichment = null;
|
||||
|
||||
try
|
||||
{
|
||||
string? imageRef = null;
|
||||
string? imageDigest = null;
|
||||
|
||||
// Get container metadata
|
||||
if (_stateProvider != null)
|
||||
{
|
||||
var metadata = await _stateProvider.GetContainerMetadataAsync(containerId, ct)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
if (metadata != null)
|
||||
{
|
||||
imageRef = metadata.ImageRef;
|
||||
imageDigest = metadata.ImageDigest;
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve image digest if needed
|
||||
if (string.IsNullOrEmpty(imageDigest) && !string.IsNullOrEmpty(imageRef) && _digestResolver != null)
|
||||
{
|
||||
imageDigest = await _digestResolver.ResolveDigestAsync(imageRef, ct).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
// Get SBOM component PURLs if digest available
|
||||
IReadOnlyList<string>? componentPurls = null;
|
||||
if (!string.IsNullOrEmpty(imageDigest) && _sbomProvider != null)
|
||||
{
|
||||
try
|
||||
{
|
||||
componentPurls = await _sbomProvider.GetComponentPurlsAsync(imageDigest, ct)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogDebug(ex, "Failed to get SBOM components for {ImageDigest}", imageDigest);
|
||||
}
|
||||
}
|
||||
|
||||
enrichment = new EnrichmentCache
|
||||
{
|
||||
ContainerId = containerId,
|
||||
ImageRef = imageRef,
|
||||
ImageDigest = imageDigest,
|
||||
ComponentPurls = componentPurls ?? [],
|
||||
CachedAt = DateTimeOffset.UtcNow,
|
||||
};
|
||||
|
||||
_enrichmentCache[containerId] = enrichment;
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogWarning(ex, "Failed to fetch enrichment data for container {ContainerId}", containerId);
|
||||
}
|
||||
|
||||
return enrichment;
|
||||
}
|
||||
|
||||
private static string? FormatUnknownContainer(ulong cgroupId)
|
||||
{
|
||||
if (cgroupId == 0)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
return $"unknown:{cgroupId}";
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public void Dispose()
|
||||
{
|
||||
if (!_disposed)
|
||||
{
|
||||
_enrichmentCache.Clear();
|
||||
_disposed = true;
|
||||
}
|
||||
}
|
||||
|
||||
private sealed record EnrichmentCache
|
||||
{
|
||||
public required string ContainerId { get; init; }
|
||||
public string? ImageRef { get; init; }
|
||||
public string? ImageDigest { get; init; }
|
||||
public IReadOnlyList<string> ComponentPurls { get; init; } = [];
|
||||
public required DateTimeOffset CachedAt { get; init; }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,435 @@
|
||||
// <copyright file="RuntimeEvidenceNdjsonWriter.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Output;
|
||||
|
||||
using System.Buffers;
|
||||
using System.IO.Compression;
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using System.Text.Json.Serialization;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Signals.Ebpf.Schema;
|
||||
|
||||
/// <summary>
|
||||
/// High-performance, deterministic NDJSON writer for runtime evidence.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Features:
|
||||
/// - Deterministic output (sorted keys, canonical JSON)
|
||||
/// - Streaming writes with configurable buffer
|
||||
/// - Size-based and time-based rotation
|
||||
/// - Optional gzip compression
|
||||
/// - Rolling hash computation (BLAKE3-like using SHA256 incremental)
|
||||
/// </remarks>
|
||||
public sealed class RuntimeEvidenceNdjsonWriter : IDisposable, IAsyncDisposable
|
||||
{
|
||||
private readonly ILogger<RuntimeEvidenceNdjsonWriter> _logger;
|
||||
private readonly string _outputDirectory;
|
||||
private readonly NdjsonWriterOptions _options;
|
||||
private readonly JsonSerializerOptions _jsonOptions;
|
||||
private readonly SemaphoreSlim _writeLock = new(1, 1);
|
||||
|
||||
private Stream? _currentStream;
|
||||
private StreamWriter? _currentWriter;
|
||||
private string? _currentFilePath;
|
||||
private long _currentSize;
|
||||
private long _eventCount;
|
||||
private DateTimeOffset _chunkStartTime;
|
||||
private int _chunkSequence;
|
||||
private IncrementalHash? _hashContext;
|
||||
private string? _previousChunkHash;
|
||||
private bool _disposed;
|
||||
|
||||
/// <summary>
|
||||
/// Event raised when a chunk is rotated.
|
||||
/// </summary>
|
||||
public event Func<ChunkRotatedEventArgs, CancellationToken, Task>? ChunkRotated;
|
||||
|
||||
public RuntimeEvidenceNdjsonWriter(
|
||||
ILogger<RuntimeEvidenceNdjsonWriter> logger,
|
||||
string outputDirectory,
|
||||
NdjsonWriterOptions? options = null)
|
||||
{
|
||||
_logger = logger;
|
||||
_outputDirectory = outputDirectory;
|
||||
_options = options ?? new NdjsonWriterOptions();
|
||||
|
||||
// Configure JSON serialization for determinism
|
||||
_jsonOptions = new JsonSerializerOptions
|
||||
{
|
||||
PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
|
||||
DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull,
|
||||
WriteIndented = false,
|
||||
// Sort properties alphabetically for determinism
|
||||
PropertyNameCaseInsensitive = false,
|
||||
Encoder = System.Text.Encodings.Web.JavaScriptEncoder.UnsafeRelaxedJsonEscaping,
|
||||
};
|
||||
|
||||
Directory.CreateDirectory(_outputDirectory);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write an evidence record.
|
||||
/// </summary>
|
||||
public async Task WriteAsync(RuntimeEvidenceRecord record, CancellationToken ct = default)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
await _writeLock.WaitAsync(ct);
|
||||
try
|
||||
{
|
||||
await EnsureStreamAsync(ct);
|
||||
|
||||
// Serialize to JSON
|
||||
var json = JsonSerializer.Serialize(record, _jsonOptions);
|
||||
var bytes = Encoding.UTF8.GetBytes(json);
|
||||
|
||||
// Check if rotation is needed
|
||||
if (ShouldRotate(bytes.Length))
|
||||
{
|
||||
await RotateAsync(ct);
|
||||
await EnsureStreamAsync(ct);
|
||||
}
|
||||
|
||||
// Write to stream
|
||||
await _currentWriter!.WriteLineAsync(json);
|
||||
_currentSize += bytes.Length + 1; // +1 for newline
|
||||
_eventCount++;
|
||||
|
||||
// Update hash
|
||||
_hashContext?.AppendData(bytes);
|
||||
_hashContext?.AppendData("\n"u8);
|
||||
}
|
||||
finally
|
||||
{
|
||||
_writeLock.Release();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write multiple evidence records.
|
||||
/// </summary>
|
||||
public async Task WriteBatchAsync(
|
||||
IEnumerable<RuntimeEvidenceRecord> records,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
foreach (var record in records)
|
||||
{
|
||||
await WriteAsync(record, ct);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Flush and finalize the current chunk.
|
||||
/// </summary>
|
||||
public async Task FlushAsync(CancellationToken ct = default)
|
||||
{
|
||||
await _writeLock.WaitAsync(ct);
|
||||
try
|
||||
{
|
||||
if (_currentWriter != null)
|
||||
{
|
||||
await _currentWriter.FlushAsync(ct);
|
||||
}
|
||||
}
|
||||
finally
|
||||
{
|
||||
_writeLock.Release();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Force rotation of the current chunk.
|
||||
/// </summary>
|
||||
public async Task RotateAsync(CancellationToken ct = default)
|
||||
{
|
||||
if (_currentStream == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
await _writeLock.WaitAsync(ct);
|
||||
try
|
||||
{
|
||||
await CloseCurrentChunkAsync(ct);
|
||||
}
|
||||
finally
|
||||
{
|
||||
_writeLock.Release();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get current chunk statistics.
|
||||
/// </summary>
|
||||
public ChunkStatistics GetCurrentChunkStats()
|
||||
{
|
||||
return new ChunkStatistics
|
||||
{
|
||||
FilePath = _currentFilePath,
|
||||
Size = _currentSize,
|
||||
EventCount = _eventCount,
|
||||
StartTime = _chunkStartTime,
|
||||
Duration = DateTimeOffset.UtcNow - _chunkStartTime,
|
||||
};
|
||||
}
|
||||
|
||||
private bool ShouldRotate(int pendingBytes)
|
||||
{
|
||||
// Size-based rotation
|
||||
if (_currentSize + pendingBytes > _options.MaxChunkSizeBytes)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
// Time-based rotation
|
||||
if (_options.MaxChunkDuration.HasValue &&
|
||||
DateTimeOffset.UtcNow - _chunkStartTime > _options.MaxChunkDuration.Value)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
private async Task EnsureStreamAsync(CancellationToken ct)
|
||||
{
|
||||
if (_currentStream != null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
_chunkSequence++;
|
||||
_chunkStartTime = DateTimeOffset.UtcNow;
|
||||
_currentSize = 0;
|
||||
_eventCount = 0;
|
||||
|
||||
var timestamp = _chunkStartTime.ToString("yyyyMMddHHmmss");
|
||||
var filename = $"evidence-{timestamp}-{_chunkSequence:D6}.ndjson";
|
||||
|
||||
if (_options.UseGzipCompression)
|
||||
{
|
||||
filename += ".gz";
|
||||
}
|
||||
|
||||
_currentFilePath = Path.Combine(_outputDirectory, filename);
|
||||
|
||||
_logger.LogInformation("Starting new evidence chunk: {FilePath}", _currentFilePath);
|
||||
|
||||
var fileStream = new FileStream(
|
||||
_currentFilePath,
|
||||
FileMode.Create,
|
||||
FileAccess.Write,
|
||||
FileShare.Read,
|
||||
bufferSize: _options.BufferSize,
|
||||
FileOptions.Asynchronous);
|
||||
|
||||
if (_options.UseGzipCompression)
|
||||
{
|
||||
_currentStream = new GZipStream(
|
||||
fileStream,
|
||||
CompressionLevel.Optimal,
|
||||
leaveOpen: false);
|
||||
}
|
||||
else
|
||||
{
|
||||
_currentStream = fileStream;
|
||||
}
|
||||
|
||||
_currentWriter = new StreamWriter(_currentStream, Encoding.UTF8, _options.BufferSize, leaveOpen: true);
|
||||
_hashContext = IncrementalHash.CreateHash(HashAlgorithmName.SHA256);
|
||||
}
|
||||
|
||||
private async Task CloseCurrentChunkAsync(CancellationToken ct)
|
||||
{
|
||||
if (_currentWriter == null || _currentStream == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
await _currentWriter.FlushAsync(ct);
|
||||
await _currentWriter.DisposeAsync();
|
||||
_currentWriter = null;
|
||||
|
||||
await _currentStream.FlushAsync(ct);
|
||||
await _currentStream.DisposeAsync();
|
||||
_currentStream = null;
|
||||
|
||||
// Finalize hash
|
||||
var hashBytes = _hashContext?.GetCurrentHash() ?? Array.Empty<byte>();
|
||||
var hashHex = Convert.ToHexString(hashBytes).ToLowerInvariant();
|
||||
_hashContext?.Dispose();
|
||||
_hashContext = null;
|
||||
|
||||
var stats = new ChunkStatistics
|
||||
{
|
||||
FilePath = _currentFilePath!,
|
||||
Size = _currentSize,
|
||||
EventCount = _eventCount,
|
||||
StartTime = _chunkStartTime,
|
||||
Duration = DateTimeOffset.UtcNow - _chunkStartTime,
|
||||
ContentHash = $"sha256:{hashHex}",
|
||||
ChunkSequence = _chunkSequence,
|
||||
};
|
||||
|
||||
_logger.LogInformation(
|
||||
"Closed evidence chunk: {FilePath}, {EventCount} events, {Size} bytes, hash: {Hash}",
|
||||
_currentFilePath,
|
||||
_eventCount,
|
||||
_currentSize,
|
||||
stats.ContentHash);
|
||||
|
||||
// Notify listeners
|
||||
if (ChunkRotated != null)
|
||||
{
|
||||
var args = new ChunkRotatedEventArgs
|
||||
{
|
||||
Statistics = stats,
|
||||
PreviousChunkHash = _previousChunkHash,
|
||||
};
|
||||
|
||||
try
|
||||
{
|
||||
await ChunkRotated(args, ct);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogWarning(ex, "ChunkRotated handler failed");
|
||||
}
|
||||
}
|
||||
|
||||
// Track for next chunk's chain linking
|
||||
_previousChunkHash = stats.ContentHash;
|
||||
_currentFilePath = null;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public void Dispose()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
_writeLock.Wait();
|
||||
try
|
||||
{
|
||||
_currentWriter?.Dispose();
|
||||
_currentStream?.Dispose();
|
||||
_hashContext?.Dispose();
|
||||
_disposed = true;
|
||||
}
|
||||
finally
|
||||
{
|
||||
_writeLock.Release();
|
||||
_writeLock.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async ValueTask DisposeAsync()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
await _writeLock.WaitAsync();
|
||||
try
|
||||
{
|
||||
await CloseCurrentChunkAsync(CancellationToken.None);
|
||||
_disposed = true;
|
||||
}
|
||||
finally
|
||||
{
|
||||
_writeLock.Release();
|
||||
_writeLock.Dispose();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Options for NDJSON writer.
|
||||
/// </summary>
|
||||
public sealed record NdjsonWriterOptions
|
||||
{
|
||||
/// <summary>
|
||||
/// Maximum chunk size in bytes before rotation (default: 100MB).
|
||||
/// </summary>
|
||||
public long MaxChunkSizeBytes { get; init; } = 100 * 1024 * 1024;
|
||||
|
||||
/// <summary>
|
||||
/// Maximum chunk duration before rotation (default: 1 hour).
|
||||
/// </summary>
|
||||
public TimeSpan? MaxChunkDuration { get; init; } = TimeSpan.FromHours(1);
|
||||
|
||||
/// <summary>
|
||||
/// Write buffer size (default: 64KB).
|
||||
/// </summary>
|
||||
public int BufferSize { get; init; } = 64 * 1024;
|
||||
|
||||
/// <summary>
|
||||
/// Whether to use gzip compression (default: false).
|
||||
/// </summary>
|
||||
public bool UseGzipCompression { get; init; } = false;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Statistics for a completed chunk.
|
||||
/// </summary>
|
||||
public sealed record ChunkStatistics
|
||||
{
|
||||
/// <summary>
|
||||
/// Chunk file path.
|
||||
/// </summary>
|
||||
public required string? FilePath { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Chunk size in bytes.
|
||||
/// </summary>
|
||||
public required long Size { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Number of events in chunk.
|
||||
/// </summary>
|
||||
public required long EventCount { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// When the chunk was started.
|
||||
/// </summary>
|
||||
public required DateTimeOffset StartTime { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Chunk duration.
|
||||
/// </summary>
|
||||
public required TimeSpan Duration { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Content hash (sha256:hex).
|
||||
/// </summary>
|
||||
public string? ContentHash { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Chunk sequence number.
|
||||
/// </summary>
|
||||
public int ChunkSequence { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Event args for chunk rotation.
|
||||
/// </summary>
|
||||
public sealed record ChunkRotatedEventArgs
|
||||
{
|
||||
/// <summary>
|
||||
/// Statistics for the completed chunk.
|
||||
/// </summary>
|
||||
public required ChunkStatistics Statistics { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Hash of the previous chunk (for chain linking).
|
||||
/// </summary>
|
||||
public string? PreviousChunkHash { get; init; }
|
||||
}
|
||||
@@ -0,0 +1,379 @@
|
||||
// <copyright file="EventParser.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Parsers;
|
||||
|
||||
using System.Buffers.Binary;
|
||||
using System.Runtime.InteropServices;
|
||||
using System.Text;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Signals.Ebpf.Schema;
|
||||
using StellaOps.Signals.Ebpf.Symbols;
|
||||
|
||||
/// <summary>
|
||||
/// Parses raw binary events from eBPF ring buffer into typed records.
|
||||
/// </summary>
|
||||
public sealed class EventParser
|
||||
{
|
||||
private readonly ILogger<EventParser> _logger;
|
||||
private readonly ISymbolResolver _symbolResolver;
|
||||
|
||||
/// <summary>
|
||||
/// Minimum event size (header only).
|
||||
/// </summary>
|
||||
private const int MinEventSize = 40; // sizeof(EventHeader)
|
||||
|
||||
public EventParser(ILogger<EventParser> logger, ISymbolResolver symbolResolver)
|
||||
{
|
||||
_logger = logger;
|
||||
_symbolResolver = symbolResolver;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse a raw event from the ring buffer.
|
||||
/// </summary>
|
||||
/// <param name="data">Raw event bytes.</param>
|
||||
/// <returns>Parsed evidence record, or null if parsing failed.</returns>
|
||||
public RuntimeEvidenceRecord? Parse(ReadOnlySpan<byte> data)
|
||||
{
|
||||
if (data.Length < MinEventSize)
|
||||
{
|
||||
_logger.LogWarning("Event too small: {Size} bytes", data.Length);
|
||||
return null;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
// Read header to determine event type
|
||||
var header = ParseHeader(data);
|
||||
|
||||
return header.EventType switch
|
||||
{
|
||||
EbpfEventType.FileOpen => ParseFileOpenEvent(data, header),
|
||||
EbpfEventType.ProcessExec => ParseProcessExecEvent(data, header),
|
||||
EbpfEventType.TcpState => ParseTcpStateEvent(data, header),
|
||||
EbpfEventType.NetConnect => ParseNetConnectEvent(data, header),
|
||||
EbpfEventType.SslOp => ParseSslOpEvent(data, header),
|
||||
EbpfEventType.FunctionCall => ParseFunctionCallEvent(data, header),
|
||||
_ => null,
|
||||
};
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogWarning(ex, "Failed to parse event");
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse the common event header.
|
||||
/// </summary>
|
||||
private static ParsedHeader ParseHeader(ReadOnlySpan<byte> data)
|
||||
{
|
||||
return new ParsedHeader
|
||||
{
|
||||
TimestampNs = BinaryPrimitives.ReadUInt64LittleEndian(data[0..8]),
|
||||
Pid = BinaryPrimitives.ReadUInt32LittleEndian(data[8..12]),
|
||||
Tid = BinaryPrimitives.ReadUInt32LittleEndian(data[12..16]),
|
||||
CgroupId = BinaryPrimitives.ReadUInt64LittleEndian(data[16..24]),
|
||||
EventType = (EbpfEventType)data[24],
|
||||
Comm = ReadNullTerminatedString(data[32..48]),
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse file open event.
|
||||
/// </summary>
|
||||
private RuntimeEvidenceRecord ParseFileOpenEvent(ReadOnlySpan<byte> data, ParsedHeader header)
|
||||
{
|
||||
// Offsets after header (48 bytes)
|
||||
const int HeaderSize = 48;
|
||||
const int FilenameOffset = HeaderSize + 8; // After dfd(4) + flags(4) + mode(2) + reserved(2)
|
||||
const int MaxFilenameLen = 256;
|
||||
|
||||
var dfd = BinaryPrimitives.ReadInt32LittleEndian(data[HeaderSize..(HeaderSize + 4)]);
|
||||
var flags = BinaryPrimitives.ReadInt32LittleEndian(data[(HeaderSize + 4)..(HeaderSize + 8)]);
|
||||
var mode = BinaryPrimitives.ReadUInt16LittleEndian(data[(HeaderSize + 8)..(HeaderSize + 10)]);
|
||||
|
||||
var filenameEnd = Math.Min(data.Length, FilenameOffset + MaxFilenameLen);
|
||||
var filename = ReadNullTerminatedString(data[FilenameOffset..filenameEnd]);
|
||||
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = header.TimestampNs,
|
||||
Source = "sys_enter_openat",
|
||||
Pid = (int)header.Pid,
|
||||
Tid = (int)header.Tid,
|
||||
CgroupId = header.CgroupId,
|
||||
Comm = header.Comm,
|
||||
Event = new FileOpenEvent
|
||||
{
|
||||
Path = filename,
|
||||
Flags = flags,
|
||||
Dfd = dfd,
|
||||
Mode = mode,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse process exec event.
|
||||
/// </summary>
|
||||
private RuntimeEvidenceRecord ParseProcessExecEvent(ReadOnlySpan<byte> data, ParsedHeader header)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
const int FilenameOffset = HeaderSize + 8; // After ppid(4) + reserved(4)
|
||||
const int MaxFilenameLen = 256;
|
||||
const int Argv0Offset = FilenameOffset + MaxFilenameLen;
|
||||
const int MaxArgv0Len = 128;
|
||||
|
||||
var ppid = BinaryPrimitives.ReadUInt32LittleEndian(data[HeaderSize..(HeaderSize + 4)]);
|
||||
|
||||
var filenameEnd = Math.Min(data.Length, FilenameOffset + MaxFilenameLen);
|
||||
var filename = ReadNullTerminatedString(data[FilenameOffset..filenameEnd]);
|
||||
|
||||
string? argv0 = null;
|
||||
if (data.Length > Argv0Offset)
|
||||
{
|
||||
var argv0End = Math.Min(data.Length, Argv0Offset + MaxArgv0Len);
|
||||
argv0 = ReadNullTerminatedString(data[Argv0Offset..argv0End]);
|
||||
if (string.IsNullOrEmpty(argv0))
|
||||
{
|
||||
argv0 = null;
|
||||
}
|
||||
}
|
||||
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = header.TimestampNs,
|
||||
Source = "sched_process_exec",
|
||||
Pid = (int)header.Pid,
|
||||
Tid = (int)header.Tid,
|
||||
CgroupId = header.CgroupId,
|
||||
Comm = header.Comm,
|
||||
Event = new ProcessExecEvent
|
||||
{
|
||||
Filename = filename,
|
||||
Ppid = (int)ppid,
|
||||
Argv0 = argv0,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse TCP state change event.
|
||||
/// </summary>
|
||||
private RuntimeEvidenceRecord ParseTcpStateEvent(ReadOnlySpan<byte> data, ParsedHeader header)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
|
||||
var oldState = data[HeaderSize];
|
||||
var newState = data[HeaderSize + 1];
|
||||
var family = data[HeaderSize + 2];
|
||||
var sport = BinaryPrimitives.ReadUInt16LittleEndian(data[(HeaderSize + 4)..(HeaderSize + 6)]);
|
||||
var dport = BinaryPrimitives.ReadUInt16LittleEndian(data[(HeaderSize + 6)..(HeaderSize + 8)]);
|
||||
|
||||
string saddr, daddr;
|
||||
string familyStr;
|
||||
|
||||
if (family == 2) // AF_INET
|
||||
{
|
||||
var saddrV4 = BinaryPrimitives.ReadUInt32LittleEndian(data[(HeaderSize + 8)..(HeaderSize + 12)]);
|
||||
var daddrV4 = BinaryPrimitives.ReadUInt32LittleEndian(data[(HeaderSize + 12)..(HeaderSize + 16)]);
|
||||
saddr = IpAddressHelper.FormatIPv4(saddrV4);
|
||||
daddr = IpAddressHelper.FormatIPv4(daddrV4);
|
||||
familyStr = "inet";
|
||||
}
|
||||
else // AF_INET6
|
||||
{
|
||||
saddr = IpAddressHelper.FormatIPv6(data[(HeaderSize + 16)..(HeaderSize + 32)].ToArray());
|
||||
daddr = IpAddressHelper.FormatIPv6(data[(HeaderSize + 32)..(HeaderSize + 48)].ToArray());
|
||||
familyStr = "inet6";
|
||||
}
|
||||
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = header.TimestampNs,
|
||||
Source = "inet_sock_set_state",
|
||||
Pid = (int)header.Pid,
|
||||
Tid = (int)header.Tid,
|
||||
CgroupId = header.CgroupId,
|
||||
Comm = header.Comm,
|
||||
Event = new TcpStateEvent
|
||||
{
|
||||
OldState = TcpStateHelper.ToString(oldState),
|
||||
NewState = TcpStateHelper.ToString(newState),
|
||||
DestAddress = daddr,
|
||||
DestPort = dport,
|
||||
SourceAddress = saddr,
|
||||
SourcePort = sport,
|
||||
Family = familyStr,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse network connect event.
|
||||
/// </summary>
|
||||
private RuntimeEvidenceRecord ParseNetConnectEvent(ReadOnlySpan<byte> data, ParsedHeader header)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
|
||||
var fd = BinaryPrimitives.ReadInt32LittleEndian(data[HeaderSize..(HeaderSize + 4)]);
|
||||
var ret = BinaryPrimitives.ReadInt32LittleEndian(data[(HeaderSize + 4)..(HeaderSize + 8)]);
|
||||
var family = BinaryPrimitives.ReadUInt16LittleEndian(data[(HeaderSize + 8)..(HeaderSize + 10)]);
|
||||
var port = BinaryPrimitives.ReadUInt16LittleEndian(data[(HeaderSize + 10)..(HeaderSize + 12)]);
|
||||
|
||||
string addr;
|
||||
if (family == 2) // AF_INET
|
||||
{
|
||||
var addrV4 = BinaryPrimitives.ReadUInt32LittleEndian(data[(HeaderSize + 12)..(HeaderSize + 16)]);
|
||||
addr = IpAddressHelper.FormatIPv4(addrV4);
|
||||
}
|
||||
else
|
||||
{
|
||||
addr = IpAddressHelper.FormatIPv6(data[(HeaderSize + 12)..(HeaderSize + 28)].ToArray());
|
||||
}
|
||||
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = header.TimestampNs,
|
||||
Source = "uprobe:connect",
|
||||
Pid = (int)header.Pid,
|
||||
Tid = (int)header.Tid,
|
||||
CgroupId = header.CgroupId,
|
||||
Comm = header.Comm,
|
||||
Event = new NetConnectEvent
|
||||
{
|
||||
Fd = fd,
|
||||
Address = addr,
|
||||
Port = port,
|
||||
Success = ret == 0,
|
||||
Error = ret < 0 ? -ret : 0,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse SSL operation event.
|
||||
/// </summary>
|
||||
private RuntimeEvidenceRecord ParseSslOpEvent(ReadOnlySpan<byte> data, ParsedHeader header)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
|
||||
var sslPtr = BinaryPrimitives.ReadUInt64LittleEndian(data[HeaderSize..(HeaderSize + 8)]);
|
||||
var requestedBytes = BinaryPrimitives.ReadUInt32LittleEndian(data[(HeaderSize + 8)..(HeaderSize + 12)]);
|
||||
var actualBytes = BinaryPrimitives.ReadUInt32LittleEndian(data[(HeaderSize + 12)..(HeaderSize + 16)]);
|
||||
var operation = data[HeaderSize + 16];
|
||||
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = header.TimestampNs,
|
||||
Source = operation == 0 ? "uprobe:SSL_read" : "uprobe:SSL_write",
|
||||
Pid = (int)header.Pid,
|
||||
Tid = (int)header.Tid,
|
||||
CgroupId = header.CgroupId,
|
||||
Comm = header.Comm,
|
||||
Event = new SslOpEvent
|
||||
{
|
||||
Operation = operation == 0 ? "read" : "write",
|
||||
Bytes = (int)actualBytes,
|
||||
SslPtr = $"0x{sslPtr:X}",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse function call event.
|
||||
/// </summary>
|
||||
private RuntimeEvidenceRecord ParseFunctionCallEvent(ReadOnlySpan<byte> data, ParsedHeader header)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
const int MaxStackDepth = 16;
|
||||
|
||||
var funcAddr = BinaryPrimitives.ReadUInt64LittleEndian(data[HeaderSize..(HeaderSize + 8)]);
|
||||
var returnAddr = BinaryPrimitives.ReadUInt64LittleEndian(data[(HeaderSize + 8)..(HeaderSize + 16)]);
|
||||
|
||||
// Stack trace starts at HeaderSize + 16, each entry is 8 bytes
|
||||
var stackOffset = HeaderSize + 16;
|
||||
var stackDepthOffset = stackOffset + (MaxStackDepth * 8);
|
||||
var stackDepth = data.Length > stackDepthOffset ? data[stackDepthOffset] : (byte)0;
|
||||
var runtimeType = data.Length > stackDepthOffset + 1 ? data[stackDepthOffset + 1] : (byte)0;
|
||||
|
||||
List<string>? stack = null;
|
||||
if (stackDepth > 0)
|
||||
{
|
||||
stack = new List<string>(stackDepth);
|
||||
for (int i = 0; i < stackDepth && i < MaxStackDepth; i++)
|
||||
{
|
||||
var addr = BinaryPrimitives.ReadUInt64LittleEndian(
|
||||
data[(stackOffset + i * 8)..(stackOffset + i * 8 + 8)]);
|
||||
if (addr != 0)
|
||||
{
|
||||
stack.Add($"0x{addr:X}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try to resolve symbol
|
||||
var (symbol, library, purl) = _symbolResolver.Resolve((int)header.Pid, funcAddr);
|
||||
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = header.TimestampNs,
|
||||
Source = "uprobe:function_entry",
|
||||
Pid = (int)header.Pid,
|
||||
Tid = (int)header.Tid,
|
||||
CgroupId = header.CgroupId,
|
||||
Comm = header.Comm,
|
||||
Event = new FunctionCallEvent
|
||||
{
|
||||
Address = $"0x{funcAddr:X}",
|
||||
Symbol = symbol,
|
||||
Library = library,
|
||||
Runtime = GetRuntimeName(runtimeType),
|
||||
Stack = stack?.Count > 0 ? stack : null,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read a null-terminated string from a span.
|
||||
/// </summary>
|
||||
private static string ReadNullTerminatedString(ReadOnlySpan<byte> data)
|
||||
{
|
||||
var nullIndex = data.IndexOf((byte)0);
|
||||
var length = nullIndex >= 0 ? nullIndex : data.Length;
|
||||
return Encoding.UTF8.GetString(data[..length]);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get runtime type name.
|
||||
/// </summary>
|
||||
private static string? GetRuntimeName(byte runtimeType) => runtimeType switch
|
||||
{
|
||||
0 => "native",
|
||||
1 => "jvm",
|
||||
2 => "node",
|
||||
3 => "python",
|
||||
4 => "dotnet",
|
||||
5 => "go",
|
||||
6 => "ruby",
|
||||
255 => null,
|
||||
_ => null,
|
||||
};
|
||||
|
||||
/// <summary>
|
||||
/// Parsed header for internal use.
|
||||
/// </summary>
|
||||
private readonly struct ParsedHeader
|
||||
{
|
||||
public ulong TimestampNs { get; init; }
|
||||
public uint Pid { get; init; }
|
||||
public uint Tid { get; init; }
|
||||
public ulong CgroupId { get; init; }
|
||||
public EbpfEventType EventType { get; init; }
|
||||
public string Comm { get; init; }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,153 @@
|
||||
# SPDX-License-Identifier: BUSL-1.1
|
||||
# Stella Ops eBPF Probe Compilation
|
||||
#
|
||||
# Prerequisites:
|
||||
# - clang >= 10 (with BPF target support)
|
||||
# - llvm >= 10
|
||||
# - libbpf-dev or libbpf headers
|
||||
# - Linux kernel headers
|
||||
#
|
||||
# Usage:
|
||||
# make # Build all probes
|
||||
# make clean # Remove build artifacts
|
||||
# make install # Install to /usr/share/stellaops/probes
|
||||
# make BTF=0 # Build without BTF (legacy mode)
|
||||
|
||||
# Configuration
|
||||
CLANG ?= clang
|
||||
LLC ?= llc
|
||||
LLVM_STRIP ?= llvm-strip
|
||||
BPFTOOL ?= bpftool
|
||||
|
||||
# Build flags
|
||||
BTF ?= 1
|
||||
DEBUG ?= 0
|
||||
ARCH ?= $(shell uname -m | sed 's/x86_64/x86/' | sed 's/aarch64/arm64/')
|
||||
|
||||
# Directories
|
||||
OUTPUT_DIR ?= ../../probes
|
||||
LIBBPF_INCLUDE ?= /usr/include
|
||||
VMLINUX_H ?= vmlinux_subset.h
|
||||
|
||||
# Source files
|
||||
BPF_SOURCES = \
|
||||
function_tracer.bpf.c \
|
||||
syscall_openat.bpf.c \
|
||||
syscall_exec.bpf.c \
|
||||
syscall_network.bpf.c \
|
||||
uprobe_libc.bpf.c \
|
||||
uprobe_openssl.bpf.c
|
||||
|
||||
# Object files
|
||||
BPF_OBJECTS = $(patsubst %.bpf.c,$(OUTPUT_DIR)/%.bpf.o,$(BPF_SOURCES))
|
||||
|
||||
# Clang flags for BPF compilation
|
||||
CFLAGS := -g -O2 \
|
||||
-target bpf \
|
||||
-D__TARGET_ARCH_$(ARCH) \
|
||||
-I$(LIBBPF_INCLUDE) \
|
||||
-I. \
|
||||
-Wall \
|
||||
-Wno-unused-value \
|
||||
-Wno-pointer-sign \
|
||||
-Wno-compare-distinct-pointer-types \
|
||||
-Wno-address-of-packed-member
|
||||
|
||||
# Add BTF support if enabled
|
||||
ifeq ($(BTF),1)
|
||||
CFLAGS += -g
|
||||
endif
|
||||
|
||||
# Add debug info if enabled
|
||||
ifeq ($(DEBUG),1)
|
||||
CFLAGS += -DDEBUG
|
||||
endif
|
||||
|
||||
# Targets
|
||||
.PHONY: all clean install verify
|
||||
|
||||
all: $(OUTPUT_DIR) $(BPF_OBJECTS) manifest
|
||||
|
||||
$(OUTPUT_DIR):
|
||||
@mkdir -p $(OUTPUT_DIR)
|
||||
|
||||
# Compile BPF programs
|
||||
$(OUTPUT_DIR)/%.bpf.o: %.bpf.c stella_common.h $(VMLINUX_H)
|
||||
@echo " CC $@"
|
||||
$(CLANG) $(CFLAGS) -c $< -o $@
|
||||
ifeq ($(BTF),1)
|
||||
@# Strip DWARF but keep BTF
|
||||
$(LLVM_STRIP) -g $@ 2>/dev/null || true
|
||||
endif
|
||||
|
||||
# Generate probe manifest
|
||||
manifest: $(BPF_OBJECTS)
|
||||
@echo " MANIFEST $(OUTPUT_DIR)/probe-manifest.json"
|
||||
@echo '{' > $(OUTPUT_DIR)/probe-manifest.json
|
||||
@echo ' "version": "1.0.0",' >> $(OUTPUT_DIR)/probe-manifest.json
|
||||
@echo ' "generated_at": "'$$(date -u +%Y-%m-%dT%H:%M:%SZ)'",' >> $(OUTPUT_DIR)/probe-manifest.json
|
||||
@echo ' "arch": "$(ARCH)",' >> $(OUTPUT_DIR)/probe-manifest.json
|
||||
@echo ' "btf_enabled": $(BTF),' >> $(OUTPUT_DIR)/probe-manifest.json
|
||||
@echo ' "probes": [' >> $(OUTPUT_DIR)/probe-manifest.json
|
||||
@first=1; for obj in $(BPF_OBJECTS); do \
|
||||
name=$$(basename $$obj .bpf.o); \
|
||||
sha256=$$(sha256sum $$obj | cut -d' ' -f1); \
|
||||
size=$$(stat -c%s $$obj 2>/dev/null || stat -f%z $$obj); \
|
||||
if [ $$first -eq 0 ]; then echo ','; fi >> $(OUTPUT_DIR)/probe-manifest.json; \
|
||||
first=0; \
|
||||
echo ' {' >> $(OUTPUT_DIR)/probe-manifest.json; \
|
||||
echo ' "name": "'$$name'",' >> $(OUTPUT_DIR)/probe-manifest.json; \
|
||||
echo ' "file": "'$$(basename $$obj)'",' >> $(OUTPUT_DIR)/probe-manifest.json; \
|
||||
echo ' "sha256": "'$$sha256'",' >> $(OUTPUT_DIR)/probe-manifest.json; \
|
||||
echo ' "size": '$$size',' >> $(OUTPUT_DIR)/probe-manifest.json; \
|
||||
echo ' "min_kernel": "4.14",' >> $(OUTPUT_DIR)/probe-manifest.json; \
|
||||
echo ' "btf_required": '$(BTF) >> $(OUTPUT_DIR)/probe-manifest.json; \
|
||||
echo -n ' }' >> $(OUTPUT_DIR)/probe-manifest.json; \
|
||||
done
|
||||
@echo '' >> $(OUTPUT_DIR)/probe-manifest.json
|
||||
@echo ' ]' >> $(OUTPUT_DIR)/probe-manifest.json
|
||||
@echo '}' >> $(OUTPUT_DIR)/probe-manifest.json
|
||||
|
||||
# Verify probes load correctly (requires root)
|
||||
verify: $(BPF_OBJECTS)
|
||||
@echo "Verifying probe programs..."
|
||||
@for obj in $(BPF_OBJECTS); do \
|
||||
echo " VERIFY $$obj"; \
|
||||
$(BPFTOOL) prog load $$obj /sys/fs/bpf/stella_verify_$$(basename $$obj .bpf.o) 2>/dev/null && \
|
||||
$(BPFTOOL) prog show pinned /sys/fs/bpf/stella_verify_$$(basename $$obj .bpf.o) && \
|
||||
rm -f /sys/fs/bpf/stella_verify_$$(basename $$obj .bpf.o) || \
|
||||
echo " SKIP (verification requires root or failed)"; \
|
||||
done
|
||||
|
||||
# Install probes to system location
|
||||
install: $(BPF_OBJECTS) manifest
|
||||
@echo "Installing probes to /usr/share/stellaops/probes..."
|
||||
@mkdir -p /usr/share/stellaops/probes
|
||||
@cp $(OUTPUT_DIR)/*.bpf.o /usr/share/stellaops/probes/
|
||||
@cp $(OUTPUT_DIR)/probe-manifest.json /usr/share/stellaops/probes/
|
||||
@chmod 644 /usr/share/stellaops/probes/*
|
||||
@echo "Installation complete."
|
||||
|
||||
# Clean build artifacts
|
||||
clean:
|
||||
@echo "Cleaning build artifacts..."
|
||||
@rm -f $(OUTPUT_DIR)/*.bpf.o
|
||||
@rm -f $(OUTPUT_DIR)/probe-manifest.json
|
||||
|
||||
# Help
|
||||
help:
|
||||
@echo "Stella Ops eBPF Probe Build System"
|
||||
@echo ""
|
||||
@echo "Targets:"
|
||||
@echo " all - Build all probes (default)"
|
||||
@echo " clean - Remove build artifacts"
|
||||
@echo " install - Install to /usr/share/stellaops/probes"
|
||||
@echo " verify - Verify probes can be loaded (requires root)"
|
||||
@echo " help - Show this help"
|
||||
@echo ""
|
||||
@echo "Variables:"
|
||||
@echo " BTF=1 - Enable BTF debug info (default: 1)"
|
||||
@echo " DEBUG=1 - Enable debug mode (default: 0)"
|
||||
@echo " ARCH=x86 - Target architecture (default: auto-detect)"
|
||||
@echo " CLANG= - Path to clang compiler"
|
||||
@echo " OUTPUT_DIR= - Output directory for compiled probes"
|
||||
@@ -0,0 +1,188 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
// Stella Ops eBPF probe: Generic function tracer
|
||||
// Captures function call evidence for reachability proofs via uprobes
|
||||
|
||||
#include "stella_common.h"
|
||||
|
||||
char LICENSE[] SEC("license") = "Dual BSD/GPL";
|
||||
|
||||
// Configuration for function tracing
|
||||
struct tracer_config {
|
||||
u8 capture_stack; // Capture call stack
|
||||
u8 max_stack_depth; // Maximum stack frames (up to MAX_STACK_DEPTH)
|
||||
u8 filter_by_symbol; // Only trace symbols in target_symbols map
|
||||
u8 sample_rate; // Sample every N calls (1 = all, 10 = 10%)
|
||||
u8 reserved[4];
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, u32);
|
||||
__type(value, struct tracer_config);
|
||||
} tracer_config SEC(".maps");
|
||||
|
||||
// Per-symbol call counter for sampling
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_LRU_HASH);
|
||||
__uint(max_entries, 100000);
|
||||
__type(key, u64); // function address
|
||||
__type(value, u64); // call count
|
||||
} symbol_call_counts SEC(".maps");
|
||||
|
||||
// Detect runtime type from process characteristics
|
||||
static __always_inline u8 detect_runtime_type(void) {
|
||||
char comm[TASK_COMM_LEN];
|
||||
bpf_get_current_comm(&comm, sizeof(comm));
|
||||
|
||||
// Check for common runtime process names
|
||||
// Java: java, java-*, javac
|
||||
if (comm[0] == 'j' && comm[1] == 'a' && comm[2] == 'v' && comm[3] == 'a') {
|
||||
return 1; // RuntimeType::Jvm
|
||||
}
|
||||
|
||||
// Node.js: node, nodejs
|
||||
if (comm[0] == 'n' && comm[1] == 'o' && comm[2] == 'd' && comm[3] == 'e') {
|
||||
return 2; // RuntimeType::Node
|
||||
}
|
||||
|
||||
// Python: python, python3, python3.x
|
||||
if (comm[0] == 'p' && comm[1] == 'y' && comm[2] == 't' && comm[3] == 'h') {
|
||||
return 3; // RuntimeType::Python
|
||||
}
|
||||
|
||||
// .NET: dotnet
|
||||
if (comm[0] == 'd' && comm[1] == 'o' && comm[2] == 't' && comm[3] == 'n') {
|
||||
return 4; // RuntimeType::DotNet
|
||||
}
|
||||
|
||||
// Go binaries typically have no distinctive comm name
|
||||
// Would need to check for go runtime symbols
|
||||
|
||||
// Ruby: ruby, ruby3.x
|
||||
if (comm[0] == 'r' && comm[1] == 'u' && comm[2] == 'b' && comm[3] == 'y') {
|
||||
return 6; // RuntimeType::Ruby
|
||||
}
|
||||
|
||||
return 0; // RuntimeType::Native (default)
|
||||
}
|
||||
|
||||
// Check if we should sample this call
|
||||
static __always_inline bool should_sample(u64 func_addr, u8 sample_rate) {
|
||||
if (sample_rate <= 1) {
|
||||
return true; // Sample everything
|
||||
}
|
||||
|
||||
u64 *count = bpf_map_lookup_elem(&symbol_call_counts, &func_addr);
|
||||
u64 current_count;
|
||||
|
||||
if (count) {
|
||||
current_count = __sync_fetch_and_add(count, 1);
|
||||
} else {
|
||||
current_count = 0;
|
||||
u64 initial = 1;
|
||||
bpf_map_update_elem(&symbol_call_counts, &func_addr, &initial, BPF_NOEXIST);
|
||||
}
|
||||
|
||||
return (current_count % sample_rate) == 0;
|
||||
}
|
||||
|
||||
// Generic function entry probe
|
||||
// This is attached to specific functions via bpf_program__attach_uprobe
|
||||
SEC("uprobe/function_entry")
|
||||
int uprobe_function_entry(struct pt_regs *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
// Check container filter
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
u64 func_addr = PT_REGS_IP(ctx);
|
||||
|
||||
// Check symbol filter
|
||||
u32 zero = 0;
|
||||
struct tracer_config *cfg = bpf_map_lookup_elem(&tracer_config, &zero);
|
||||
|
||||
if (cfg && cfg->filter_by_symbol) {
|
||||
u8 *target = bpf_map_lookup_elem(&target_symbols, &func_addr);
|
||||
if (!target || *target == 0) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Check sampling
|
||||
u8 sample_rate = cfg ? cfg->sample_rate : 1;
|
||||
if (!should_sample(func_addr, sample_rate)) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Reserve event
|
||||
struct function_call_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (!event) {
|
||||
update_stats(true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Fill common header
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_FUNCTION_CALL);
|
||||
|
||||
// Fill function-specific fields
|
||||
event->function_addr = func_addr;
|
||||
event->return_addr = 0; // Would need frame pointer to get this
|
||||
event->runtime_type = detect_runtime_type();
|
||||
|
||||
// Capture call stack if configured
|
||||
event->stack_depth = 0;
|
||||
__builtin_memset(event->stack_trace, 0, sizeof(event->stack_trace));
|
||||
|
||||
if (cfg && cfg->capture_stack) {
|
||||
u8 max_depth = cfg->max_stack_depth;
|
||||
if (max_depth == 0 || max_depth > MAX_STACK_DEPTH) {
|
||||
max_depth = MAX_STACK_DEPTH;
|
||||
}
|
||||
|
||||
// bpf_get_stack returns negative on error, positive bytes on success
|
||||
int stack_size = bpf_get_stack(ctx, event->stack_trace,
|
||||
max_depth * sizeof(u64), BPF_F_USER_STACK);
|
||||
|
||||
if (stack_size > 0) {
|
||||
event->stack_depth = stack_size / sizeof(u64);
|
||||
}
|
||||
}
|
||||
|
||||
__builtin_memset(event->reserved, 0, sizeof(event->reserved));
|
||||
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
update_stats(false, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Function return probe (optional, for timing/return value capture)
|
||||
SEC("uretprobe/function_return")
|
||||
int uretprobe_function_return(struct pt_regs *ctx) {
|
||||
// For now, we don't emit separate return events
|
||||
// Could be extended to capture return values or timing
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Batch symbol addition helpers (populated from user space)
|
||||
// ============================================================================
|
||||
|
||||
// User space calls this via BPF_MAP_UPDATE_ELEM to add symbols to trace
|
||||
// Key: function address
|
||||
// Value: 1 (trace) or 0 (ignore)
|
||||
|
||||
// Statistics retrieval
|
||||
SEC("uprobe/get_stats")
|
||||
int uprobe_get_stats(struct pt_regs *ctx) {
|
||||
// This is a dummy probe that allows user space to read stats
|
||||
// by triggering a probe and then reading the stats map
|
||||
return 0;
|
||||
}
|
||||
@@ -0,0 +1,222 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
// Stella Ops eBPF common definitions
|
||||
// Shared event structures and constants across all probes
|
||||
|
||||
#ifndef __STELLA_COMMON_H__
|
||||
#define __STELLA_COMMON_H__
|
||||
|
||||
#include "vmlinux_subset.h"
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <bpf/bpf_core_read.h>
|
||||
|
||||
// ============================================================================
|
||||
// Constants
|
||||
// ============================================================================
|
||||
|
||||
#define TASK_COMM_LEN 16
|
||||
#define MAX_FILENAME_LEN 256
|
||||
#define MAX_ARGV_LEN 128
|
||||
#define MAX_STACK_DEPTH 16
|
||||
#define MAX_PATH_LEN 256
|
||||
|
||||
// Event types (discriminator for union in user space)
|
||||
#define EVENT_TYPE_FUNCTION_CALL 1
|
||||
#define EVENT_TYPE_FILE_OPEN 2
|
||||
#define EVENT_TYPE_PROCESS_EXEC 3
|
||||
#define EVENT_TYPE_TCP_STATE 4
|
||||
#define EVENT_TYPE_NET_CONNECT 5
|
||||
#define EVENT_TYPE_SSL_OP 6
|
||||
|
||||
// ============================================================================
|
||||
// Common Event Header
|
||||
// ============================================================================
|
||||
|
||||
// All events share this header for efficient parsing
|
||||
struct event_header {
|
||||
u64 timestamp_ns; // ktime_get_boot_ns()
|
||||
u32 pid; // Process ID
|
||||
u32 tid; // Thread ID
|
||||
u64 cgroup_id; // Container cgroup ID
|
||||
u8 event_type; // EVENT_TYPE_* discriminator
|
||||
u8 reserved[7]; // Padding for alignment
|
||||
char comm[TASK_COMM_LEN];
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Function Call Event (uprobe)
|
||||
// ============================================================================
|
||||
|
||||
struct function_call_event {
|
||||
struct event_header hdr;
|
||||
u64 function_addr; // Address of called function
|
||||
u64 return_addr; // Return address
|
||||
u64 stack_trace[MAX_STACK_DEPTH]; // Call stack addresses
|
||||
u8 stack_depth; // Actual stack depth captured
|
||||
u8 runtime_type; // RuntimeType enum
|
||||
u8 reserved[6];
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// File Open Event (tracepoint:syscalls:sys_enter_openat)
|
||||
// ============================================================================
|
||||
|
||||
struct file_open_event {
|
||||
struct event_header hdr;
|
||||
int dfd; // Directory file descriptor
|
||||
int flags; // Open flags (O_RDONLY, etc.)
|
||||
u16 mode; // File mode
|
||||
u16 reserved;
|
||||
char filename[MAX_FILENAME_LEN]; // File path
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Process Exec Event (tracepoint:sched:sched_process_exec)
|
||||
// ============================================================================
|
||||
|
||||
struct process_exec_event {
|
||||
struct event_header hdr;
|
||||
u32 ppid; // Parent process ID
|
||||
u32 reserved;
|
||||
char filename[MAX_FILENAME_LEN]; // Executed file path
|
||||
char argv0[MAX_ARGV_LEN]; // First argument
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// TCP State Change Event (tracepoint:sock:inet_sock_set_state)
|
||||
// ============================================================================
|
||||
|
||||
struct tcp_state_event {
|
||||
struct event_header hdr;
|
||||
u8 oldstate; // Previous TCP state
|
||||
u8 newstate; // New TCP state
|
||||
u8 family; // AF_INET or AF_INET6
|
||||
u8 protocol; // IPPROTO_TCP
|
||||
u16 sport; // Source port
|
||||
u16 dport; // Destination port
|
||||
union {
|
||||
u32 saddr_v4; // IPv4 source address
|
||||
u8 saddr_v6[16]; // IPv6 source address
|
||||
};
|
||||
union {
|
||||
u32 daddr_v4; // IPv4 destination address
|
||||
u8 daddr_v6[16]; // IPv6 destination address
|
||||
};
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Network Connect Event (uprobe:libc:connect)
|
||||
// ============================================================================
|
||||
|
||||
struct net_connect_event {
|
||||
struct event_header hdr;
|
||||
int fd; // Socket file descriptor
|
||||
int ret; // Return value (0 = success)
|
||||
u16 family; // Address family
|
||||
u16 port; // Destination port
|
||||
union {
|
||||
u32 addr_v4; // IPv4 address
|
||||
u8 addr_v6[16]; // IPv6 address
|
||||
};
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// SSL Operation Event (uprobe:libssl:SSL_read/SSL_write)
|
||||
// ============================================================================
|
||||
|
||||
struct ssl_op_event {
|
||||
struct event_header hdr;
|
||||
u64 ssl_ptr; // SSL* pointer for correlation
|
||||
u32 requested_bytes; // Bytes requested
|
||||
u32 actual_bytes; // Bytes actually transferred
|
||||
u8 operation; // 0 = read, 1 = write
|
||||
u8 reserved[7];
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// BPF Maps
|
||||
// ============================================================================
|
||||
|
||||
// Ring buffer for sending events to user space
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_RINGBUF);
|
||||
__uint(max_entries, 256 * 1024); // 256KB default
|
||||
} events SEC(".maps");
|
||||
|
||||
// Hash map for filtering by cgroup (container targeting)
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 1024);
|
||||
__type(key, u64); // cgroup_id
|
||||
__type(value, u8); // 1 = trace, 0 = ignore
|
||||
} target_cgroups SEC(".maps");
|
||||
|
||||
// Hash map for symbol filtering (target symbols only)
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 10000);
|
||||
__type(key, u64); // function address
|
||||
__type(value, u8); // 1 = trace
|
||||
} target_symbols SEC(".maps");
|
||||
|
||||
// Per-CPU array for statistics
|
||||
struct stats {
|
||||
u64 events_total;
|
||||
u64 events_dropped;
|
||||
u64 events_filtered;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, u32);
|
||||
__type(value, struct stats);
|
||||
} probe_stats SEC(".maps");
|
||||
|
||||
// ============================================================================
|
||||
// Helper Functions
|
||||
// ============================================================================
|
||||
|
||||
// Check if we should trace this cgroup (container filtering)
|
||||
static __always_inline bool should_trace_cgroup(u64 cgroup_id) {
|
||||
// If no targets specified, trace everything
|
||||
u32 zero = 0;
|
||||
u8 *target = bpf_map_lookup_elem(&target_cgroups, &cgroup_id);
|
||||
|
||||
// If map is empty or cgroup is in map, trace it
|
||||
return target == NULL || *target == 1;
|
||||
}
|
||||
|
||||
// Fill common event header
|
||||
static __always_inline void fill_event_header(struct event_header *hdr, u8 event_type) {
|
||||
hdr->timestamp_ns = bpf_ktime_get_boot_ns();
|
||||
hdr->pid = bpf_get_current_pid_tgid() >> 32;
|
||||
hdr->tid = bpf_get_current_pid_tgid() & 0xFFFFFFFF;
|
||||
hdr->cgroup_id = bpf_get_current_cgroup_id();
|
||||
hdr->event_type = event_type;
|
||||
bpf_get_current_comm(&hdr->comm, sizeof(hdr->comm));
|
||||
}
|
||||
|
||||
// Update statistics
|
||||
static __always_inline void update_stats(bool dropped, bool filtered) {
|
||||
u32 zero = 0;
|
||||
struct stats *s = bpf_map_lookup_elem(&probe_stats, &zero);
|
||||
if (s) {
|
||||
__sync_fetch_and_add(&s->events_total, 1);
|
||||
if (dropped) {
|
||||
__sync_fetch_and_add(&s->events_dropped, 1);
|
||||
}
|
||||
if (filtered) {
|
||||
__sync_fetch_and_add(&s->events_filtered, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Submit event to ring buffer with size
|
||||
static __always_inline int submit_event(void *event, size_t size) {
|
||||
int ret = bpf_ringbuf_output(&events, event, size, 0);
|
||||
update_stats(ret != 0, false);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif // __STELLA_COMMON_H__
|
||||
@@ -0,0 +1,214 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
// Stella Ops eBPF probe: sched_process_exec tracepoint
|
||||
// Captures process execution evidence for reachability proofs
|
||||
|
||||
#include "stella_common.h"
|
||||
|
||||
char LICENSE[] SEC("license") = "Dual BSD/GPL";
|
||||
|
||||
// Parent process tracking for exec chain analysis
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 10000);
|
||||
__type(key, u32); // PID
|
||||
__type(value, u32); // PPID
|
||||
} pid_ppid_map SEC(".maps");
|
||||
|
||||
SEC("tracepoint/sched/sched_process_exec")
|
||||
int trace_sched_process_exec(struct trace_event_raw_sched_process_exec *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
// Check if we should trace this container
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Reserve space in ring buffer
|
||||
struct process_exec_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (!event) {
|
||||
update_stats(true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Fill common header
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_PROCESS_EXEC);
|
||||
|
||||
// Get parent PID
|
||||
struct task_struct *task = (struct task_struct *)bpf_get_current_task();
|
||||
u32 ppid = 0;
|
||||
if (task) {
|
||||
// Read parent pointer using CO-RE
|
||||
struct task_struct *parent = NULL;
|
||||
bpf_probe_read_kernel(&parent, sizeof(parent),
|
||||
&task->__bindgen_anon_1.__bindgen_anon_1.parent);
|
||||
if (parent) {
|
||||
bpf_probe_read_kernel(&ppid, sizeof(ppid), &parent->tgid);
|
||||
}
|
||||
}
|
||||
event->ppid = ppid;
|
||||
event->reserved = 0;
|
||||
|
||||
// Store PID -> PPID mapping for exec chain analysis
|
||||
u32 pid = event->hdr.pid;
|
||||
bpf_map_update_elem(&pid_ppid_map, &pid, &ppid, BPF_ANY);
|
||||
|
||||
// Read filename from tracepoint data
|
||||
// The filename is at variable offset indicated by __data_loc_filename
|
||||
// Format: (offset << 16) | length
|
||||
u32 data_loc = ctx->__data_loc_filename;
|
||||
u32 offset = data_loc >> 16;
|
||||
u32 len = data_loc & 0xFFFF;
|
||||
|
||||
// Bound the length
|
||||
if (len > sizeof(event->filename) - 1) {
|
||||
len = sizeof(event->filename) - 1;
|
||||
}
|
||||
|
||||
// Read filename from tracepoint data area
|
||||
char *data_ptr = (char *)ctx + offset;
|
||||
int ret = bpf_probe_read_kernel_str(
|
||||
event->filename,
|
||||
sizeof(event->filename),
|
||||
data_ptr);
|
||||
|
||||
if (ret < 0) {
|
||||
event->filename[0] = '\0';
|
||||
}
|
||||
|
||||
// Initialize argv0 (we'll populate this from a separate probe if needed)
|
||||
event->argv0[0] = '\0';
|
||||
|
||||
// Submit event
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
update_stats(false, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Track process exit for cleanup
|
||||
SEC("tracepoint/sched/sched_process_exit")
|
||||
int trace_sched_process_exit(void *ctx) {
|
||||
u32 pid = bpf_get_current_pid_tgid() >> 32;
|
||||
bpf_map_delete_elem(&pid_ppid_map, &pid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Capture argv from execve syscall entry for richer context
|
||||
SEC("tracepoint/syscalls/sys_enter_execve")
|
||||
int trace_sys_enter_execve(struct trace_event_raw_sys_enter *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// execve(const char *filename, char *const argv[], char *const envp[])
|
||||
const char *filename = (const char *)ctx->args[0];
|
||||
const char **argv = (const char **)ctx->args[1];
|
||||
|
||||
// Reserve event
|
||||
struct process_exec_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (!event) {
|
||||
update_stats(true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_PROCESS_EXEC);
|
||||
event->ppid = 0; // Will be filled by sched_process_exec
|
||||
event->reserved = 0;
|
||||
|
||||
// Read filename
|
||||
int ret = bpf_probe_read_user_str(
|
||||
event->filename,
|
||||
sizeof(event->filename),
|
||||
filename);
|
||||
|
||||
if (ret < 0) {
|
||||
event->filename[0] = '\0';
|
||||
}
|
||||
|
||||
// Read argv[0] if available
|
||||
if (argv) {
|
||||
const char *argv0 = NULL;
|
||||
ret = bpf_probe_read_user(&argv0, sizeof(argv0), &argv[0]);
|
||||
if (ret == 0 && argv0) {
|
||||
ret = bpf_probe_read_user_str(
|
||||
event->argv0,
|
||||
sizeof(event->argv0),
|
||||
argv0);
|
||||
if (ret < 0) {
|
||||
event->argv0[0] = '\0';
|
||||
}
|
||||
} else {
|
||||
event->argv0[0] = '\0';
|
||||
}
|
||||
} else {
|
||||
event->argv0[0] = '\0';
|
||||
}
|
||||
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
update_stats(false, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Also capture execveat for completeness
|
||||
SEC("tracepoint/syscalls/sys_enter_execveat")
|
||||
int trace_sys_enter_execveat(struct trace_event_raw_sys_enter *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// execveat(int dirfd, const char *pathname, char *const argv[],
|
||||
// char *const envp[], int flags)
|
||||
const char *filename = (const char *)ctx->args[1];
|
||||
const char **argv = (const char **)ctx->args[2];
|
||||
|
||||
struct process_exec_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (!event) {
|
||||
update_stats(true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_PROCESS_EXEC);
|
||||
event->ppid = 0;
|
||||
event->reserved = 0;
|
||||
|
||||
int ret = bpf_probe_read_user_str(
|
||||
event->filename,
|
||||
sizeof(event->filename),
|
||||
filename);
|
||||
|
||||
if (ret < 0) {
|
||||
event->filename[0] = '\0';
|
||||
}
|
||||
|
||||
if (argv) {
|
||||
const char *argv0 = NULL;
|
||||
ret = bpf_probe_read_user(&argv0, sizeof(argv0), &argv[0]);
|
||||
if (ret == 0 && argv0) {
|
||||
ret = bpf_probe_read_user_str(
|
||||
event->argv0,
|
||||
sizeof(event->argv0),
|
||||
argv0);
|
||||
if (ret < 0) {
|
||||
event->argv0[0] = '\0';
|
||||
}
|
||||
} else {
|
||||
event->argv0[0] = '\0';
|
||||
}
|
||||
} else {
|
||||
event->argv0[0] = '\0';
|
||||
}
|
||||
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
update_stats(false, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -0,0 +1,173 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
// Stella Ops eBPF probe: inet_sock_set_state tracepoint
|
||||
// Captures TCP connection state changes for reachability proofs
|
||||
|
||||
#include "stella_common.h"
|
||||
|
||||
char LICENSE[] SEC("license") = "Dual BSD/GPL";
|
||||
|
||||
// Configuration for TCP state filtering
|
||||
struct tcp_config {
|
||||
u8 capture_established; // Capture transitions to ESTABLISHED
|
||||
u8 capture_close; // Capture transitions to CLOSE
|
||||
u8 capture_all_states; // Capture all state transitions
|
||||
u8 filter_loopback; // Filter out 127.0.0.0/8 and ::1
|
||||
u8 reserved[4];
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, u32);
|
||||
__type(value, struct tcp_config);
|
||||
} tcp_filter_config SEC(".maps");
|
||||
|
||||
// CIDR allowlist for destination filtering (optional)
|
||||
// Key: network prefix (e.g., 10.0.0.0), Value: prefix length (e.g., 8)
|
||||
struct cidr_entry {
|
||||
u32 network;
|
||||
u8 prefix_len;
|
||||
u8 include; // 1 = include, 0 = exclude
|
||||
u8 reserved[2];
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 256);
|
||||
__type(key, u32);
|
||||
__type(value, struct cidr_entry);
|
||||
} dest_cidr_filters SEC(".maps");
|
||||
|
||||
// Check if IPv4 address matches CIDR
|
||||
static __always_inline bool ipv4_in_cidr(u32 addr, u32 network, u8 prefix_len) {
|
||||
if (prefix_len == 0) return true;
|
||||
if (prefix_len > 32) return false;
|
||||
|
||||
u32 mask = ~((1U << (32 - prefix_len)) - 1);
|
||||
return (addr & mask) == (network & mask);
|
||||
}
|
||||
|
||||
// Check if address is loopback
|
||||
static __always_inline bool is_loopback_v4(u32 addr) {
|
||||
// 127.0.0.0/8 in network byte order
|
||||
return (addr & 0x000000FF) == 0x0000007F;
|
||||
}
|
||||
|
||||
static __always_inline bool is_loopback_v6(const u8 *addr) {
|
||||
// ::1
|
||||
static const u8 loopback[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1};
|
||||
#pragma unroll
|
||||
for (int i = 0; i < 16; i++) {
|
||||
if (addr[i] != loopback[i]) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if state transition is interesting
|
||||
static __always_inline bool should_capture_state(u8 oldstate, u8 newstate) {
|
||||
u32 zero = 0;
|
||||
struct tcp_config *cfg = bpf_map_lookup_elem(&tcp_filter_config, &zero);
|
||||
|
||||
if (!cfg || cfg->capture_all_states) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Capture transitions to ESTABLISHED (connection made)
|
||||
if (cfg->capture_established && newstate == TCP_ESTABLISHED) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Capture transitions to CLOSE (connection ended)
|
||||
if (cfg->capture_close && newstate == TCP_CLOSE) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
SEC("tracepoint/sock/inet_sock_set_state")
|
||||
int trace_inet_sock_set_state(struct trace_event_raw_inet_sock_set_state *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
// Check if we should trace this container
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Check state transition filter
|
||||
u8 oldstate = ctx->oldstate;
|
||||
u8 newstate = ctx->newstate;
|
||||
|
||||
if (!should_capture_state(oldstate, newstate)) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Check loopback filter
|
||||
u32 zero = 0;
|
||||
struct tcp_config *cfg = bpf_map_lookup_elem(&tcp_filter_config, &zero);
|
||||
|
||||
if (cfg && cfg->filter_loopback) {
|
||||
if (ctx->family == AF_INET) {
|
||||
u32 daddr;
|
||||
bpf_probe_read_kernel(&daddr, sizeof(daddr), &ctx->daddr);
|
||||
if (is_loopback_v4(daddr)) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
} else if (ctx->family == AF_INET6) {
|
||||
if (is_loopback_v6(ctx->daddr_v6)) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reserve space in ring buffer
|
||||
struct tcp_state_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (!event) {
|
||||
update_stats(true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Fill common header
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_TCP_STATE);
|
||||
|
||||
// Fill TCP-specific fields
|
||||
event->oldstate = oldstate;
|
||||
event->newstate = newstate;
|
||||
event->family = ctx->family;
|
||||
event->protocol = ctx->protocol;
|
||||
event->sport = ctx->sport;
|
||||
event->dport = ctx->dport;
|
||||
|
||||
// Copy addresses based on family
|
||||
if (ctx->family == AF_INET) {
|
||||
bpf_probe_read_kernel(&event->saddr_v4, sizeof(event->saddr_v4), &ctx->saddr);
|
||||
bpf_probe_read_kernel(&event->daddr_v4, sizeof(event->daddr_v4), &ctx->daddr);
|
||||
// Zero out v6 portion
|
||||
__builtin_memset(event->saddr_v6, 0, sizeof(event->saddr_v6));
|
||||
__builtin_memset(event->daddr_v6, 0, sizeof(event->daddr_v6));
|
||||
} else if (ctx->family == AF_INET6) {
|
||||
bpf_probe_read_kernel(event->saddr_v6, sizeof(event->saddr_v6), ctx->saddr_v6);
|
||||
bpf_probe_read_kernel(event->daddr_v6, sizeof(event->daddr_v6), ctx->daddr_v6);
|
||||
event->saddr_v4 = 0;
|
||||
event->daddr_v4 = 0;
|
||||
}
|
||||
|
||||
// Submit event
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
update_stats(false, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Alternative: trace tcp_set_state kernel function (fallback for older kernels)
|
||||
SEC("kprobe/tcp_set_state")
|
||||
int kprobe_tcp_set_state(struct pt_regs *ctx) {
|
||||
// This is a fallback for kernels without inet_sock_set_state tracepoint
|
||||
// Implementation would be similar but with different argument extraction
|
||||
return 0;
|
||||
}
|
||||
@@ -0,0 +1,182 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
// Stella Ops eBPF probe: sys_enter_openat tracepoint
|
||||
// Captures file access evidence for reachability proofs
|
||||
|
||||
#include "stella_common.h"
|
||||
|
||||
char LICENSE[] SEC("license") = "Dual BSD/GPL";
|
||||
|
||||
// Path filtering configuration (populated from user space)
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 1000);
|
||||
__type(key, char[64]); // Path prefix to filter
|
||||
__type(value, u8); // 1 = include, 0 = exclude
|
||||
} path_filters SEC(".maps");
|
||||
|
||||
// Configuration flags
|
||||
struct config {
|
||||
u8 filter_proc_sys; // Filter /proc and /sys paths
|
||||
u8 filter_dev; // Filter /dev paths
|
||||
u8 capture_read_only; // Only capture read operations
|
||||
u8 reserved[5];
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, u32);
|
||||
__type(value, struct config);
|
||||
} openat_config SEC(".maps");
|
||||
|
||||
// Check if path should be filtered out
|
||||
static __always_inline bool should_filter_path(const char *path, int len) {
|
||||
u32 zero = 0;
|
||||
struct config *cfg = bpf_map_lookup_elem(&openat_config, &zero);
|
||||
|
||||
if (!cfg) {
|
||||
return false; // No config = capture everything
|
||||
}
|
||||
|
||||
// Filter /proc and /sys if configured
|
||||
if (cfg->filter_proc_sys && len >= 5) {
|
||||
if (path[0] == '/' && path[1] == 'p' && path[2] == 'r' &&
|
||||
path[3] == 'o' && path[4] == 'c') {
|
||||
return true;
|
||||
}
|
||||
if (path[0] == '/' && path[1] == 's' && path[2] == 'y' &&
|
||||
path[3] == 's' && path[4] == '/') {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Filter /dev if configured
|
||||
if (cfg->filter_dev && len >= 4) {
|
||||
if (path[0] == '/' && path[1] == 'd' && path[2] == 'e' &&
|
||||
path[3] == 'v') {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
SEC("tracepoint/syscalls/sys_enter_openat")
|
||||
int trace_sys_enter_openat(struct trace_event_raw_sys_enter *ctx) {
|
||||
// Get cgroup ID for container filtering
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
// Check if we should trace this container
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Extract syscall arguments
|
||||
// openat(int dfd, const char *filename, int flags, umode_t mode)
|
||||
int dfd = (int)ctx->args[0];
|
||||
const char *filename = (const char *)ctx->args[1];
|
||||
int flags = (int)ctx->args[2];
|
||||
u16 mode = (u16)ctx->args[3];
|
||||
|
||||
// Check read-only filter
|
||||
u32 zero = 0;
|
||||
struct config *cfg = bpf_map_lookup_elem(&openat_config, &zero);
|
||||
if (cfg && cfg->capture_read_only) {
|
||||
int access_mode = flags & 0x3; // O_RDONLY=0, O_WRONLY=1, O_RDWR=2
|
||||
if (access_mode != O_RDONLY) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Reserve space in ring buffer
|
||||
struct file_open_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (!event) {
|
||||
update_stats(true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Fill common header
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_FILE_OPEN);
|
||||
|
||||
// Fill event-specific fields
|
||||
event->dfd = dfd;
|
||||
event->flags = flags;
|
||||
event->mode = mode;
|
||||
event->reserved = 0;
|
||||
|
||||
// Read filename from user space (with bounds checking)
|
||||
int ret = bpf_probe_read_user_str(
|
||||
event->filename,
|
||||
sizeof(event->filename),
|
||||
filename);
|
||||
|
||||
if (ret < 0) {
|
||||
// Failed to read filename, still submit with empty path
|
||||
event->filename[0] = '\0';
|
||||
}
|
||||
|
||||
// Check path filter after reading
|
||||
if (should_filter_path(event->filename, ret)) {
|
||||
bpf_ringbuf_discard(event, 0);
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Submit event
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
update_stats(false, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Also handle legacy open() syscall for older kernels
|
||||
SEC("tracepoint/syscalls/sys_enter_open")
|
||||
int trace_sys_enter_open(struct trace_event_raw_sys_enter *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// open(const char *filename, int flags, umode_t mode)
|
||||
const char *filename = (const char *)ctx->args[0];
|
||||
int flags = (int)ctx->args[1];
|
||||
u16 mode = (u16)ctx->args[2];
|
||||
|
||||
struct file_open_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (!event) {
|
||||
update_stats(true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_FILE_OPEN);
|
||||
event->dfd = -100; // AT_FDCWD equivalent for legacy open
|
||||
event->flags = flags;
|
||||
event->mode = mode;
|
||||
event->reserved = 0;
|
||||
|
||||
int ret = bpf_probe_read_user_str(
|
||||
event->filename,
|
||||
sizeof(event->filename),
|
||||
filename);
|
||||
|
||||
if (ret < 0) {
|
||||
event->filename[0] = '\0';
|
||||
}
|
||||
|
||||
if (should_filter_path(event->filename, ret)) {
|
||||
bpf_ringbuf_discard(event, 0);
|
||||
update_stats(false, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
update_stats(false, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -0,0 +1,369 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
// Stella Ops eBPF probe: libc network function uprobes
|
||||
// Captures connect/accept/read/write for network evidence
|
||||
|
||||
#include "stella_common.h"
|
||||
|
||||
char LICENSE[] SEC("license") = "Dual BSD/GPL";
|
||||
|
||||
// Track in-flight connect() calls to capture return values
|
||||
struct connect_args {
|
||||
int fd;
|
||||
u16 family;
|
||||
u16 port;
|
||||
union {
|
||||
u32 addr_v4;
|
||||
u8 addr_v6[16];
|
||||
};
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 10000);
|
||||
__type(key, u64); // pid_tgid
|
||||
__type(value, struct connect_args);
|
||||
} connect_args_map SEC(".maps");
|
||||
|
||||
// Track byte counts per (pid, fd) for traffic volume
|
||||
struct fd_stats {
|
||||
u64 bytes_read;
|
||||
u64 bytes_written;
|
||||
u64 read_count;
|
||||
u64 write_count;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_LRU_HASH);
|
||||
__uint(max_entries, 50000);
|
||||
__type(key, u64); // (pid << 32) | fd
|
||||
__type(value, struct fd_stats);
|
||||
} fd_byte_counts SEC(".maps");
|
||||
|
||||
// ============================================================================
|
||||
// connect() probes
|
||||
// ============================================================================
|
||||
|
||||
SEC("uprobe/libc:connect")
|
||||
int uprobe_connect(struct pt_regs *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen)
|
||||
int fd = (int)PT_REGS_PARM1(ctx);
|
||||
const struct sockaddr *addr = (const struct sockaddr *)PT_REGS_PARM2(ctx);
|
||||
|
||||
if (!addr) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Read address family
|
||||
u16 family = 0;
|
||||
bpf_probe_read_user(&family, sizeof(family), &addr->sa_family);
|
||||
|
||||
// Only track AF_INET and AF_INET6
|
||||
if (family != AF_INET && family != AF_INET6) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Store arguments for retrieval in uretprobe
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
struct connect_args args = {
|
||||
.fd = fd,
|
||||
.family = family,
|
||||
.port = 0,
|
||||
.addr_v4 = 0,
|
||||
};
|
||||
|
||||
if (family == AF_INET) {
|
||||
const struct sockaddr_in *sin = (const struct sockaddr_in *)addr;
|
||||
bpf_probe_read_user(&args.port, sizeof(args.port), &sin->sin_port);
|
||||
bpf_probe_read_user(&args.addr_v4, sizeof(args.addr_v4), &sin->sin_addr);
|
||||
} else if (family == AF_INET6) {
|
||||
const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr;
|
||||
bpf_probe_read_user(&args.port, sizeof(args.port), &sin6->sin6_port);
|
||||
bpf_probe_read_user(args.addr_v6, sizeof(args.addr_v6), &sin6->sin6_addr);
|
||||
}
|
||||
|
||||
// Convert port from network byte order
|
||||
args.port = __builtin_bswap16(args.port);
|
||||
|
||||
bpf_map_update_elem(&connect_args_map, &pid_tgid, &args, BPF_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uretprobe/libc:connect")
|
||||
int uretprobe_connect(struct pt_regs *ctx) {
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
|
||||
// Retrieve saved arguments
|
||||
struct connect_args *args = bpf_map_lookup_elem(&connect_args_map, &pid_tgid);
|
||||
if (!args) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ret = (int)PT_REGS_RET(ctx);
|
||||
|
||||
// Reserve event
|
||||
struct net_connect_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (!event) {
|
||||
bpf_map_delete_elem(&connect_args_map, &pid_tgid);
|
||||
update_stats(true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_NET_CONNECT);
|
||||
|
||||
event->fd = args->fd;
|
||||
event->ret = ret;
|
||||
event->family = args->family;
|
||||
event->port = args->port;
|
||||
|
||||
if (args->family == AF_INET) {
|
||||
event->addr_v4 = args->addr_v4;
|
||||
__builtin_memset(event->addr_v6, 0, sizeof(event->addr_v6));
|
||||
} else {
|
||||
event->addr_v4 = 0;
|
||||
__builtin_memcpy(event->addr_v6, args->addr_v6, sizeof(event->addr_v6));
|
||||
}
|
||||
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
bpf_map_delete_elem(&connect_args_map, &pid_tgid);
|
||||
update_stats(false, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// accept() probes (for inbound connections)
|
||||
// ============================================================================
|
||||
|
||||
struct accept_args {
|
||||
int sockfd;
|
||||
struct sockaddr *addr;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 10000);
|
||||
__type(key, u64);
|
||||
__type(value, struct accept_args);
|
||||
} accept_args_map SEC(".maps");
|
||||
|
||||
SEC("uprobe/libc:accept")
|
||||
int uprobe_accept(struct pt_regs *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen)
|
||||
int sockfd = (int)PT_REGS_PARM1(ctx);
|
||||
struct sockaddr *addr = (struct sockaddr *)PT_REGS_PARM2(ctx);
|
||||
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
struct accept_args args = {
|
||||
.sockfd = sockfd,
|
||||
.addr = addr,
|
||||
};
|
||||
|
||||
bpf_map_update_elem(&accept_args_map, &pid_tgid, &args, BPF_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uretprobe/libc:accept")
|
||||
int uretprobe_accept(struct pt_regs *ctx) {
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
|
||||
struct accept_args *args = bpf_map_lookup_elem(&accept_args_map, &pid_tgid);
|
||||
if (!args) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int new_fd = (int)PT_REGS_RET(ctx);
|
||||
if (new_fd < 0) {
|
||||
// Accept failed
|
||||
bpf_map_delete_elem(&accept_args_map, &pid_tgid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct net_connect_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (!event) {
|
||||
bpf_map_delete_elem(&accept_args_map, &pid_tgid);
|
||||
update_stats(true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_NET_CONNECT);
|
||||
event->fd = new_fd;
|
||||
event->ret = 0; // Success
|
||||
|
||||
// Read peer address if provided
|
||||
if (args->addr) {
|
||||
u16 family = 0;
|
||||
bpf_probe_read_user(&family, sizeof(family), &args->addr->sa_family);
|
||||
event->family = family;
|
||||
|
||||
if (family == AF_INET) {
|
||||
const struct sockaddr_in *sin = (const struct sockaddr_in *)args->addr;
|
||||
u16 port = 0;
|
||||
bpf_probe_read_user(&port, sizeof(port), &sin->sin_port);
|
||||
event->port = __builtin_bswap16(port);
|
||||
bpf_probe_read_user(&event->addr_v4, sizeof(event->addr_v4), &sin->sin_addr);
|
||||
} else if (family == AF_INET6) {
|
||||
const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)args->addr;
|
||||
u16 port = 0;
|
||||
bpf_probe_read_user(&port, sizeof(port), &sin6->sin6_port);
|
||||
event->port = __builtin_bswap16(port);
|
||||
bpf_probe_read_user(event->addr_v6, sizeof(event->addr_v6), &sin6->sin6_addr);
|
||||
}
|
||||
} else {
|
||||
event->family = 0;
|
||||
event->port = 0;
|
||||
event->addr_v4 = 0;
|
||||
}
|
||||
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
bpf_map_delete_elem(&accept_args_map, &pid_tgid);
|
||||
update_stats(false, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// accept4() has the same signature with an extra flags parameter
|
||||
SEC("uprobe/libc:accept4")
|
||||
int uprobe_accept4(struct pt_regs *ctx) {
|
||||
return uprobe_accept(ctx);
|
||||
}
|
||||
|
||||
SEC("uretprobe/libc:accept4")
|
||||
int uretprobe_accept4(struct pt_regs *ctx) {
|
||||
return uretprobe_accept(ctx);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// read()/write() probes for byte counting
|
||||
// ============================================================================
|
||||
|
||||
struct rw_args {
|
||||
int fd;
|
||||
size_t count;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 10000);
|
||||
__type(key, u64);
|
||||
__type(value, struct rw_args);
|
||||
} rw_args_map SEC(".maps");
|
||||
|
||||
SEC("uprobe/libc:read")
|
||||
int uprobe_read(struct pt_regs *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// read(int fd, void *buf, size_t count)
|
||||
int fd = (int)PT_REGS_PARM1(ctx);
|
||||
size_t count = (size_t)PT_REGS_PARM3(ctx);
|
||||
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
struct rw_args args = { .fd = fd, .count = count };
|
||||
bpf_map_update_elem(&rw_args_map, &pid_tgid, &args, BPF_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uretprobe/libc:read")
|
||||
int uretprobe_read(struct pt_regs *ctx) {
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
|
||||
struct rw_args *args = bpf_map_lookup_elem(&rw_args_map, &pid_tgid);
|
||||
if (!args) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
ssize_t bytes = (ssize_t)PT_REGS_RET(ctx);
|
||||
if (bytes > 0) {
|
||||
u32 pid = pid_tgid >> 32;
|
||||
u64 key = ((u64)pid << 32) | (u32)args->fd;
|
||||
|
||||
struct fd_stats *stats = bpf_map_lookup_elem(&fd_byte_counts, &key);
|
||||
if (stats) {
|
||||
__sync_fetch_and_add(&stats->bytes_read, bytes);
|
||||
__sync_fetch_and_add(&stats->read_count, 1);
|
||||
} else {
|
||||
struct fd_stats new_stats = {
|
||||
.bytes_read = bytes,
|
||||
.bytes_written = 0,
|
||||
.read_count = 1,
|
||||
.write_count = 0,
|
||||
};
|
||||
bpf_map_update_elem(&fd_byte_counts, &key, &new_stats, BPF_NOEXIST);
|
||||
}
|
||||
}
|
||||
|
||||
bpf_map_delete_elem(&rw_args_map, &pid_tgid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uprobe/libc:write")
|
||||
int uprobe_write(struct pt_regs *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// write(int fd, const void *buf, size_t count)
|
||||
int fd = (int)PT_REGS_PARM1(ctx);
|
||||
size_t count = (size_t)PT_REGS_PARM3(ctx);
|
||||
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
struct rw_args args = { .fd = fd, .count = count };
|
||||
bpf_map_update_elem(&rw_args_map, &pid_tgid, &args, BPF_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uretprobe/libc:write")
|
||||
int uretprobe_write(struct pt_regs *ctx) {
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
|
||||
struct rw_args *args = bpf_map_lookup_elem(&rw_args_map, &pid_tgid);
|
||||
if (!args) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
ssize_t bytes = (ssize_t)PT_REGS_RET(ctx);
|
||||
if (bytes > 0) {
|
||||
u32 pid = pid_tgid >> 32;
|
||||
u64 key = ((u64)pid << 32) | (u32)args->fd;
|
||||
|
||||
struct fd_stats *stats = bpf_map_lookup_elem(&fd_byte_counts, &key);
|
||||
if (stats) {
|
||||
__sync_fetch_and_add(&stats->bytes_written, bytes);
|
||||
__sync_fetch_and_add(&stats->write_count, 1);
|
||||
} else {
|
||||
struct fd_stats new_stats = {
|
||||
.bytes_read = 0,
|
||||
.bytes_written = bytes,
|
||||
.read_count = 0,
|
||||
.write_count = 1,
|
||||
};
|
||||
bpf_map_update_elem(&fd_byte_counts, &key, &new_stats, BPF_NOEXIST);
|
||||
}
|
||||
}
|
||||
|
||||
bpf_map_delete_elem(&rw_args_map, &pid_tgid);
|
||||
return 0;
|
||||
}
|
||||
@@ -0,0 +1,322 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
// Stella Ops eBPF probe: OpenSSL SSL_read/SSL_write uprobes
|
||||
// Captures TLS operation evidence for encrypted traffic correlation
|
||||
|
||||
#include "stella_common.h"
|
||||
|
||||
char LICENSE[] SEC("license") = "Dual BSD/GPL";
|
||||
|
||||
#define SSL_OP_READ 0
|
||||
#define SSL_OP_WRITE 1
|
||||
|
||||
// Track in-flight SSL operations
|
||||
struct ssl_args {
|
||||
u64 ssl_ptr; // SSL* pointer for correlation
|
||||
u32 requested_bytes; // Bytes requested
|
||||
u8 operation; // SSL_OP_READ or SSL_OP_WRITE
|
||||
u8 reserved[3];
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 10000);
|
||||
__type(key, u64); // pid_tgid
|
||||
__type(value, struct ssl_args);
|
||||
} ssl_args_map SEC(".maps");
|
||||
|
||||
// Aggregate SSL traffic per (pid, ssl_ptr) to reduce event volume
|
||||
struct ssl_session_stats {
|
||||
u64 bytes_read;
|
||||
u64 bytes_written;
|
||||
u64 read_count;
|
||||
u64 write_count;
|
||||
u64 first_seen_ns;
|
||||
u64 last_seen_ns;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_LRU_HASH);
|
||||
__uint(max_entries, 50000);
|
||||
__type(key, u64); // (pid << 32) | (ssl_ptr & 0xFFFFFFFF)
|
||||
__type(value, struct ssl_session_stats);
|
||||
} ssl_session_stats SEC(".maps");
|
||||
|
||||
// Configuration
|
||||
struct ssl_config {
|
||||
u8 emit_per_call; // Emit event per call (vs aggregated)
|
||||
u8 min_bytes_threshold; // Minimum bytes to emit event (0 = all)
|
||||
u8 reserved[6];
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, u32);
|
||||
__type(value, struct ssl_config);
|
||||
} ssl_op_config SEC(".maps");
|
||||
|
||||
// ============================================================================
|
||||
// SSL_read probes
|
||||
// ============================================================================
|
||||
|
||||
// int SSL_read(SSL *ssl, void *buf, int num)
|
||||
SEC("uprobe/libssl:SSL_read")
|
||||
int uprobe_ssl_read(struct pt_regs *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
u64 ssl_ptr = (u64)PT_REGS_PARM1(ctx);
|
||||
int num = (int)PT_REGS_PARM3(ctx);
|
||||
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
struct ssl_args args = {
|
||||
.ssl_ptr = ssl_ptr,
|
||||
.requested_bytes = (u32)num,
|
||||
.operation = SSL_OP_READ,
|
||||
};
|
||||
|
||||
bpf_map_update_elem(&ssl_args_map, &pid_tgid, &args, BPF_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uretprobe/libssl:SSL_read")
|
||||
int uretprobe_ssl_read(struct pt_regs *ctx) {
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
|
||||
struct ssl_args *args = bpf_map_lookup_elem(&ssl_args_map, &pid_tgid);
|
||||
if (!args) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bytes = (int)PT_REGS_RET(ctx);
|
||||
u32 actual_bytes = (bytes > 0) ? (u32)bytes : 0;
|
||||
|
||||
// Update session stats
|
||||
u32 pid = pid_tgid >> 32;
|
||||
u64 session_key = ((u64)pid << 32) | (args->ssl_ptr & 0xFFFFFFFF);
|
||||
|
||||
struct ssl_session_stats *stats = bpf_map_lookup_elem(&ssl_session_stats, &session_key);
|
||||
u64 now_ns = bpf_ktime_get_boot_ns();
|
||||
|
||||
if (stats) {
|
||||
if (actual_bytes > 0) {
|
||||
__sync_fetch_and_add(&stats->bytes_read, actual_bytes);
|
||||
__sync_fetch_and_add(&stats->read_count, 1);
|
||||
}
|
||||
stats->last_seen_ns = now_ns;
|
||||
} else {
|
||||
struct ssl_session_stats new_stats = {
|
||||
.bytes_read = actual_bytes,
|
||||
.bytes_written = 0,
|
||||
.read_count = (actual_bytes > 0) ? 1 : 0,
|
||||
.write_count = 0,
|
||||
.first_seen_ns = now_ns,
|
||||
.last_seen_ns = now_ns,
|
||||
};
|
||||
bpf_map_update_elem(&ssl_session_stats, &session_key, &new_stats, BPF_NOEXIST);
|
||||
}
|
||||
|
||||
// Check if we should emit per-call events
|
||||
u32 zero = 0;
|
||||
struct ssl_config *cfg = bpf_map_lookup_elem(&ssl_op_config, &zero);
|
||||
|
||||
bool emit_event = true;
|
||||
if (cfg) {
|
||||
if (!cfg->emit_per_call) {
|
||||
emit_event = false; // Only aggregate, don't emit per-call
|
||||
}
|
||||
if (cfg->min_bytes_threshold > 0 && actual_bytes < cfg->min_bytes_threshold) {
|
||||
emit_event = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (emit_event && actual_bytes > 0) {
|
||||
struct ssl_op_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (event) {
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_SSL_OP);
|
||||
event->ssl_ptr = args->ssl_ptr;
|
||||
event->requested_bytes = args->requested_bytes;
|
||||
event->actual_bytes = actual_bytes;
|
||||
event->operation = SSL_OP_READ;
|
||||
__builtin_memset(event->reserved, 0, sizeof(event->reserved));
|
||||
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
update_stats(false, false);
|
||||
} else {
|
||||
update_stats(true, false);
|
||||
}
|
||||
}
|
||||
|
||||
bpf_map_delete_elem(&ssl_args_map, &pid_tgid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// SSL_write probes
|
||||
// ============================================================================
|
||||
|
||||
// int SSL_write(SSL *ssl, const void *buf, int num)
|
||||
SEC("uprobe/libssl:SSL_write")
|
||||
int uprobe_ssl_write(struct pt_regs *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
u64 ssl_ptr = (u64)PT_REGS_PARM1(ctx);
|
||||
int num = (int)PT_REGS_PARM3(ctx);
|
||||
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
struct ssl_args args = {
|
||||
.ssl_ptr = ssl_ptr,
|
||||
.requested_bytes = (u32)num,
|
||||
.operation = SSL_OP_WRITE,
|
||||
};
|
||||
|
||||
bpf_map_update_elem(&ssl_args_map, &pid_tgid, &args, BPF_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uretprobe/libssl:SSL_write")
|
||||
int uretprobe_ssl_write(struct pt_regs *ctx) {
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
|
||||
struct ssl_args *args = bpf_map_lookup_elem(&ssl_args_map, &pid_tgid);
|
||||
if (!args) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bytes = (int)PT_REGS_RET(ctx);
|
||||
u32 actual_bytes = (bytes > 0) ? (u32)bytes : 0;
|
||||
|
||||
// Update session stats
|
||||
u32 pid = pid_tgid >> 32;
|
||||
u64 session_key = ((u64)pid << 32) | (args->ssl_ptr & 0xFFFFFFFF);
|
||||
|
||||
struct ssl_session_stats *stats = bpf_map_lookup_elem(&ssl_session_stats, &session_key);
|
||||
u64 now_ns = bpf_ktime_get_boot_ns();
|
||||
|
||||
if (stats) {
|
||||
if (actual_bytes > 0) {
|
||||
__sync_fetch_and_add(&stats->bytes_written, actual_bytes);
|
||||
__sync_fetch_and_add(&stats->write_count, 1);
|
||||
}
|
||||
stats->last_seen_ns = now_ns;
|
||||
} else {
|
||||
struct ssl_session_stats new_stats = {
|
||||
.bytes_read = 0,
|
||||
.bytes_written = actual_bytes,
|
||||
.read_count = 0,
|
||||
.write_count = (actual_bytes > 0) ? 1 : 0,
|
||||
.first_seen_ns = now_ns,
|
||||
.last_seen_ns = now_ns,
|
||||
};
|
||||
bpf_map_update_elem(&ssl_session_stats, &session_key, &new_stats, BPF_NOEXIST);
|
||||
}
|
||||
|
||||
// Check if we should emit per-call events
|
||||
u32 zero = 0;
|
||||
struct ssl_config *cfg = bpf_map_lookup_elem(&ssl_op_config, &zero);
|
||||
|
||||
bool emit_event = true;
|
||||
if (cfg) {
|
||||
if (!cfg->emit_per_call) {
|
||||
emit_event = false;
|
||||
}
|
||||
if (cfg->min_bytes_threshold > 0 && actual_bytes < cfg->min_bytes_threshold) {
|
||||
emit_event = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (emit_event && actual_bytes > 0) {
|
||||
struct ssl_op_event *event;
|
||||
event = bpf_ringbuf_reserve(&events, sizeof(*event), 0);
|
||||
if (event) {
|
||||
fill_event_header(&event->hdr, EVENT_TYPE_SSL_OP);
|
||||
event->ssl_ptr = args->ssl_ptr;
|
||||
event->requested_bytes = args->requested_bytes;
|
||||
event->actual_bytes = actual_bytes;
|
||||
event->operation = SSL_OP_WRITE;
|
||||
__builtin_memset(event->reserved, 0, sizeof(event->reserved));
|
||||
|
||||
bpf_ringbuf_submit(event, 0);
|
||||
update_stats(false, false);
|
||||
} else {
|
||||
update_stats(true, false);
|
||||
}
|
||||
}
|
||||
|
||||
bpf_map_delete_elem(&ssl_args_map, &pid_tgid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// SSL_read_ex / SSL_write_ex (OpenSSL 1.1.1+)
|
||||
// ============================================================================
|
||||
|
||||
// int SSL_read_ex(SSL *ssl, void *buf, size_t num, size_t *readbytes)
|
||||
SEC("uprobe/libssl:SSL_read_ex")
|
||||
int uprobe_ssl_read_ex(struct pt_regs *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
u64 ssl_ptr = (u64)PT_REGS_PARM1(ctx);
|
||||
size_t num = (size_t)PT_REGS_PARM3(ctx);
|
||||
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
struct ssl_args args = {
|
||||
.ssl_ptr = ssl_ptr,
|
||||
.requested_bytes = (u32)num,
|
||||
.operation = SSL_OP_READ,
|
||||
};
|
||||
|
||||
bpf_map_update_elem(&ssl_args_map, &pid_tgid, &args, BPF_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// For SSL_read_ex, return value is 1 on success, 0 on failure
|
||||
// Actual bytes are in *readbytes parameter - we'd need to track that pointer
|
||||
// For simplicity, use the same return handling as SSL_read
|
||||
SEC("uretprobe/libssl:SSL_read_ex")
|
||||
int uretprobe_ssl_read_ex(struct pt_regs *ctx) {
|
||||
return uretprobe_ssl_read(ctx);
|
||||
}
|
||||
|
||||
SEC("uprobe/libssl:SSL_write_ex")
|
||||
int uprobe_ssl_write_ex(struct pt_regs *ctx) {
|
||||
u64 cgroup_id = bpf_get_current_cgroup_id();
|
||||
|
||||
if (!should_trace_cgroup(cgroup_id)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
u64 ssl_ptr = (u64)PT_REGS_PARM1(ctx);
|
||||
size_t num = (size_t)PT_REGS_PARM3(ctx);
|
||||
|
||||
u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
struct ssl_args args = {
|
||||
.ssl_ptr = ssl_ptr,
|
||||
.requested_bytes = (u32)num,
|
||||
.operation = SSL_OP_WRITE,
|
||||
};
|
||||
|
||||
bpf_map_update_elem(&ssl_args_map, &pid_tgid, &args, BPF_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("uretprobe/libssl:SSL_write_ex")
|
||||
int uretprobe_ssl_write_ex(struct pt_regs *ctx) {
|
||||
return uretprobe_ssl_write(ctx);
|
||||
}
|
||||
@@ -0,0 +1,205 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
// Minimal vmlinux subset for Stella Ops eBPF probes
|
||||
// This provides kernel type definitions needed for CO-RE without full vmlinux.h
|
||||
|
||||
#ifndef __VMLINUX_SUBSET_H__
|
||||
#define __VMLINUX_SUBSET_H__
|
||||
|
||||
typedef unsigned char __u8;
|
||||
typedef unsigned short __u16;
|
||||
typedef unsigned int __u32;
|
||||
typedef unsigned long long __u64;
|
||||
typedef signed char __s8;
|
||||
typedef signed short __s16;
|
||||
typedef signed int __s32;
|
||||
typedef signed long long __s64;
|
||||
|
||||
typedef __u8 u8;
|
||||
typedef __u16 u16;
|
||||
typedef __u32 u32;
|
||||
typedef __u64 u64;
|
||||
typedef __s8 s8;
|
||||
typedef __s16 s16;
|
||||
typedef __s32 s32;
|
||||
typedef __s64 s64;
|
||||
|
||||
typedef _Bool bool;
|
||||
#define true 1
|
||||
#define false 0
|
||||
|
||||
// Process and task structures
|
||||
struct task_struct {
|
||||
int pid;
|
||||
int tgid;
|
||||
char comm[16];
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
// For ktime_get_boot_ns()
|
||||
typedef u64 ktime_t;
|
||||
|
||||
// Socket address structures
|
||||
struct sockaddr {
|
||||
unsigned short sa_family;
|
||||
char sa_data[14];
|
||||
};
|
||||
|
||||
struct sockaddr_in {
|
||||
unsigned short sin_family;
|
||||
unsigned short sin_port;
|
||||
struct {
|
||||
unsigned int s_addr;
|
||||
} sin_addr;
|
||||
char sin_zero[8];
|
||||
};
|
||||
|
||||
struct sockaddr_in6 {
|
||||
unsigned short sin6_family;
|
||||
unsigned short sin6_port;
|
||||
unsigned int sin6_flowinfo;
|
||||
struct {
|
||||
unsigned char s6_addr[16];
|
||||
} sin6_addr;
|
||||
unsigned int sin6_scope_id;
|
||||
};
|
||||
|
||||
// File descriptor table
|
||||
struct file {
|
||||
void *f_path;
|
||||
void *f_inode;
|
||||
unsigned int f_flags;
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
// Tracepoint context for syscalls
|
||||
struct trace_event_raw_sys_enter {
|
||||
unsigned short common_type;
|
||||
unsigned char common_flags;
|
||||
unsigned char common_preempt_count;
|
||||
int common_pid;
|
||||
long id;
|
||||
unsigned long args[6];
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
struct trace_event_raw_sys_exit {
|
||||
unsigned short common_type;
|
||||
unsigned char common_flags;
|
||||
unsigned char common_preempt_count;
|
||||
int common_pid;
|
||||
long id;
|
||||
long ret;
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
// Tracepoint context for sched events
|
||||
struct trace_event_raw_sched_process_exec {
|
||||
unsigned short common_type;
|
||||
unsigned char common_flags;
|
||||
unsigned char common_preempt_count;
|
||||
int common_pid;
|
||||
int __data_loc_filename;
|
||||
int pid;
|
||||
int old_pid;
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
// TCP state tracepoint
|
||||
struct trace_event_raw_inet_sock_set_state {
|
||||
unsigned short common_type;
|
||||
unsigned char common_flags;
|
||||
unsigned char common_preempt_count;
|
||||
int common_pid;
|
||||
const void *skaddr;
|
||||
int oldstate;
|
||||
int newstate;
|
||||
unsigned short sport;
|
||||
unsigned short dport;
|
||||
unsigned short family;
|
||||
unsigned short protocol;
|
||||
unsigned char saddr[4];
|
||||
unsigned char daddr[4];
|
||||
unsigned char saddr_v6[16];
|
||||
unsigned char daddr_v6[16];
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
// PT_REGS for uprobes
|
||||
struct pt_regs {
|
||||
#if defined(__x86_64__)
|
||||
unsigned long r15;
|
||||
unsigned long r14;
|
||||
unsigned long r13;
|
||||
unsigned long r12;
|
||||
unsigned long rbp;
|
||||
unsigned long rbx;
|
||||
unsigned long r11;
|
||||
unsigned long r10;
|
||||
unsigned long r9;
|
||||
unsigned long r8;
|
||||
unsigned long rax;
|
||||
unsigned long rcx;
|
||||
unsigned long rdx;
|
||||
unsigned long rsi;
|
||||
unsigned long rdi;
|
||||
unsigned long orig_rax;
|
||||
unsigned long rip;
|
||||
unsigned long cs;
|
||||
unsigned long eflags;
|
||||
unsigned long rsp;
|
||||
unsigned long ss;
|
||||
#elif defined(__aarch64__)
|
||||
unsigned long regs[31];
|
||||
unsigned long sp;
|
||||
unsigned long pc;
|
||||
unsigned long pstate;
|
||||
#endif
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
// Helper macros for argument access
|
||||
#if defined(__x86_64__)
|
||||
#define PT_REGS_PARM1(x) ((x)->rdi)
|
||||
#define PT_REGS_PARM2(x) ((x)->rsi)
|
||||
#define PT_REGS_PARM3(x) ((x)->rdx)
|
||||
#define PT_REGS_PARM4(x) ((x)->rcx)
|
||||
#define PT_REGS_PARM5(x) ((x)->r8)
|
||||
#define PT_REGS_PARM6(x) ((x)->r9)
|
||||
#define PT_REGS_RET(x) ((x)->rax)
|
||||
#define PT_REGS_IP(x) ((x)->rip)
|
||||
#define PT_REGS_SP(x) ((x)->rsp)
|
||||
#elif defined(__aarch64__)
|
||||
#define PT_REGS_PARM1(x) ((x)->regs[0])
|
||||
#define PT_REGS_PARM2(x) ((x)->regs[1])
|
||||
#define PT_REGS_PARM3(x) ((x)->regs[2])
|
||||
#define PT_REGS_PARM4(x) ((x)->regs[3])
|
||||
#define PT_REGS_PARM5(x) ((x)->regs[4])
|
||||
#define PT_REGS_PARM6(x) ((x)->regs[5])
|
||||
#define PT_REGS_RET(x) ((x)->regs[0])
|
||||
#define PT_REGS_IP(x) ((x)->pc)
|
||||
#define PT_REGS_SP(x) ((x)->sp)
|
||||
#endif
|
||||
|
||||
// TCP states
|
||||
enum {
|
||||
TCP_ESTABLISHED = 1,
|
||||
TCP_SYN_SENT = 2,
|
||||
TCP_SYN_RECV = 3,
|
||||
TCP_FIN_WAIT1 = 4,
|
||||
TCP_FIN_WAIT2 = 5,
|
||||
TCP_TIME_WAIT = 6,
|
||||
TCP_CLOSE = 7,
|
||||
TCP_CLOSE_WAIT = 8,
|
||||
TCP_LAST_ACK = 9,
|
||||
TCP_LISTEN = 10,
|
||||
TCP_CLOSING = 11,
|
||||
TCP_NEW_SYN_RECV = 12,
|
||||
};
|
||||
|
||||
// Address families
|
||||
#define AF_INET 2
|
||||
#define AF_INET6 10
|
||||
|
||||
// Open flags
|
||||
#define O_RDONLY 0x0000
|
||||
#define O_WRONLY 0x0001
|
||||
#define O_RDWR 0x0002
|
||||
#define O_CREAT 0x0040
|
||||
#define O_EXCL 0x0080
|
||||
#define O_TRUNC 0x0200
|
||||
#define O_APPEND 0x0400
|
||||
|
||||
#endif // __VMLINUX_SUBSET_H__
|
||||
@@ -8,6 +8,7 @@ using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Signals.Ebpf.Services;
|
||||
using StellaOps.Signals.Ebpf.Symbols;
|
||||
|
||||
/// <summary>
|
||||
/// CO-RE (Compile Once, Run Everywhere) eBPF probe loader.
|
||||
@@ -355,140 +356,3 @@ public sealed class CoreProbeLoader : IEbpfProbeLoader
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Symbol resolver interface.
|
||||
/// </summary>
|
||||
public interface ISymbolResolver
|
||||
{
|
||||
/// <summary>
|
||||
/// Resolves an address to symbol information.
|
||||
/// </summary>
|
||||
(string? Symbol, string? Library, string? Purl) Resolve(int pid, ulong address);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// ELF symbol resolver using /proc/pid/maps and symbol tables.
|
||||
/// </summary>
|
||||
public sealed class ElfSymbolResolver : ISymbolResolver
|
||||
{
|
||||
private readonly ILogger<ElfSymbolResolver> _logger;
|
||||
private readonly Dictionary<int, ProcessMaps> _processCache;
|
||||
private readonly object _cacheLock = new();
|
||||
|
||||
public ElfSymbolResolver(ILogger<ElfSymbolResolver> logger)
|
||||
{
|
||||
_logger = logger;
|
||||
_processCache = [];
|
||||
}
|
||||
|
||||
public (string? Symbol, string? Library, string? Purl) Resolve(int pid, ulong address)
|
||||
{
|
||||
try
|
||||
{
|
||||
var maps = GetProcessMaps(pid);
|
||||
|
||||
// Find the mapping containing this address
|
||||
foreach (var mapping in maps.Mappings)
|
||||
{
|
||||
if (address >= mapping.StartAddress && address < mapping.EndAddress)
|
||||
{
|
||||
var offset = address - mapping.StartAddress + mapping.FileOffset;
|
||||
|
||||
// In real impl, would read ELF symbol table
|
||||
return (null, mapping.Pathname, null);
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogDebug(ex, "Failed to resolve symbol for PID {Pid} address {Address:X16}", pid, address);
|
||||
}
|
||||
|
||||
return (null, null, null);
|
||||
}
|
||||
|
||||
private ProcessMaps GetProcessMaps(int pid)
|
||||
{
|
||||
lock (_cacheLock)
|
||||
{
|
||||
if (_processCache.TryGetValue(pid, out var cached))
|
||||
{
|
||||
return cached;
|
||||
}
|
||||
}
|
||||
|
||||
var maps = ParseMaps(pid);
|
||||
|
||||
lock (_cacheLock)
|
||||
{
|
||||
_processCache[pid] = maps;
|
||||
}
|
||||
|
||||
return maps;
|
||||
}
|
||||
|
||||
private static ProcessMaps ParseMaps(int pid)
|
||||
{
|
||||
var mapsPath = $"/proc/{pid}/maps";
|
||||
var mappings = new List<MemoryMapping>();
|
||||
|
||||
if (!File.Exists(mapsPath))
|
||||
{
|
||||
return new ProcessMaps { Mappings = mappings };
|
||||
}
|
||||
|
||||
foreach (var line in File.ReadLines(mapsPath))
|
||||
{
|
||||
// Parse: address perms offset dev inode pathname
|
||||
var parts = line.Split(' ', StringSplitOptions.RemoveEmptyEntries);
|
||||
if (parts.Length < 5)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
var addrParts = parts[0].Split('-');
|
||||
if (addrParts.Length != 2)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!ulong.TryParse(addrParts[0], System.Globalization.NumberStyles.HexNumber, null, out var start))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!ulong.TryParse(addrParts[1], System.Globalization.NumberStyles.HexNumber, null, out var end))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
_ = ulong.TryParse(parts[2], System.Globalization.NumberStyles.HexNumber, null, out var offset);
|
||||
|
||||
var pathname = parts.Length > 5 ? parts[5] : null;
|
||||
|
||||
mappings.Add(new MemoryMapping
|
||||
{
|
||||
StartAddress = start,
|
||||
EndAddress = end,
|
||||
FileOffset = offset,
|
||||
Pathname = pathname,
|
||||
});
|
||||
}
|
||||
|
||||
return new ProcessMaps { Mappings = mappings };
|
||||
}
|
||||
|
||||
private sealed record ProcessMaps
|
||||
{
|
||||
public required IReadOnlyList<MemoryMapping> Mappings { get; init; }
|
||||
}
|
||||
|
||||
private sealed record MemoryMapping
|
||||
{
|
||||
public required ulong StartAddress { get; init; }
|
||||
public required ulong EndAddress { get; init; }
|
||||
public required ulong FileOffset { get; init; }
|
||||
public string? Pathname { get; init; }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,370 @@
|
||||
// <copyright file="RuntimeEvidence.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Schema;
|
||||
|
||||
using System.Net;
|
||||
using System.Text.Json.Serialization;
|
||||
|
||||
/// <summary>
|
||||
/// Unified runtime evidence record for NDJSON output.
|
||||
/// This is the canonical schema for all syscall and symbol-level evidence.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Schema version: runtime-evidence/v1
|
||||
/// All timestamps are in nanoseconds since boot for high precision.
|
||||
/// Container/image enrichment is added post-collection.
|
||||
/// </remarks>
|
||||
public sealed record RuntimeEvidenceRecord
|
||||
{
|
||||
/// <summary>
|
||||
/// Timestamp in nanoseconds since boot.
|
||||
/// </summary>
|
||||
[JsonPropertyName("ts_ns")]
|
||||
public required ulong TimestampNs { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Event source identifier (probe name).
|
||||
/// Examples: "sys_enter_openat", "sched_process_exec", "uprobe:SSL_read"
|
||||
/// </summary>
|
||||
[JsonPropertyName("src")]
|
||||
public required string Source { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Process ID.
|
||||
/// </summary>
|
||||
[JsonPropertyName("pid")]
|
||||
public required int Pid { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Thread ID.
|
||||
/// </summary>
|
||||
[JsonPropertyName("tid")]
|
||||
public int Tid { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Cgroup ID for container identification.
|
||||
/// </summary>
|
||||
[JsonPropertyName("cgroup_id")]
|
||||
public ulong CgroupId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Container ID (enriched post-collection).
|
||||
/// Format: "{runtime}://{id}" (e.g., "containerd://abc123...")
|
||||
/// </summary>
|
||||
[JsonPropertyName("container_id")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string? ContainerId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Image digest (enriched post-collection).
|
||||
/// Format: "sha256:{hex}"
|
||||
/// </summary>
|
||||
[JsonPropertyName("image_digest")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string? ImageDigest { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Process command name (up to 16 chars).
|
||||
/// </summary>
|
||||
[JsonPropertyName("comm")]
|
||||
public required string Comm { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Event-specific data.
|
||||
/// </summary>
|
||||
[JsonPropertyName("event")]
|
||||
public required RuntimeEventData Event { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Base class for event-specific data.
|
||||
/// </summary>
|
||||
[JsonPolymorphic(TypeDiscriminatorPropertyName = "type")]
|
||||
[JsonDerivedType(typeof(FileOpenEvent), "file_open")]
|
||||
[JsonDerivedType(typeof(ProcessExecEvent), "process_exec")]
|
||||
[JsonDerivedType(typeof(TcpStateEvent), "tcp_state")]
|
||||
[JsonDerivedType(typeof(NetConnectEvent), "net_connect")]
|
||||
[JsonDerivedType(typeof(SslOpEvent), "ssl_op")]
|
||||
[JsonDerivedType(typeof(FunctionCallEvent), "function_call")]
|
||||
public abstract record RuntimeEventData;
|
||||
|
||||
/// <summary>
|
||||
/// File open event data.
|
||||
/// </summary>
|
||||
public sealed record FileOpenEvent : RuntimeEventData
|
||||
{
|
||||
/// <summary>
|
||||
/// Opened file path.
|
||||
/// </summary>
|
||||
[JsonPropertyName("path")]
|
||||
public required string Path { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Open flags (O_RDONLY=0, O_WRONLY=1, O_RDWR=2, etc.).
|
||||
/// </summary>
|
||||
[JsonPropertyName("flags")]
|
||||
public int Flags { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Human-readable access mode.
|
||||
/// </summary>
|
||||
[JsonPropertyName("access")]
|
||||
public string Access => (Flags & 0x3) switch
|
||||
{
|
||||
0 => "read",
|
||||
1 => "write",
|
||||
2 => "read_write",
|
||||
_ => "unknown",
|
||||
};
|
||||
|
||||
/// <summary>
|
||||
/// Directory file descriptor (-100 = AT_FDCWD).
|
||||
/// </summary>
|
||||
[JsonPropertyName("dfd")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
|
||||
public int Dfd { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// File mode for O_CREAT.
|
||||
/// </summary>
|
||||
[JsonPropertyName("mode")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
|
||||
public int Mode { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Process execution event data.
|
||||
/// </summary>
|
||||
public sealed record ProcessExecEvent : RuntimeEventData
|
||||
{
|
||||
/// <summary>
|
||||
/// Executed file path.
|
||||
/// </summary>
|
||||
[JsonPropertyName("filename")]
|
||||
public required string Filename { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Parent process ID.
|
||||
/// </summary>
|
||||
[JsonPropertyName("ppid")]
|
||||
public int Ppid { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// First argument (argv[0]).
|
||||
/// </summary>
|
||||
[JsonPropertyName("argv0")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string? Argv0 { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// TCP state change event data.
|
||||
/// </summary>
|
||||
public sealed record TcpStateEvent : RuntimeEventData
|
||||
{
|
||||
/// <summary>
|
||||
/// Previous TCP state.
|
||||
/// </summary>
|
||||
[JsonPropertyName("oldstate")]
|
||||
public required string OldState { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// New TCP state.
|
||||
/// </summary>
|
||||
[JsonPropertyName("newstate")]
|
||||
public required string NewState { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Destination address (IPv4 or IPv6).
|
||||
/// </summary>
|
||||
[JsonPropertyName("daddr")]
|
||||
public required string DestAddress { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Destination port.
|
||||
/// </summary>
|
||||
[JsonPropertyName("dport")]
|
||||
public required int DestPort { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Source port.
|
||||
/// </summary>
|
||||
[JsonPropertyName("sport")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
|
||||
public int SourcePort { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Source address.
|
||||
/// </summary>
|
||||
[JsonPropertyName("saddr")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string? SourceAddress { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Address family (inet or inet6).
|
||||
/// </summary>
|
||||
[JsonPropertyName("family")]
|
||||
public string Family { get; init; } = "inet";
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Network connect/accept event data.
|
||||
/// </summary>
|
||||
public sealed record NetConnectEvent : RuntimeEventData
|
||||
{
|
||||
/// <summary>
|
||||
/// Socket file descriptor.
|
||||
/// </summary>
|
||||
[JsonPropertyName("fd")]
|
||||
public int Fd { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Remote address.
|
||||
/// </summary>
|
||||
[JsonPropertyName("addr")]
|
||||
public required string Address { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Remote port.
|
||||
/// </summary>
|
||||
[JsonPropertyName("port")]
|
||||
public required int Port { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Whether the operation succeeded.
|
||||
/// </summary>
|
||||
[JsonPropertyName("success")]
|
||||
public bool Success { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Error code if failed.
|
||||
/// </summary>
|
||||
[JsonPropertyName("error")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
|
||||
public int Error { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// SSL/TLS operation event data.
|
||||
/// </summary>
|
||||
public sealed record SslOpEvent : RuntimeEventData
|
||||
{
|
||||
/// <summary>
|
||||
/// Operation type (read or write).
|
||||
/// </summary>
|
||||
[JsonPropertyName("operation")]
|
||||
public required string Operation { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Bytes actually transferred.
|
||||
/// </summary>
|
||||
[JsonPropertyName("bytes")]
|
||||
public int Bytes { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// SSL session pointer (for correlation).
|
||||
/// </summary>
|
||||
[JsonPropertyName("ssl_ptr")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string? SslPtr { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Function call event data.
|
||||
/// </summary>
|
||||
public sealed record FunctionCallEvent : RuntimeEventData
|
||||
{
|
||||
/// <summary>
|
||||
/// Called function address.
|
||||
/// </summary>
|
||||
[JsonPropertyName("addr")]
|
||||
public required string Address { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Resolved symbol name (if available).
|
||||
/// </summary>
|
||||
[JsonPropertyName("symbol")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string? Symbol { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Library containing the function.
|
||||
/// </summary>
|
||||
[JsonPropertyName("library")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string? Library { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Runtime type detected.
|
||||
/// </summary>
|
||||
[JsonPropertyName("runtime")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string? Runtime { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Call stack (addresses).
|
||||
/// </summary>
|
||||
[JsonPropertyName("stack")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public IReadOnlyList<string>? Stack { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Node hash for reachability joining.
|
||||
/// </summary>
|
||||
[JsonPropertyName("node_hash")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string? NodeHash { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Helper methods for TCP state conversion.
|
||||
/// </summary>
|
||||
public static class TcpStateHelper
|
||||
{
|
||||
/// <summary>
|
||||
/// Convert TCP state byte to string.
|
||||
/// </summary>
|
||||
public static string ToString(byte state) => state switch
|
||||
{
|
||||
1 => "ESTABLISHED",
|
||||
2 => "SYN_SENT",
|
||||
3 => "SYN_RECV",
|
||||
4 => "FIN_WAIT1",
|
||||
5 => "FIN_WAIT2",
|
||||
6 => "TIME_WAIT",
|
||||
7 => "CLOSE",
|
||||
8 => "CLOSE_WAIT",
|
||||
9 => "LAST_ACK",
|
||||
10 => "LISTEN",
|
||||
11 => "CLOSING",
|
||||
12 => "NEW_SYN_RECV",
|
||||
_ => $"UNKNOWN({state})",
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Helper methods for IP address formatting.
|
||||
/// </summary>
|
||||
public static class IpAddressHelper
|
||||
{
|
||||
/// <summary>
|
||||
/// Convert IPv4 address from network byte order to string.
|
||||
/// </summary>
|
||||
public static string FormatIPv4(uint addr)
|
||||
{
|
||||
var bytes = BitConverter.GetBytes(addr);
|
||||
return $"{bytes[0]}.{bytes[1]}.{bytes[2]}.{bytes[3]}";
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Convert IPv6 address bytes to string.
|
||||
/// </summary>
|
||||
public static string FormatIPv6(byte[] addr)
|
||||
{
|
||||
if (addr == null || addr.Length != 16)
|
||||
return "::";
|
||||
return new IPAddress(addr).ToString();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,336 @@
|
||||
// <copyright file="SyscallEvents.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Schema;
|
||||
|
||||
using System.Net;
|
||||
using System.Runtime.InteropServices;
|
||||
using System.Text.Json.Serialization;
|
||||
|
||||
/// <summary>
|
||||
/// Event types emitted by eBPF probes.
|
||||
/// </summary>
|
||||
public enum EbpfEventType : byte
|
||||
{
|
||||
/// <summary>Function call observation (uprobe).</summary>
|
||||
FunctionCall = 1,
|
||||
|
||||
/// <summary>File open syscall.</summary>
|
||||
FileOpen = 2,
|
||||
|
||||
/// <summary>Process execution.</summary>
|
||||
ProcessExec = 3,
|
||||
|
||||
/// <summary>TCP state change.</summary>
|
||||
TcpState = 4,
|
||||
|
||||
/// <summary>Network connect/accept operation.</summary>
|
||||
NetConnect = 5,
|
||||
|
||||
/// <summary>SSL/TLS read/write operation.</summary>
|
||||
SslOp = 6,
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Common header for all eBPF events.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Maps to struct event_header in stella_common.h.
|
||||
/// All multi-byte fields are in host byte order.
|
||||
/// </remarks>
|
||||
[StructLayout(LayoutKind.Sequential, Pack = 1)]
|
||||
public readonly struct EventHeader
|
||||
{
|
||||
/// <summary>Timestamp in nanoseconds since boot.</summary>
|
||||
public readonly ulong TimestampNs;
|
||||
|
||||
/// <summary>Process ID.</summary>
|
||||
public readonly uint Pid;
|
||||
|
||||
/// <summary>Thread ID.</summary>
|
||||
public readonly uint Tid;
|
||||
|
||||
/// <summary>Cgroup ID for container identification.</summary>
|
||||
public readonly ulong CgroupId;
|
||||
|
||||
/// <summary>Event type discriminator.</summary>
|
||||
public readonly EbpfEventType EventType;
|
||||
|
||||
/// <summary>Reserved for alignment.</summary>
|
||||
private readonly byte _reserved1;
|
||||
private readonly byte _reserved2;
|
||||
private readonly byte _reserved3;
|
||||
private readonly byte _reserved4;
|
||||
private readonly byte _reserved5;
|
||||
private readonly byte _reserved6;
|
||||
private readonly byte _reserved7;
|
||||
|
||||
/// <summary>Process command name (TASK_COMM_LEN = 16).</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 16)]
|
||||
private readonly byte[] _comm;
|
||||
|
||||
/// <summary>Gets the process command name as a string.</summary>
|
||||
public string Comm => GetNullTerminatedString(_comm);
|
||||
|
||||
private static string GetNullTerminatedString(byte[]? bytes)
|
||||
{
|
||||
if (bytes == null) return string.Empty;
|
||||
var nullIndex = Array.IndexOf(bytes, (byte)0);
|
||||
var length = nullIndex >= 0 ? nullIndex : bytes.Length;
|
||||
return System.Text.Encoding.UTF8.GetString(bytes, 0, length);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// File open event from sys_enter_openat tracepoint.
|
||||
/// </summary>
|
||||
[StructLayout(LayoutKind.Sequential, Pack = 1)]
|
||||
public readonly struct FileOpenEventRaw
|
||||
{
|
||||
/// <summary>Common event header.</summary>
|
||||
public readonly EventHeader Header;
|
||||
|
||||
/// <summary>Directory file descriptor (AT_FDCWD = -100 for relative paths).</summary>
|
||||
public readonly int Dfd;
|
||||
|
||||
/// <summary>Open flags (O_RDONLY, O_WRONLY, etc.).</summary>
|
||||
public readonly int Flags;
|
||||
|
||||
/// <summary>File mode (for O_CREAT).</summary>
|
||||
public readonly ushort Mode;
|
||||
|
||||
/// <summary>Reserved.</summary>
|
||||
private readonly ushort _reserved;
|
||||
|
||||
/// <summary>Filename (MAX_FILENAME_LEN = 256).</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 256)]
|
||||
private readonly byte[] _filename;
|
||||
|
||||
/// <summary>Gets the filename as a string.</summary>
|
||||
public string Filename => GetNullTerminatedString(_filename);
|
||||
|
||||
private static string GetNullTerminatedString(byte[]? bytes)
|
||||
{
|
||||
if (bytes == null) return string.Empty;
|
||||
var nullIndex = Array.IndexOf(bytes, (byte)0);
|
||||
var length = nullIndex >= 0 ? nullIndex : bytes.Length;
|
||||
return System.Text.Encoding.UTF8.GetString(bytes, 0, length);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Process execution event from sched_process_exec tracepoint.
|
||||
/// </summary>
|
||||
[StructLayout(LayoutKind.Sequential, Pack = 1)]
|
||||
public readonly struct ProcessExecEventRaw
|
||||
{
|
||||
/// <summary>Common event header.</summary>
|
||||
public readonly EventHeader Header;
|
||||
|
||||
/// <summary>Parent process ID.</summary>
|
||||
public readonly uint Ppid;
|
||||
|
||||
/// <summary>Reserved.</summary>
|
||||
private readonly uint _reserved;
|
||||
|
||||
/// <summary>Executed filename (MAX_FILENAME_LEN = 256).</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 256)]
|
||||
private readonly byte[] _filename;
|
||||
|
||||
/// <summary>First argument (MAX_ARGV_LEN = 128).</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 128)]
|
||||
private readonly byte[] _argv0;
|
||||
|
||||
/// <summary>Gets the filename as a string.</summary>
|
||||
public string Filename => GetNullTerminatedString(_filename);
|
||||
|
||||
/// <summary>Gets argv[0] as a string.</summary>
|
||||
public string Argv0 => GetNullTerminatedString(_argv0);
|
||||
|
||||
private static string GetNullTerminatedString(byte[]? bytes)
|
||||
{
|
||||
if (bytes == null) return string.Empty;
|
||||
var nullIndex = Array.IndexOf(bytes, (byte)0);
|
||||
var length = nullIndex >= 0 ? nullIndex : bytes.Length;
|
||||
return System.Text.Encoding.UTF8.GetString(bytes, 0, length);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// TCP state change event from inet_sock_set_state tracepoint.
|
||||
/// </summary>
|
||||
[StructLayout(LayoutKind.Sequential, Pack = 1)]
|
||||
public readonly struct TcpStateEventRaw
|
||||
{
|
||||
/// <summary>Common event header.</summary>
|
||||
public readonly EventHeader Header;
|
||||
|
||||
/// <summary>Previous TCP state.</summary>
|
||||
public readonly byte OldState;
|
||||
|
||||
/// <summary>New TCP state.</summary>
|
||||
public readonly byte NewState;
|
||||
|
||||
/// <summary>Address family (AF_INET=2, AF_INET6=10).</summary>
|
||||
public readonly byte Family;
|
||||
|
||||
/// <summary>Protocol (IPPROTO_TCP=6).</summary>
|
||||
public readonly byte Protocol;
|
||||
|
||||
/// <summary>Source port (host byte order).</summary>
|
||||
public readonly ushort Sport;
|
||||
|
||||
/// <summary>Destination port (host byte order).</summary>
|
||||
public readonly ushort Dport;
|
||||
|
||||
/// <summary>IPv4 source address (network byte order).</summary>
|
||||
public readonly uint SaddrV4;
|
||||
|
||||
/// <summary>IPv4 destination address (network byte order).</summary>
|
||||
public readonly uint DaddrV4;
|
||||
|
||||
/// <summary>IPv6 source address.</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 16)]
|
||||
private readonly byte[] _saddrV6;
|
||||
|
||||
/// <summary>IPv6 destination address.</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 16)]
|
||||
private readonly byte[] _daddrV6;
|
||||
|
||||
/// <summary>Gets the IPv6 source address.</summary>
|
||||
public IPAddress SaddrV6 => new(_saddrV6 ?? new byte[16]);
|
||||
|
||||
/// <summary>Gets the IPv6 destination address.</summary>
|
||||
public IPAddress DaddrV6 => new(_daddrV6 ?? new byte[16]);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Network connect/accept event from libc uprobes.
|
||||
/// </summary>
|
||||
[StructLayout(LayoutKind.Sequential, Pack = 1)]
|
||||
public readonly struct NetConnectEventRaw
|
||||
{
|
||||
/// <summary>Common event header.</summary>
|
||||
public readonly EventHeader Header;
|
||||
|
||||
/// <summary>Socket file descriptor.</summary>
|
||||
public readonly int Fd;
|
||||
|
||||
/// <summary>Return value (0 = success).</summary>
|
||||
public readonly int Ret;
|
||||
|
||||
/// <summary>Address family.</summary>
|
||||
public readonly ushort Family;
|
||||
|
||||
/// <summary>Destination port (host byte order).</summary>
|
||||
public readonly ushort Port;
|
||||
|
||||
/// <summary>IPv4 address (network byte order).</summary>
|
||||
public readonly uint AddrV4;
|
||||
|
||||
/// <summary>IPv6 address.</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 16)]
|
||||
private readonly byte[] _addrV6;
|
||||
|
||||
/// <summary>Gets the IPv6 address.</summary>
|
||||
public IPAddress AddrV6 => new(_addrV6 ?? new byte[16]);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// SSL operation event from OpenSSL uprobes.
|
||||
/// </summary>
|
||||
[StructLayout(LayoutKind.Sequential, Pack = 1)]
|
||||
public readonly struct SslOpEventRaw
|
||||
{
|
||||
/// <summary>Common event header.</summary>
|
||||
public readonly EventHeader Header;
|
||||
|
||||
/// <summary>SSL* pointer for session correlation.</summary>
|
||||
public readonly ulong SslPtr;
|
||||
|
||||
/// <summary>Bytes requested.</summary>
|
||||
public readonly uint RequestedBytes;
|
||||
|
||||
/// <summary>Bytes actually transferred.</summary>
|
||||
public readonly uint ActualBytes;
|
||||
|
||||
/// <summary>Operation type (0=read, 1=write).</summary>
|
||||
public readonly byte Operation;
|
||||
|
||||
/// <summary>Reserved.</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 7)]
|
||||
private readonly byte[] _reserved;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Function call event from generic uprobe.
|
||||
/// </summary>
|
||||
[StructLayout(LayoutKind.Sequential, Pack = 1)]
|
||||
public readonly struct FunctionCallEventRaw
|
||||
{
|
||||
/// <summary>Common event header.</summary>
|
||||
public readonly EventHeader Header;
|
||||
|
||||
/// <summary>Called function address.</summary>
|
||||
public readonly ulong FunctionAddr;
|
||||
|
||||
/// <summary>Return address.</summary>
|
||||
public readonly ulong ReturnAddr;
|
||||
|
||||
/// <summary>Call stack addresses (MAX_STACK_DEPTH = 16).</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 16)]
|
||||
public readonly ulong[] StackTrace;
|
||||
|
||||
/// <summary>Actual stack depth captured.</summary>
|
||||
public readonly byte StackDepth;
|
||||
|
||||
/// <summary>Runtime type.</summary>
|
||||
public readonly byte RuntimeTypeRaw;
|
||||
|
||||
/// <summary>Reserved.</summary>
|
||||
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 6)]
|
||||
private readonly byte[] _reserved;
|
||||
|
||||
/// <summary>Gets the runtime type.</summary>
|
||||
public RuntimeType RuntimeType => (RuntimeType)RuntimeTypeRaw;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// TCP connection states.
|
||||
/// </summary>
|
||||
[JsonConverter(typeof(JsonStringEnumConverter))]
|
||||
public enum TcpState : byte
|
||||
{
|
||||
Established = 1,
|
||||
SynSent = 2,
|
||||
SynRecv = 3,
|
||||
FinWait1 = 4,
|
||||
FinWait2 = 5,
|
||||
TimeWait = 6,
|
||||
Close = 7,
|
||||
CloseWait = 8,
|
||||
LastAck = 9,
|
||||
Listen = 10,
|
||||
Closing = 11,
|
||||
NewSynRecv = 12,
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// SSL operation type.
|
||||
/// </summary>
|
||||
[JsonConverter(typeof(JsonStringEnumConverter))]
|
||||
public enum SslOperationType : byte
|
||||
{
|
||||
Read = 0,
|
||||
Write = 1,
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Address family.
|
||||
/// </summary>
|
||||
public enum AddressFamily : ushort
|
||||
{
|
||||
Inet = 2,
|
||||
Inet6 = 10,
|
||||
}
|
||||
@@ -0,0 +1,189 @@
|
||||
// <copyright file="ServiceCollectionExtensions.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf;
|
||||
|
||||
using Microsoft.Extensions.Caching.Memory;
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using Microsoft.Extensions.DependencyInjection.Extensions;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Signals.Ebpf.Cgroup;
|
||||
using StellaOps.Signals.Ebpf.Output;
|
||||
using StellaOps.Signals.Ebpf.Parsers;
|
||||
using StellaOps.Signals.Ebpf.Probes;
|
||||
using StellaOps.Signals.Ebpf.Services;
|
||||
using StellaOps.Signals.Ebpf.Symbols;
|
||||
|
||||
/// <summary>
|
||||
/// DI registration extensions for eBPF runtime evidence collection.
|
||||
/// </summary>
|
||||
public static class ServiceCollectionExtensions
|
||||
{
|
||||
/// <summary>
|
||||
/// Adds eBPF runtime evidence collection services.
|
||||
/// </summary>
|
||||
/// <param name="services">The service collection.</param>
|
||||
/// <param name="configureOptions">Optional configuration callback.</param>
|
||||
/// <returns>The service collection for chaining.</returns>
|
||||
public static IServiceCollection AddEbpfRuntimeEvidence(
|
||||
this IServiceCollection services,
|
||||
Action<EbpfEvidenceOptions>? configureOptions = null)
|
||||
{
|
||||
var options = new EbpfEvidenceOptions();
|
||||
configureOptions?.Invoke(options);
|
||||
|
||||
// Register options
|
||||
services.AddSingleton(options);
|
||||
services.AddSingleton(options.WriterOptions);
|
||||
services.AddSingleton(options.CollectorOptions);
|
||||
|
||||
// Register memory cache if not already registered
|
||||
services.TryAddSingleton<IMemoryCache>(sp =>
|
||||
new MemoryCache(new MemoryCacheOptions
|
||||
{
|
||||
SizeLimit = options.SymbolCacheSizeLimit,
|
||||
}));
|
||||
|
||||
// Register symbol resolver
|
||||
services.AddSingleton<ISymbolResolver>(sp =>
|
||||
{
|
||||
var logger = sp.GetRequiredService<ILogger<EnhancedSymbolResolver>>();
|
||||
var cache = sp.GetRequiredService<IMemoryCache>();
|
||||
return new EnhancedSymbolResolver(logger, cache, options.ProcRoot);
|
||||
});
|
||||
|
||||
// Register cgroup resolver
|
||||
services.AddSingleton(sp =>
|
||||
{
|
||||
var logger = sp.GetRequiredService<ILogger<CgroupContainerResolver>>();
|
||||
return new CgroupContainerResolver(logger, options.ProcRoot, options.CgroupRoot);
|
||||
});
|
||||
|
||||
// Register event parser
|
||||
services.AddSingleton(sp =>
|
||||
{
|
||||
var logger = sp.GetRequiredService<ILogger<EventParser>>();
|
||||
var symbolResolver = sp.GetRequiredService<ISymbolResolver>();
|
||||
return new EventParser(logger, symbolResolver);
|
||||
});
|
||||
|
||||
// Register probe loader
|
||||
services.AddSingleton<IEbpfProbeLoader>(sp =>
|
||||
{
|
||||
var logger = sp.GetRequiredService<ILogger<CoreProbeLoader>>();
|
||||
var symbolResolver = sp.GetRequiredService<ISymbolResolver>();
|
||||
return new CoreProbeLoader(logger, symbolResolver, options.ProbeDirectory);
|
||||
});
|
||||
|
||||
// Register air-gap probe loader separately (different interface)
|
||||
if (options.UseAirGapMode)
|
||||
{
|
||||
services.AddSingleton<IAirGapProbeLoader>(sp =>
|
||||
{
|
||||
var airGapLogger = sp.GetRequiredService<ILogger<AirGapProbeLoader>>();
|
||||
return new AirGapProbeLoader(airGapLogger);
|
||||
});
|
||||
}
|
||||
|
||||
// Register NDJSON writer
|
||||
services.AddSingleton(sp =>
|
||||
{
|
||||
var logger = sp.GetRequiredService<ILogger<RuntimeEvidenceNdjsonWriter>>();
|
||||
return new RuntimeEvidenceNdjsonWriter(
|
||||
logger,
|
||||
options.OutputDirectory,
|
||||
options.WriterOptions);
|
||||
});
|
||||
|
||||
// Register the unified evidence collector
|
||||
services.AddSingleton(sp =>
|
||||
{
|
||||
var logger = sp.GetRequiredService<ILogger<RuntimeEvidenceCollector>>();
|
||||
var probeLoader = sp.GetRequiredService<IEbpfProbeLoader>();
|
||||
var eventParser = sp.GetRequiredService<EventParser>();
|
||||
var cgroupResolver = sp.GetRequiredService<CgroupContainerResolver>();
|
||||
var writer = sp.GetRequiredService<RuntimeEvidenceNdjsonWriter>();
|
||||
return new RuntimeEvidenceCollector(
|
||||
logger,
|
||||
probeLoader,
|
||||
eventParser,
|
||||
cgroupResolver,
|
||||
writer,
|
||||
options.CollectorOptions);
|
||||
});
|
||||
|
||||
// Register the legacy IRuntimeSignalCollector adapter
|
||||
services.AddSingleton<IRuntimeSignalCollector>(sp =>
|
||||
{
|
||||
var logger = sp.GetRequiredService<ILogger<RuntimeSignalCollector>>();
|
||||
var probeLoader = sp.GetRequiredService<IEbpfProbeLoader>();
|
||||
return new RuntimeSignalCollector(logger, probeLoader);
|
||||
});
|
||||
|
||||
return services;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Adds eBPF runtime evidence collection with air-gap mode enabled.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Air-gap mode uses offline probe loading without network dependencies.
|
||||
/// </remarks>
|
||||
public static IServiceCollection AddEbpfRuntimeEvidenceAirGap(
|
||||
this IServiceCollection services,
|
||||
Action<EbpfEvidenceOptions>? configureOptions = null)
|
||||
{
|
||||
return services.AddEbpfRuntimeEvidence(options =>
|
||||
{
|
||||
options.UseAirGapMode = true;
|
||||
configureOptions?.Invoke(options);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Options for eBPF evidence collection services.
|
||||
/// </summary>
|
||||
public sealed class EbpfEvidenceOptions
|
||||
{
|
||||
/// <summary>
|
||||
/// Path to the /proc filesystem (default: /proc).
|
||||
/// </summary>
|
||||
public string ProcRoot { get; set; } = "/proc";
|
||||
|
||||
/// <summary>
|
||||
/// Path to the cgroup filesystem (default: /sys/fs/cgroup).
|
||||
/// </summary>
|
||||
public string CgroupRoot { get; set; } = "/sys/fs/cgroup";
|
||||
|
||||
/// <summary>
|
||||
/// Directory containing compiled BPF probe objects.
|
||||
/// </summary>
|
||||
public string? ProbeDirectory { get; set; }
|
||||
|
||||
/// <summary>
|
||||
/// Directory for NDJSON evidence output.
|
||||
/// </summary>
|
||||
public string OutputDirectory { get; set; } = "/var/lib/stellaops/evidence";
|
||||
|
||||
/// <summary>
|
||||
/// Whether to use air-gap mode (offline probe loading).
|
||||
/// </summary>
|
||||
public bool UseAirGapMode { get; set; }
|
||||
|
||||
/// <summary>
|
||||
/// Maximum size of the symbol resolution cache.
|
||||
/// </summary>
|
||||
public long SymbolCacheSizeLimit { get; set; } = 100000;
|
||||
|
||||
/// <summary>
|
||||
/// NDJSON writer options.
|
||||
/// </summary>
|
||||
public NdjsonWriterOptions WriterOptions { get; set; } = new();
|
||||
|
||||
/// <summary>
|
||||
/// Collector options.
|
||||
/// </summary>
|
||||
public RuntimeEvidenceCollectorOptions CollectorOptions { get; set; } = new();
|
||||
}
|
||||
@@ -0,0 +1,472 @@
|
||||
// <copyright file="RuntimeEvidenceCollector.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Services;
|
||||
|
||||
using System.Collections.Concurrent;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Threading.Channels;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Signals.Ebpf.Cgroup;
|
||||
using StellaOps.Signals.Ebpf.Output;
|
||||
using StellaOps.Signals.Ebpf.Parsers;
|
||||
using StellaOps.Signals.Ebpf.Probes;
|
||||
using StellaOps.Signals.Ebpf.Schema;
|
||||
using StellaOps.Signals.Ebpf.Symbols;
|
||||
|
||||
/// <summary>
|
||||
/// Unified runtime evidence collector that integrates syscall tracepoints,
|
||||
/// uprobes, and symbol-level observations into a single evidence stream.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// This service coordinates:
|
||||
/// - eBPF probe loading and attachment
|
||||
/// - Binary event parsing (EventParser)
|
||||
/// - Container/cgroup resolution (CgroupContainerResolver)
|
||||
/// - Symbol resolution from ELF (EnhancedSymbolResolver)
|
||||
/// - Deterministic NDJSON output (RuntimeEvidenceNdjsonWriter)
|
||||
/// </remarks>
|
||||
public sealed class RuntimeEvidenceCollector : IAsyncDisposable
|
||||
{
|
||||
private readonly ILogger<RuntimeEvidenceCollector> _logger;
|
||||
private readonly IEbpfProbeLoader _probeLoader;
|
||||
private readonly EventParser _eventParser;
|
||||
private readonly CgroupContainerResolver _cgroupResolver;
|
||||
private readonly RuntimeEvidenceNdjsonWriter _writer;
|
||||
private readonly RuntimeEvidenceCollectorOptions _options;
|
||||
private readonly ConcurrentDictionary<Guid, CollectionSession> _sessions;
|
||||
private readonly SemaphoreSlim _sessionLock = new(1, 1);
|
||||
private bool _disposed;
|
||||
|
||||
public RuntimeEvidenceCollector(
|
||||
ILogger<RuntimeEvidenceCollector> logger,
|
||||
IEbpfProbeLoader probeLoader,
|
||||
EventParser eventParser,
|
||||
CgroupContainerResolver cgroupResolver,
|
||||
RuntimeEvidenceNdjsonWriter writer,
|
||||
RuntimeEvidenceCollectorOptions? options = null)
|
||||
{
|
||||
_logger = logger;
|
||||
_probeLoader = probeLoader;
|
||||
_eventParser = eventParser;
|
||||
_cgroupResolver = cgroupResolver;
|
||||
_writer = writer;
|
||||
_options = options ?? new RuntimeEvidenceCollectorOptions();
|
||||
_sessions = new();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Event raised when an evidence chunk is completed and signed.
|
||||
/// </summary>
|
||||
public event Func<EvidenceChunkCompletedEventArgs, CancellationToken, Task>? ChunkCompleted;
|
||||
|
||||
/// <summary>
|
||||
/// Start collecting runtime evidence for a container.
|
||||
/// </summary>
|
||||
/// <param name="containerId">Container ID to attach probes to.</param>
|
||||
/// <param name="options">Collection options.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Handle to the collection session.</returns>
|
||||
public async Task<EvidenceCollectionHandle> StartCollectionAsync(
|
||||
string containerId,
|
||||
RuntimeSignalOptions options,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
await _sessionLock.WaitAsync(ct);
|
||||
try
|
||||
{
|
||||
var sessionId = Guid.NewGuid();
|
||||
|
||||
_logger.LogInformation(
|
||||
"Starting runtime evidence collection for container {ContainerId}, session {SessionId}",
|
||||
containerId,
|
||||
sessionId);
|
||||
|
||||
// Load and attach probes
|
||||
var probeHandle = await _probeLoader.LoadAndAttachAsync(containerId, options, ct);
|
||||
|
||||
// Create collection session
|
||||
var session = new CollectionSession
|
||||
{
|
||||
SessionId = sessionId,
|
||||
ContainerId = containerId,
|
||||
ProbeHandle = probeHandle,
|
||||
Options = options,
|
||||
StartedAt = DateTimeOffset.UtcNow,
|
||||
EventChannel = Channel.CreateBounded<RuntimeEvidenceRecord>(
|
||||
new BoundedChannelOptions(_options.EventChannelCapacity)
|
||||
{
|
||||
FullMode = BoundedChannelFullMode.DropOldest,
|
||||
SingleReader = true,
|
||||
SingleWriter = false,
|
||||
}),
|
||||
CancellationSource = CancellationTokenSource.CreateLinkedTokenSource(ct),
|
||||
};
|
||||
|
||||
_sessions[sessionId] = session;
|
||||
|
||||
// Start background event processing
|
||||
session.ProcessingTask = ProcessEventsAsync(session, session.CancellationSource.Token);
|
||||
|
||||
return new EvidenceCollectionHandle
|
||||
{
|
||||
SessionId = sessionId,
|
||||
ContainerId = containerId,
|
||||
StartedAt = session.StartedAt,
|
||||
Options = options,
|
||||
};
|
||||
}
|
||||
finally
|
||||
{
|
||||
_sessionLock.Release();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Stop collecting and return collection statistics.
|
||||
/// </summary>
|
||||
public async Task<EvidenceCollectionSummary> StopCollectionAsync(
|
||||
EvidenceCollectionHandle handle,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
if (!_sessions.TryRemove(handle.SessionId, out var session))
|
||||
{
|
||||
throw new InvalidOperationException($"Session {handle.SessionId} not found");
|
||||
}
|
||||
|
||||
_logger.LogInformation(
|
||||
"Stopping evidence collection for session {SessionId}",
|
||||
handle.SessionId);
|
||||
|
||||
// Signal cancellation and wait for processing to complete
|
||||
await session.CancellationSource.CancelAsync();
|
||||
|
||||
try
|
||||
{
|
||||
await session.ProcessingTask.WaitAsync(TimeSpan.FromSeconds(10), ct);
|
||||
}
|
||||
catch (TimeoutException)
|
||||
{
|
||||
_logger.LogWarning("Event processing task did not complete in time for session {SessionId}", handle.SessionId);
|
||||
}
|
||||
|
||||
// Detach probes
|
||||
await _probeLoader.DetachAsync(session.ProbeHandle, ct);
|
||||
|
||||
// Flush writer
|
||||
await _writer.FlushAsync(ct);
|
||||
|
||||
var summary = new EvidenceCollectionSummary
|
||||
{
|
||||
SessionId = session.SessionId,
|
||||
ContainerId = session.ContainerId,
|
||||
StartedAt = session.StartedAt,
|
||||
StoppedAt = DateTimeOffset.UtcNow,
|
||||
TotalEvents = session.TotalEvents,
|
||||
ProcessedEvents = session.ProcessedEvents,
|
||||
DroppedEvents = session.DroppedEvents,
|
||||
ChunksWritten = session.ChunksWritten,
|
||||
};
|
||||
|
||||
// Cleanup
|
||||
session.CancellationSource.Dispose();
|
||||
|
||||
return summary;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get current collection statistics.
|
||||
/// </summary>
|
||||
public Task<EvidenceCollectionStats> GetStatsAsync(
|
||||
EvidenceCollectionHandle handle,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
if (!_sessions.TryGetValue(handle.SessionId, out var session))
|
||||
{
|
||||
throw new InvalidOperationException($"Session {handle.SessionId} not found");
|
||||
}
|
||||
|
||||
var stats = new EvidenceCollectionStats
|
||||
{
|
||||
TotalEvents = session.TotalEvents,
|
||||
ProcessedEvents = session.ProcessedEvents,
|
||||
DroppedEvents = session.DroppedEvents,
|
||||
EventsPerSecond = CalculateEventsPerSecond(session),
|
||||
BufferUtilization = _probeLoader.GetBufferUtilization(session.ProbeHandle),
|
||||
CpuOverhead = _probeLoader.GetCpuOverhead(session.ProbeHandle),
|
||||
MemoryUsage = _probeLoader.GetMemoryUsage(session.ProbeHandle),
|
||||
};
|
||||
|
||||
return Task.FromResult(stats);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Stream evidence records in real-time.
|
||||
/// </summary>
|
||||
public async IAsyncEnumerable<RuntimeEvidenceRecord> StreamEvidenceAsync(
|
||||
EvidenceCollectionHandle handle,
|
||||
[EnumeratorCancellation] CancellationToken ct = default)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
if (!_sessions.TryGetValue(handle.SessionId, out var session))
|
||||
{
|
||||
yield break;
|
||||
}
|
||||
|
||||
await foreach (var record in session.EventChannel.Reader.ReadAllAsync(ct))
|
||||
{
|
||||
yield return record;
|
||||
}
|
||||
}
|
||||
|
||||
private async Task ProcessEventsAsync(CollectionSession session, CancellationToken ct)
|
||||
{
|
||||
try
|
||||
{
|
||||
// Subscribe to chunk rotation events
|
||||
_writer.ChunkRotated += async (args, ct) =>
|
||||
{
|
||||
Interlocked.Increment(ref session.ChunksWritten);
|
||||
if (ChunkCompleted != null)
|
||||
{
|
||||
await ChunkCompleted(new EvidenceChunkCompletedEventArgs
|
||||
{
|
||||
SessionId = session.SessionId,
|
||||
ContainerId = session.ContainerId,
|
||||
ChunkPath = args.Statistics.FilePath!,
|
||||
EventCount = args.Statistics.EventCount,
|
||||
Size = args.Statistics.Size,
|
||||
ContentHash = args.Statistics.ContentHash,
|
||||
PreviousHash = args.PreviousChunkHash,
|
||||
}, ct);
|
||||
}
|
||||
};
|
||||
|
||||
await foreach (var rawEvent in _probeLoader.ReadEventsAsync(session.ProbeHandle, ct))
|
||||
{
|
||||
Interlocked.Increment(ref session.TotalEvents);
|
||||
|
||||
// Parse the raw event
|
||||
var record = _eventParser.Parse(rawEvent.Span);
|
||||
if (record == null)
|
||||
{
|
||||
Interlocked.Increment(ref session.DroppedEvents);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Enrich with container identity
|
||||
var enrichedRecord = EnrichRecord(record, session);
|
||||
|
||||
Interlocked.Increment(ref session.ProcessedEvents);
|
||||
|
||||
// Write to NDJSON output
|
||||
await _writer.WriteAsync(enrichedRecord, ct);
|
||||
|
||||
// Push to channel for streaming consumers
|
||||
session.EventChannel.Writer.TryWrite(enrichedRecord);
|
||||
}
|
||||
}
|
||||
catch (OperationCanceledException)
|
||||
{
|
||||
// Normal cancellation
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogError(ex, "Error processing events for session {SessionId}", session.SessionId);
|
||||
}
|
||||
finally
|
||||
{
|
||||
session.EventChannel.Writer.TryComplete();
|
||||
}
|
||||
}
|
||||
|
||||
private RuntimeEvidenceRecord EnrichRecord(RuntimeEvidenceRecord record, CollectionSession session)
|
||||
{
|
||||
// Resolve container identity from cgroup ID
|
||||
string? containerId = record.ContainerId;
|
||||
string? imageDigest = record.ImageDigest;
|
||||
|
||||
if (containerId == null && record.CgroupId > 0)
|
||||
{
|
||||
var identity = _cgroupResolver.ResolveByCgroupId(record.CgroupId);
|
||||
if (identity == null)
|
||||
{
|
||||
// Try by PID
|
||||
identity = _cgroupResolver.ResolveByPid(record.Pid);
|
||||
}
|
||||
|
||||
if (identity != null)
|
||||
{
|
||||
containerId = identity.ContainerId;
|
||||
imageDigest = identity.ImageDigest;
|
||||
|
||||
// Cache for future lookups
|
||||
_cgroupResolver.RegisterCgroupMapping(record.CgroupId, identity);
|
||||
}
|
||||
}
|
||||
|
||||
// Return enriched record if we found container info
|
||||
if (containerId != null || imageDigest != null)
|
||||
{
|
||||
return record with
|
||||
{
|
||||
ContainerId = containerId,
|
||||
ImageDigest = imageDigest,
|
||||
};
|
||||
}
|
||||
|
||||
return record;
|
||||
}
|
||||
|
||||
private static double CalculateEventsPerSecond(CollectionSession session)
|
||||
{
|
||||
var duration = DateTimeOffset.UtcNow - session.StartedAt;
|
||||
if (duration.TotalSeconds < 1)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
return session.ProcessedEvents / duration.TotalSeconds;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async ValueTask DisposeAsync()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
_disposed = true;
|
||||
|
||||
// Stop all active sessions
|
||||
foreach (var session in _sessions.Values)
|
||||
{
|
||||
try
|
||||
{
|
||||
await session.CancellationSource.CancelAsync();
|
||||
await _probeLoader.DetachAsync(session.ProbeHandle);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogWarning(ex, "Error disposing session {SessionId}", session.SessionId);
|
||||
}
|
||||
finally
|
||||
{
|
||||
session.CancellationSource.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
_sessions.Clear();
|
||||
|
||||
await _writer.DisposeAsync();
|
||||
_cgroupResolver.Dispose();
|
||||
|
||||
_sessionLock.Dispose();
|
||||
}
|
||||
|
||||
private sealed class CollectionSession
|
||||
{
|
||||
public required Guid SessionId { get; init; }
|
||||
public required string ContainerId { get; init; }
|
||||
public required EbpfProbeHandle ProbeHandle { get; init; }
|
||||
public required RuntimeSignalOptions Options { get; init; }
|
||||
public required DateTimeOffset StartedAt { get; init; }
|
||||
public required Channel<RuntimeEvidenceRecord> EventChannel { get; init; }
|
||||
public required CancellationTokenSource CancellationSource { get; init; }
|
||||
public Task ProcessingTask { get; set; } = Task.CompletedTask;
|
||||
public long TotalEvents;
|
||||
public long ProcessedEvents;
|
||||
public long DroppedEvents;
|
||||
public long ChunksWritten;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Options for the runtime evidence collector.
|
||||
/// </summary>
|
||||
public sealed record RuntimeEvidenceCollectorOptions
|
||||
{
|
||||
/// <summary>
|
||||
/// Capacity of the internal event channel.
|
||||
/// </summary>
|
||||
public int EventChannelCapacity { get; init; } = 10000;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Handle to an active evidence collection session.
|
||||
/// </summary>
|
||||
public sealed record EvidenceCollectionHandle
|
||||
{
|
||||
/// <summary>
|
||||
/// Unique session identifier.
|
||||
/// </summary>
|
||||
public required Guid SessionId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Container ID being monitored.
|
||||
/// </summary>
|
||||
public required string ContainerId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// When collection started.
|
||||
/// </summary>
|
||||
public required DateTimeOffset StartedAt { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Options used for this session.
|
||||
/// </summary>
|
||||
public required RuntimeSignalOptions Options { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Summary of completed evidence collection.
|
||||
/// </summary>
|
||||
public sealed record EvidenceCollectionSummary
|
||||
{
|
||||
public required Guid SessionId { get; init; }
|
||||
public required string ContainerId { get; init; }
|
||||
public required DateTimeOffset StartedAt { get; init; }
|
||||
public required DateTimeOffset StoppedAt { get; init; }
|
||||
public required long TotalEvents { get; init; }
|
||||
public required long ProcessedEvents { get; init; }
|
||||
public required long DroppedEvents { get; init; }
|
||||
public required long ChunksWritten { get; init; }
|
||||
public TimeSpan Duration => StoppedAt - StartedAt;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Current collection statistics.
|
||||
/// </summary>
|
||||
public sealed record EvidenceCollectionStats
|
||||
{
|
||||
public required long TotalEvents { get; init; }
|
||||
public required long ProcessedEvents { get; init; }
|
||||
public required long DroppedEvents { get; init; }
|
||||
public required double EventsPerSecond { get; init; }
|
||||
public required double BufferUtilization { get; init; }
|
||||
public required double CpuOverhead { get; init; }
|
||||
public required long MemoryUsage { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Event args for evidence chunk completion.
|
||||
/// </summary>
|
||||
public sealed record EvidenceChunkCompletedEventArgs
|
||||
{
|
||||
public required Guid SessionId { get; init; }
|
||||
public required string ContainerId { get; init; }
|
||||
public required string ChunkPath { get; init; }
|
||||
public required long EventCount { get; init; }
|
||||
public required long Size { get; init; }
|
||||
public string? ContentHash { get; init; }
|
||||
public string? PreviousHash { get; init; }
|
||||
}
|
||||
@@ -0,0 +1,326 @@
|
||||
// <copyright file="AttestorEvidenceChunkSigner.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Signing;
|
||||
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using System.Text.Json.Serialization;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Attestor.Core.Rekor;
|
||||
using StellaOps.Attestor.Core.Signing;
|
||||
using StellaOps.Attestor.Core.Submission;
|
||||
|
||||
/// <summary>
|
||||
/// Production implementation of evidence chunk signer using Attestor services.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Integrates with:
|
||||
/// - <see cref="IAttestationSigningService"/> for DSSE signing
|
||||
/// - <see cref="IRekorClient"/> for transparency log submission
|
||||
/// </remarks>
|
||||
public sealed class AttestorEvidenceChunkSigner : IEvidenceChunkSigner
|
||||
{
|
||||
private readonly ILogger<AttestorEvidenceChunkSigner> _logger;
|
||||
private readonly IAttestationSigningService _signingService;
|
||||
private readonly IRekorClient _rekorClient;
|
||||
private readonly RekorBackend _rekorBackend;
|
||||
private readonly TimeProvider _timeProvider;
|
||||
private readonly JsonSerializerOptions _jsonOptions;
|
||||
|
||||
/// <summary>
|
||||
/// Predicate type URI for runtime evidence.
|
||||
/// </summary>
|
||||
public const string PredicateType = "stella.ops/runtime-evidence@v1";
|
||||
|
||||
/// <summary>
|
||||
/// DSSE payload type for in-toto statements.
|
||||
/// </summary>
|
||||
public const string PayloadType = "application/vnd.in-toto+json";
|
||||
|
||||
public AttestorEvidenceChunkSigner(
|
||||
ILogger<AttestorEvidenceChunkSigner> logger,
|
||||
IAttestationSigningService signingService,
|
||||
IRekorClient rekorClient,
|
||||
RekorBackend rekorBackend,
|
||||
TimeProvider? timeProvider = null)
|
||||
{
|
||||
_logger = logger;
|
||||
_signingService = signingService;
|
||||
_rekorClient = rekorClient;
|
||||
_rekorBackend = rekorBackend;
|
||||
_timeProvider = timeProvider ?? TimeProvider.System;
|
||||
_jsonOptions = new JsonSerializerOptions
|
||||
{
|
||||
PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
|
||||
DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull,
|
||||
WriteIndented = false,
|
||||
};
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<EvidenceChunkSignResult> SignAsync(
|
||||
EvidenceChunkSignRequest request,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
var stats = request.Statistics;
|
||||
var now = _timeProvider.GetUtcNow();
|
||||
|
||||
// Build predicate
|
||||
var predicate = new RuntimeEvidencePredicate
|
||||
{
|
||||
ChunkId = stats.ContentHash ?? $"sha256:{ComputeFileHash(stats.FilePath)}",
|
||||
ChunkSequence = stats.ChunkSequence,
|
||||
PreviousChunkId = request.PreviousChunkHash,
|
||||
EventCount = stats.EventCount,
|
||||
TimeRange = new EvidenceTimeRange
|
||||
{
|
||||
Start = stats.StartTime,
|
||||
End = stats.StartTime + stats.Duration,
|
||||
},
|
||||
CollectorVersion = request.CollectorVersion,
|
||||
KernelVersion = request.KernelVersion,
|
||||
Compression = stats.FilePath?.EndsWith(".gz", StringComparison.OrdinalIgnoreCase) == true ? "gzip" : null,
|
||||
HostId = request.HostId,
|
||||
ContainerIds = request.ContainerIds,
|
||||
};
|
||||
|
||||
// Build in-toto statement
|
||||
var statement = new InTotoStatement
|
||||
{
|
||||
Type = "https://in-toto.io/Statement/v0.1",
|
||||
PredicateType = PredicateType,
|
||||
Subject = new[]
|
||||
{
|
||||
new InTotoSubject
|
||||
{
|
||||
Name = stats.FilePath ?? "unknown",
|
||||
Digest = new Dictionary<string, string>
|
||||
{
|
||||
["sha256"] = ExtractHashHex(predicate.ChunkId),
|
||||
},
|
||||
},
|
||||
},
|
||||
Predicate = predicate,
|
||||
};
|
||||
|
||||
// Serialize statement to JSON
|
||||
var statementJson = JsonSerializer.Serialize(statement, _jsonOptions);
|
||||
var statementBase64 = Convert.ToBase64String(Encoding.UTF8.GetBytes(statementJson));
|
||||
|
||||
_logger.LogDebug(
|
||||
"Signing chunk {ChunkSequence}: {ChunkId}",
|
||||
stats.ChunkSequence,
|
||||
predicate.ChunkId);
|
||||
|
||||
// Sign via Attestor service
|
||||
var signRequest = new AttestationSignRequest
|
||||
{
|
||||
KeyId = request.KeyId,
|
||||
PayloadType = PayloadType,
|
||||
PayloadBase64 = statementBase64,
|
||||
Artifact = new AttestorSubmissionRequest.ArtifactInfo
|
||||
{
|
||||
Sha256 = ExtractHashHex(predicate.ChunkId),
|
||||
Kind = "runtime-evidence",
|
||||
SubjectUri = stats.FilePath,
|
||||
},
|
||||
LogPreference = "primary",
|
||||
Archive = true,
|
||||
};
|
||||
|
||||
var context = new SubmissionContext
|
||||
{
|
||||
CallerSubject = "signals-collector",
|
||||
CallerAudience = "attestor",
|
||||
CallerClientId = "signals",
|
||||
CallerTenant = null,
|
||||
};
|
||||
|
||||
var signResult = await _signingService.SignAsync(signRequest, context, ct);
|
||||
|
||||
// Build DSSE envelope for result
|
||||
var envelope = new DsseEnvelopeDto
|
||||
{
|
||||
PayloadType = signResult.Bundle.Dsse.PayloadType,
|
||||
Payload = signResult.Bundle.Dsse.PayloadBase64,
|
||||
Signatures = signResult.Bundle.Dsse.Signatures
|
||||
.Select(s => new DsseSignatureDto { KeyId = s.KeyId, Sig = s.Signature })
|
||||
.ToArray(),
|
||||
};
|
||||
|
||||
var envelopeJson = JsonSerializer.Serialize(envelope, _jsonOptions);
|
||||
var envelopeBase64 = Convert.ToBase64String(Encoding.UTF8.GetBytes(envelopeJson));
|
||||
|
||||
string? rekorUuid = null;
|
||||
long? rekorLogIndex = null;
|
||||
string? rekorInclusionProof = null;
|
||||
|
||||
// Submit to Rekor if requested
|
||||
if (request.SubmitToRekor)
|
||||
{
|
||||
try
|
||||
{
|
||||
var submissionRequest = new AttestorSubmissionRequest
|
||||
{
|
||||
Bundle = signResult.Bundle,
|
||||
Meta = signResult.Meta,
|
||||
};
|
||||
|
||||
var rekorResponse = await _rekorClient.SubmitAsync(
|
||||
submissionRequest,
|
||||
_rekorBackend,
|
||||
ct);
|
||||
|
||||
rekorUuid = rekorResponse.Uuid;
|
||||
rekorLogIndex = rekorResponse.Index;
|
||||
|
||||
if (rekorResponse.Proof != null)
|
||||
{
|
||||
rekorInclusionProof = JsonSerializer.Serialize(rekorResponse.Proof, _jsonOptions);
|
||||
}
|
||||
|
||||
_logger.LogInformation(
|
||||
"Chunk {ChunkSequence} submitted to Rekor: UUID={Uuid}, Index={Index}",
|
||||
stats.ChunkSequence,
|
||||
rekorUuid,
|
||||
rekorLogIndex);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogWarning(
|
||||
ex,
|
||||
"Failed to submit chunk {ChunkSequence} to Rekor",
|
||||
stats.ChunkSequence);
|
||||
}
|
||||
}
|
||||
|
||||
return new EvidenceChunkSignResult
|
||||
{
|
||||
Statistics = stats,
|
||||
Predicate = predicate,
|
||||
DsseEnvelopeBase64 = envelopeBase64,
|
||||
RekorUuid = rekorUuid,
|
||||
RekorLogIndex = rekorLogIndex,
|
||||
RekorInclusionProof = rekorInclusionProof,
|
||||
SignedAt = now,
|
||||
KeyId = request.KeyId,
|
||||
};
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<bool> VerifyAsync(EvidenceChunkSignResult result, CancellationToken ct = default)
|
||||
{
|
||||
// Verify Rekor inclusion if we have a UUID
|
||||
if (!string.IsNullOrEmpty(result.RekorUuid))
|
||||
{
|
||||
try
|
||||
{
|
||||
var payloadDigest = SHA256.HashData(
|
||||
Convert.FromBase64String(result.DsseEnvelopeBase64));
|
||||
|
||||
var verification = await _rekorClient.VerifyInclusionAsync(
|
||||
result.RekorUuid,
|
||||
payloadDigest,
|
||||
_rekorBackend,
|
||||
ct);
|
||||
|
||||
if (!verification.Verified)
|
||||
{
|
||||
_logger.LogWarning(
|
||||
"Rekor inclusion verification failed for chunk {ChunkSequence}: {Reason}",
|
||||
result.Predicate.ChunkSequence,
|
||||
verification.FailureReason);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogWarning(
|
||||
ex,
|
||||
"Rekor verification failed for chunk {ChunkSequence}",
|
||||
result.Predicate.ChunkSequence);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// No Rekor UUID - can't verify transparency log inclusion
|
||||
_logger.LogDebug(
|
||||
"Chunk {ChunkSequence} has no Rekor UUID, skipping inclusion verification",
|
||||
result.Predicate.ChunkSequence);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private static string ComputeFileHash(string? filePath)
|
||||
{
|
||||
if (string.IsNullOrEmpty(filePath) || !File.Exists(filePath))
|
||||
{
|
||||
return new string('0', 64);
|
||||
}
|
||||
|
||||
using var stream = File.OpenRead(filePath);
|
||||
var hash = SHA256.HashData(stream);
|
||||
return Convert.ToHexString(hash).ToLowerInvariant();
|
||||
}
|
||||
|
||||
private static string ExtractHashHex(string chunkId)
|
||||
{
|
||||
if (chunkId.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase))
|
||||
{
|
||||
return chunkId[7..];
|
||||
}
|
||||
|
||||
return chunkId;
|
||||
}
|
||||
|
||||
// DTOs for JSON serialization
|
||||
private sealed record InTotoStatement
|
||||
{
|
||||
[JsonPropertyName("_type")]
|
||||
public required string Type { get; init; }
|
||||
|
||||
[JsonPropertyName("predicateType")]
|
||||
public required string PredicateType { get; init; }
|
||||
|
||||
[JsonPropertyName("subject")]
|
||||
public required InTotoSubject[] Subject { get; init; }
|
||||
|
||||
[JsonPropertyName("predicate")]
|
||||
public required RuntimeEvidencePredicate Predicate { get; init; }
|
||||
}
|
||||
|
||||
private sealed record InTotoSubject
|
||||
{
|
||||
[JsonPropertyName("name")]
|
||||
public required string Name { get; init; }
|
||||
|
||||
[JsonPropertyName("digest")]
|
||||
public required Dictionary<string, string> Digest { get; init; }
|
||||
}
|
||||
|
||||
private sealed record DsseEnvelopeDto
|
||||
{
|
||||
[JsonPropertyName("payloadType")]
|
||||
public required string PayloadType { get; init; }
|
||||
|
||||
[JsonPropertyName("payload")]
|
||||
public required string Payload { get; init; }
|
||||
|
||||
[JsonPropertyName("signatures")]
|
||||
public required DsseSignatureDto[] Signatures { get; init; }
|
||||
}
|
||||
|
||||
private sealed record DsseSignatureDto
|
||||
{
|
||||
[JsonPropertyName("keyid")]
|
||||
public string? KeyId { get; init; }
|
||||
|
||||
[JsonPropertyName("sig")]
|
||||
public required string Sig { get; init; }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,429 @@
|
||||
// <copyright file="EvidenceChunkFinalizer.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Signing;
|
||||
|
||||
using System.Collections.Concurrent;
|
||||
using System.Text.Json;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Signals.Ebpf.Output;
|
||||
|
||||
/// <summary>
|
||||
/// Finalizes evidence chunks by signing them and maintaining chain integrity.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Data flow:
|
||||
/// <code>
|
||||
/// ChunkRotated Event
|
||||
/// ↓
|
||||
/// Build RuntimeEvidencePredicate
|
||||
/// ↓
|
||||
/// Sign with IEvidenceChunkSigner (DSSE envelope)
|
||||
/// ↓
|
||||
/// Submit to Rekor (optional)
|
||||
/// ↓
|
||||
/// Update Chain State (previous_chunk_id linking)
|
||||
/// ↓
|
||||
/// Emit ChunkFinalized Event
|
||||
/// </code>
|
||||
/// </remarks>
|
||||
public sealed class EvidenceChunkFinalizer : IAsyncDisposable
|
||||
{
|
||||
private readonly ILogger<EvidenceChunkFinalizer> _logger;
|
||||
private readonly IEvidenceChunkSigner _signer;
|
||||
private readonly EvidenceChunkFinalizerOptions _options;
|
||||
private readonly ConcurrentDictionary<string, ChainState> _chainStates;
|
||||
private readonly SemaphoreSlim _signLock = new(1, 1);
|
||||
private bool _disposed;
|
||||
|
||||
/// <summary>
|
||||
/// Event raised when a chunk is finalized (signed and optionally logged).
|
||||
/// </summary>
|
||||
public event Func<ChunkFinalizedEventArgs, CancellationToken, Task>? ChunkFinalized;
|
||||
|
||||
public EvidenceChunkFinalizer(
|
||||
ILogger<EvidenceChunkFinalizer> logger,
|
||||
IEvidenceChunkSigner signer,
|
||||
EvidenceChunkFinalizerOptions? options = null)
|
||||
{
|
||||
_logger = logger;
|
||||
_signer = signer;
|
||||
_options = options ?? new EvidenceChunkFinalizerOptions();
|
||||
_chainStates = new(StringComparer.OrdinalIgnoreCase);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Finalize a rotated chunk by signing it and updating chain state.
|
||||
/// </summary>
|
||||
/// <param name="args">Chunk rotation event args.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Sign result with DSSE envelope and chain metadata.</returns>
|
||||
public async Task<EvidenceChunkSignResult> FinalizeChunkAsync(
|
||||
ChunkRotatedEventArgs args,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
await _signLock.WaitAsync(ct);
|
||||
try
|
||||
{
|
||||
var stats = args.Statistics;
|
||||
var chainKey = GetChainKey(stats.FilePath);
|
||||
|
||||
// Get or create chain state
|
||||
var chainState = _chainStates.GetOrAdd(chainKey, _ => new ChainState());
|
||||
|
||||
_logger.LogInformation(
|
||||
"Finalizing chunk {ChunkSequence} for chain {ChainKey}: {FilePath}",
|
||||
stats.ChunkSequence,
|
||||
chainKey,
|
||||
stats.FilePath);
|
||||
|
||||
// Build sign request
|
||||
var request = new EvidenceChunkSignRequest
|
||||
{
|
||||
Statistics = stats,
|
||||
PreviousChunkHash = chainState.LastChunkHash,
|
||||
KeyId = _options.SigningKeyId,
|
||||
CollectorVersion = _options.CollectorVersion,
|
||||
KernelVersion = _options.KernelVersion,
|
||||
SubmitToRekor = _options.SubmitToRekor,
|
||||
HostId = _options.HostId,
|
||||
};
|
||||
|
||||
// Sign the chunk
|
||||
var result = await _signer.SignAsync(request, ct);
|
||||
|
||||
// Update chain state
|
||||
chainState.LastChunkHash = stats.ContentHash;
|
||||
chainState.LastChunkSequence = stats.ChunkSequence;
|
||||
chainState.TotalChunks++;
|
||||
chainState.TotalEvents += stats.EventCount;
|
||||
|
||||
_logger.LogInformation(
|
||||
"Chunk {ChunkSequence} finalized: {ChunkId}, Rekor: {RekorUuid}",
|
||||
stats.ChunkSequence,
|
||||
result.Predicate.ChunkId,
|
||||
result.RekorUuid ?? "not submitted");
|
||||
|
||||
// Save chain state to file for recovery
|
||||
await SaveChainStateAsync(chainKey, chainState, ct);
|
||||
|
||||
// Notify listeners
|
||||
await NotifyChunkFinalizedAsync(result, chainState, ct);
|
||||
|
||||
return result;
|
||||
}
|
||||
finally
|
||||
{
|
||||
_signLock.Release();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Verify chain integrity from a starting point.
|
||||
/// </summary>
|
||||
/// <param name="results">Signed chunk results to verify.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Verification result.</returns>
|
||||
public async Task<ChainVerificationResult> VerifyChainAsync(
|
||||
IReadOnlyList<EvidenceChunkSignResult> results,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
ObjectDisposedException.ThrowIf(_disposed, this);
|
||||
|
||||
if (results.Count == 0)
|
||||
{
|
||||
return new ChainVerificationResult
|
||||
{
|
||||
IsValid = true,
|
||||
VerifiedChunks = 0,
|
||||
Errors = [],
|
||||
};
|
||||
}
|
||||
|
||||
var errors = new List<ChainVerificationError>();
|
||||
string? expectedPreviousHash = null;
|
||||
int expectedSequence = results[0].Predicate.ChunkSequence;
|
||||
EvidenceChunkSignResult? previousResult = null;
|
||||
|
||||
foreach (var result in results)
|
||||
{
|
||||
// Verify signature
|
||||
if (!await _signer.VerifyAsync(result, ct))
|
||||
{
|
||||
errors.Add(new ChainVerificationError
|
||||
{
|
||||
ChunkSequence = result.Predicate.ChunkSequence,
|
||||
ErrorType = "signature_invalid",
|
||||
Message = $"Signature verification failed for chunk {result.Predicate.ChunkSequence}",
|
||||
});
|
||||
}
|
||||
|
||||
// Verify chain linkage
|
||||
if (expectedPreviousHash != null && result.Predicate.PreviousChunkId != expectedPreviousHash)
|
||||
{
|
||||
errors.Add(new ChainVerificationError
|
||||
{
|
||||
ChunkSequence = result.Predicate.ChunkSequence,
|
||||
ErrorType = "chain_broken",
|
||||
Message = $"Expected previous_chunk_id {expectedPreviousHash}, got {result.Predicate.PreviousChunkId}",
|
||||
});
|
||||
}
|
||||
|
||||
// Verify sequence continuity
|
||||
if (result.Predicate.ChunkSequence != expectedSequence)
|
||||
{
|
||||
errors.Add(new ChainVerificationError
|
||||
{
|
||||
ChunkSequence = result.Predicate.ChunkSequence,
|
||||
ErrorType = "sequence_gap",
|
||||
Message = $"Expected sequence {expectedSequence}, got {result.Predicate.ChunkSequence}",
|
||||
});
|
||||
}
|
||||
|
||||
// Verify time monotonicity
|
||||
if (previousResult != null)
|
||||
{
|
||||
if (result.Predicate.TimeRange.Start < previousResult.Predicate.TimeRange.End)
|
||||
{
|
||||
errors.Add(new ChainVerificationError
|
||||
{
|
||||
ChunkSequence = result.Predicate.ChunkSequence,
|
||||
ErrorType = "time_overlap",
|
||||
Message = $"Time range overlaps with previous chunk",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
expectedPreviousHash = result.Predicate.ChunkId;
|
||||
expectedSequence++;
|
||||
previousResult = result;
|
||||
}
|
||||
|
||||
return new ChainVerificationResult
|
||||
{
|
||||
IsValid = errors.Count == 0,
|
||||
VerifiedChunks = results.Count,
|
||||
Errors = errors,
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Load chain state from disk for recovery after restart.
|
||||
/// </summary>
|
||||
/// <param name="chainKey">Chain identifier.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
public async Task LoadChainStateAsync(string chainKey, CancellationToken ct = default)
|
||||
{
|
||||
var stateFile = GetChainStateFilePath(chainKey);
|
||||
if (!File.Exists(stateFile))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
var json = await File.ReadAllTextAsync(stateFile, ct);
|
||||
var state = JsonSerializer.Deserialize<ChainState>(json);
|
||||
if (state != null)
|
||||
{
|
||||
_chainStates[chainKey] = state;
|
||||
_logger.LogInformation(
|
||||
"Loaded chain state for {ChainKey}: sequence {Sequence}, hash {Hash}",
|
||||
chainKey,
|
||||
state.LastChunkSequence,
|
||||
state.LastChunkHash);
|
||||
}
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogWarning(ex, "Failed to load chain state from {StateFile}", stateFile);
|
||||
}
|
||||
}
|
||||
|
||||
private async Task SaveChainStateAsync(string chainKey, ChainState state, CancellationToken ct)
|
||||
{
|
||||
if (string.IsNullOrEmpty(_options.ChainStateDirectory))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var stateFile = GetChainStateFilePath(chainKey);
|
||||
Directory.CreateDirectory(Path.GetDirectoryName(stateFile)!);
|
||||
|
||||
var json = JsonSerializer.Serialize(state, new JsonSerializerOptions { WriteIndented = true });
|
||||
await File.WriteAllTextAsync(stateFile, json, ct);
|
||||
}
|
||||
|
||||
private string GetChainStateFilePath(string chainKey)
|
||||
{
|
||||
var safeKey = string.Join("_", chainKey.Split(Path.GetInvalidFileNameChars()));
|
||||
return Path.Combine(_options.ChainStateDirectory ?? ".", $"chain-{safeKey}.json");
|
||||
}
|
||||
|
||||
private static string GetChainKey(string? filePath)
|
||||
{
|
||||
if (string.IsNullOrEmpty(filePath))
|
||||
{
|
||||
return "default";
|
||||
}
|
||||
|
||||
return Path.GetDirectoryName(filePath) ?? "default";
|
||||
}
|
||||
|
||||
private async Task NotifyChunkFinalizedAsync(
|
||||
EvidenceChunkSignResult result,
|
||||
ChainState chainState,
|
||||
CancellationToken ct)
|
||||
{
|
||||
if (ChunkFinalized == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var args = new ChunkFinalizedEventArgs
|
||||
{
|
||||
Result = result,
|
||||
ChainTotalChunks = chainState.TotalChunks,
|
||||
ChainTotalEvents = chainState.TotalEvents,
|
||||
};
|
||||
|
||||
try
|
||||
{
|
||||
await ChunkFinalized(args, ct);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogWarning(ex, "ChunkFinalized handler failed");
|
||||
}
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async ValueTask DisposeAsync()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
await _signLock.WaitAsync();
|
||||
try
|
||||
{
|
||||
_disposed = true;
|
||||
}
|
||||
finally
|
||||
{
|
||||
_signLock.Release();
|
||||
_signLock.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
private sealed class ChainState
|
||||
{
|
||||
public string? LastChunkHash { get; set; }
|
||||
public int LastChunkSequence { get; set; }
|
||||
public int TotalChunks { get; set; }
|
||||
public long TotalEvents { get; set; }
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Options for evidence chunk finalizer.
|
||||
/// </summary>
|
||||
public sealed record EvidenceChunkFinalizerOptions
|
||||
{
|
||||
/// <summary>
|
||||
/// Signing key ID to use.
|
||||
/// </summary>
|
||||
public string SigningKeyId { get; init; } = "default";
|
||||
|
||||
/// <summary>
|
||||
/// Collector version string.
|
||||
/// </summary>
|
||||
public string CollectorVersion { get; init; } = "1.0.0";
|
||||
|
||||
/// <summary>
|
||||
/// Kernel version (if available).
|
||||
/// </summary>
|
||||
public string? KernelVersion { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Whether to submit to Rekor transparency log.
|
||||
/// </summary>
|
||||
public bool SubmitToRekor { get; init; } = true;
|
||||
|
||||
/// <summary>
|
||||
/// Directory for storing chain state files.
|
||||
/// </summary>
|
||||
public string? ChainStateDirectory { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Host identifier.
|
||||
/// </summary>
|
||||
public string? HostId { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Event args for chunk finalized event.
|
||||
/// </summary>
|
||||
public sealed record ChunkFinalizedEventArgs
|
||||
{
|
||||
/// <summary>
|
||||
/// Sign result with DSSE envelope and chain metadata.
|
||||
/// </summary>
|
||||
public required EvidenceChunkSignResult Result { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Total chunks in the chain so far.
|
||||
/// </summary>
|
||||
public int ChainTotalChunks { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Total events across all chunks in the chain.
|
||||
/// </summary>
|
||||
public long ChainTotalEvents { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Result of chain verification.
|
||||
/// </summary>
|
||||
public sealed record ChainVerificationResult
|
||||
{
|
||||
/// <summary>
|
||||
/// Whether the chain is valid.
|
||||
/// </summary>
|
||||
public required bool IsValid { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Number of chunks verified.
|
||||
/// </summary>
|
||||
public required int VerifiedChunks { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// List of verification errors (empty if valid).
|
||||
/// </summary>
|
||||
public required IReadOnlyList<ChainVerificationError> Errors { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// A chain verification error.
|
||||
/// </summary>
|
||||
public sealed record ChainVerificationError
|
||||
{
|
||||
/// <summary>
|
||||
/// Chunk sequence where error occurred.
|
||||
/// </summary>
|
||||
public required int ChunkSequence { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Error type identifier.
|
||||
/// </summary>
|
||||
public required string ErrorType { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Human-readable error message.
|
||||
/// </summary>
|
||||
public required string Message { get; init; }
|
||||
}
|
||||
@@ -0,0 +1,83 @@
|
||||
// <copyright file="IEvidenceChunkSigner.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Signing;
|
||||
|
||||
/// <summary>
|
||||
/// Interface for signing evidence chunks with DSSE envelopes.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Implementations may use different signing backends:
|
||||
/// - KMS-backed signing (production)
|
||||
/// - Keyless/Fulcio signing (CI/CD pipelines)
|
||||
/// - Local key signing (development/testing)
|
||||
/// </remarks>
|
||||
public interface IEvidenceChunkSigner
|
||||
{
|
||||
/// <summary>
|
||||
/// Sign an evidence chunk predicate and optionally submit to transparency log.
|
||||
/// </summary>
|
||||
/// <param name="request">Signing request with chunk details.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>Sign result with DSSE envelope and optional Rekor proof.</returns>
|
||||
Task<EvidenceChunkSignResult> SignAsync(
|
||||
EvidenceChunkSignRequest request,
|
||||
CancellationToken ct = default);
|
||||
|
||||
/// <summary>
|
||||
/// Verify a signed evidence chunk.
|
||||
/// </summary>
|
||||
/// <param name="result">Previous sign result to verify.</param>
|
||||
/// <param name="ct">Cancellation token.</param>
|
||||
/// <returns>True if signature and chain integrity are valid.</returns>
|
||||
Task<bool> VerifyAsync(
|
||||
EvidenceChunkSignResult result,
|
||||
CancellationToken ct = default);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Request for signing an evidence chunk.
|
||||
/// </summary>
|
||||
public sealed record EvidenceChunkSignRequest
|
||||
{
|
||||
/// <summary>
|
||||
/// Chunk statistics from the NDJSON writer.
|
||||
/// </summary>
|
||||
public required Output.ChunkStatistics Statistics { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Hash of the previous chunk (for chain linking).
|
||||
/// </summary>
|
||||
public string? PreviousChunkHash { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Signing key ID to use.
|
||||
/// </summary>
|
||||
public required string KeyId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Collector version string.
|
||||
/// </summary>
|
||||
public required string CollectorVersion { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Kernel version (if available).
|
||||
/// </summary>
|
||||
public string? KernelVersion { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Whether to submit to transparency log.
|
||||
/// </summary>
|
||||
public bool SubmitToRekor { get; init; } = true;
|
||||
|
||||
/// <summary>
|
||||
/// Host identifier.
|
||||
/// </summary>
|
||||
public string? HostId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Container IDs included in this chunk.
|
||||
/// </summary>
|
||||
public IReadOnlyList<string>? ContainerIds { get; init; }
|
||||
}
|
||||
@@ -0,0 +1,334 @@
|
||||
// <copyright file="LocalEvidenceChunkSigner.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Signing;
|
||||
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using System.Text.Json.Serialization;
|
||||
using Microsoft.Extensions.Logging;
|
||||
|
||||
/// <summary>
|
||||
/// Local implementation of evidence chunk signer for testing and development.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// This implementation:
|
||||
/// - Uses HMAC-SHA256 for signing (not suitable for production)
|
||||
/// - Does not submit to Rekor
|
||||
/// - Provides deterministic output for testing
|
||||
/// </remarks>
|
||||
public sealed class LocalEvidenceChunkSigner : IEvidenceChunkSigner
|
||||
{
|
||||
private readonly ILogger<LocalEvidenceChunkSigner> _logger;
|
||||
private readonly byte[] _signingKey;
|
||||
private readonly TimeProvider _timeProvider;
|
||||
private readonly JsonSerializerOptions _jsonOptions;
|
||||
|
||||
/// <summary>
|
||||
/// Predicate type URI for runtime evidence.
|
||||
/// </summary>
|
||||
public const string PredicateType = "stella.ops/runtime-evidence@v1";
|
||||
|
||||
/// <summary>
|
||||
/// DSSE payload type for in-toto statements.
|
||||
/// </summary>
|
||||
public const string PayloadType = "application/vnd.in-toto+json";
|
||||
|
||||
public LocalEvidenceChunkSigner(
|
||||
ILogger<LocalEvidenceChunkSigner> logger,
|
||||
byte[]? signingKey = null,
|
||||
TimeProvider? timeProvider = null)
|
||||
{
|
||||
_logger = logger;
|
||||
_signingKey = signingKey ?? Encoding.UTF8.GetBytes("local-test-signing-key");
|
||||
_timeProvider = timeProvider ?? TimeProvider.System;
|
||||
_jsonOptions = new JsonSerializerOptions
|
||||
{
|
||||
PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
|
||||
DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull,
|
||||
WriteIndented = false,
|
||||
};
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<EvidenceChunkSignResult> SignAsync(
|
||||
EvidenceChunkSignRequest request,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
var stats = request.Statistics;
|
||||
var now = _timeProvider.GetUtcNow();
|
||||
|
||||
// Build predicate
|
||||
var predicate = new RuntimeEvidencePredicate
|
||||
{
|
||||
ChunkId = stats.ContentHash ?? $"sha256:{ComputeFileHash(stats.FilePath)}",
|
||||
ChunkSequence = stats.ChunkSequence,
|
||||
PreviousChunkId = request.PreviousChunkHash,
|
||||
EventCount = stats.EventCount,
|
||||
TimeRange = new EvidenceTimeRange
|
||||
{
|
||||
Start = stats.StartTime,
|
||||
End = stats.StartTime + stats.Duration,
|
||||
},
|
||||
CollectorVersion = request.CollectorVersion,
|
||||
KernelVersion = request.KernelVersion,
|
||||
Compression = stats.FilePath?.EndsWith(".gz", StringComparison.OrdinalIgnoreCase) == true ? "gzip" : null,
|
||||
HostId = request.HostId,
|
||||
ContainerIds = request.ContainerIds,
|
||||
};
|
||||
|
||||
// Build in-toto statement
|
||||
var statement = new InTotoStatement
|
||||
{
|
||||
Type = "https://in-toto.io/Statement/v0.1",
|
||||
PredicateType = PredicateType,
|
||||
Subject = new[]
|
||||
{
|
||||
new InTotoSubject
|
||||
{
|
||||
Name = stats.FilePath ?? "unknown",
|
||||
Digest = new Dictionary<string, string>
|
||||
{
|
||||
["sha256"] = ExtractHashHex(predicate.ChunkId),
|
||||
},
|
||||
},
|
||||
},
|
||||
Predicate = predicate,
|
||||
};
|
||||
|
||||
// Serialize statement
|
||||
var statementJson = JsonSerializer.Serialize(statement, _jsonOptions);
|
||||
var statementBytes = Encoding.UTF8.GetBytes(statementJson);
|
||||
|
||||
// Build DSSE Pre-Authentication Encoding (PAE)
|
||||
var paeBytes = ComputePae(PayloadType, statementBytes);
|
||||
|
||||
// Sign with HMAC-SHA256
|
||||
using var hmac = new HMACSHA256(_signingKey);
|
||||
var signatureBytes = hmac.ComputeHash(paeBytes);
|
||||
var signatureBase64 = Convert.ToBase64String(signatureBytes);
|
||||
|
||||
// Build DSSE envelope
|
||||
var envelope = new DsseEnvelope
|
||||
{
|
||||
PayloadType = PayloadType,
|
||||
Payload = Convert.ToBase64String(statementBytes),
|
||||
Signatures = new[]
|
||||
{
|
||||
new DsseSignature
|
||||
{
|
||||
KeyId = request.KeyId,
|
||||
Sig = signatureBase64,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
var envelopeJson = JsonSerializer.Serialize(envelope, _jsonOptions);
|
||||
var envelopeBase64 = Convert.ToBase64String(Encoding.UTF8.GetBytes(envelopeJson));
|
||||
|
||||
_logger.LogDebug(
|
||||
"Signed chunk {ChunkSequence} with key {KeyId}: {ChunkId}",
|
||||
stats.ChunkSequence,
|
||||
request.KeyId,
|
||||
predicate.ChunkId);
|
||||
|
||||
return Task.FromResult(new EvidenceChunkSignResult
|
||||
{
|
||||
Statistics = stats,
|
||||
Predicate = predicate,
|
||||
DsseEnvelopeBase64 = envelopeBase64,
|
||||
RekorUuid = null, // Local signer doesn't submit to Rekor
|
||||
RekorLogIndex = null,
|
||||
RekorInclusionProof = null,
|
||||
SignedAt = now,
|
||||
KeyId = request.KeyId,
|
||||
});
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<bool> VerifyAsync(EvidenceChunkSignResult result, CancellationToken ct = default)
|
||||
{
|
||||
try
|
||||
{
|
||||
// Decode envelope
|
||||
var envelopeJson = Encoding.UTF8.GetString(Convert.FromBase64String(result.DsseEnvelopeBase64));
|
||||
var envelope = JsonSerializer.Deserialize<DsseEnvelope>(envelopeJson, _jsonOptions);
|
||||
|
||||
if (envelope == null || envelope.Signatures.Length == 0)
|
||||
{
|
||||
return Task.FromResult(false);
|
||||
}
|
||||
|
||||
// Decode payload
|
||||
var statementBytes = Convert.FromBase64String(envelope.Payload);
|
||||
|
||||
// Recompute PAE
|
||||
var paeBytes = ComputePae(envelope.PayloadType, statementBytes);
|
||||
|
||||
// Verify HMAC
|
||||
using var hmac = new HMACSHA256(_signingKey);
|
||||
var expectedSignature = hmac.ComputeHash(paeBytes);
|
||||
var actualSignature = Convert.FromBase64String(envelope.Signatures[0].Sig);
|
||||
|
||||
return Task.FromResult(CryptographicOperations.FixedTimeEquals(expectedSignature, actualSignature));
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogWarning(ex, "Signature verification failed");
|
||||
return Task.FromResult(false);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Compute DSSE Pre-Authentication Encoding.
|
||||
/// Format: "DSSEv1" + SP + LEN(payloadType) + SP + payloadType + SP + LEN(payload) + SP + payload
|
||||
/// </summary>
|
||||
private static byte[] ComputePae(string payloadType, byte[] payload)
|
||||
{
|
||||
var payloadTypeBytes = Encoding.UTF8.GetBytes(payloadType);
|
||||
using var ms = new MemoryStream();
|
||||
using var writer = new BinaryWriter(ms);
|
||||
|
||||
// "DSSEv1 "
|
||||
writer.Write(Encoding.UTF8.GetBytes("DSSEv1 "));
|
||||
// LEN(payloadType)
|
||||
writer.Write(Encoding.UTF8.GetBytes(payloadTypeBytes.Length.ToString()));
|
||||
writer.Write((byte)' ');
|
||||
// payloadType
|
||||
writer.Write(payloadTypeBytes);
|
||||
writer.Write((byte)' ');
|
||||
// LEN(payload)
|
||||
writer.Write(Encoding.UTF8.GetBytes(payload.Length.ToString()));
|
||||
writer.Write((byte)' ');
|
||||
// payload
|
||||
writer.Write(payload);
|
||||
|
||||
return ms.ToArray();
|
||||
}
|
||||
|
||||
private static string ComputeFileHash(string? filePath)
|
||||
{
|
||||
if (string.IsNullOrEmpty(filePath) || !File.Exists(filePath))
|
||||
{
|
||||
return new string('0', 64);
|
||||
}
|
||||
|
||||
using var stream = File.OpenRead(filePath);
|
||||
var hash = SHA256.HashData(stream);
|
||||
return Convert.ToHexString(hash).ToLowerInvariant();
|
||||
}
|
||||
|
||||
private static string ExtractHashHex(string chunkId)
|
||||
{
|
||||
if (chunkId.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase))
|
||||
{
|
||||
return chunkId[7..];
|
||||
}
|
||||
|
||||
return chunkId;
|
||||
}
|
||||
|
||||
// In-toto statement structure
|
||||
private sealed record InTotoStatement
|
||||
{
|
||||
[JsonPropertyName("_type")]
|
||||
public required string Type { get; init; }
|
||||
|
||||
[JsonPropertyName("predicateType")]
|
||||
public required string PredicateType { get; init; }
|
||||
|
||||
[JsonPropertyName("subject")]
|
||||
public required InTotoSubject[] Subject { get; init; }
|
||||
|
||||
[JsonPropertyName("predicate")]
|
||||
public required RuntimeEvidencePredicate Predicate { get; init; }
|
||||
}
|
||||
|
||||
private sealed record InTotoSubject
|
||||
{
|
||||
[JsonPropertyName("name")]
|
||||
public required string Name { get; init; }
|
||||
|
||||
[JsonPropertyName("digest")]
|
||||
public required Dictionary<string, string> Digest { get; init; }
|
||||
}
|
||||
|
||||
// DSSE envelope structure
|
||||
private sealed record DsseEnvelope
|
||||
{
|
||||
[JsonPropertyName("payloadType")]
|
||||
public required string PayloadType { get; init; }
|
||||
|
||||
[JsonPropertyName("payload")]
|
||||
public required string Payload { get; init; }
|
||||
|
||||
[JsonPropertyName("signatures")]
|
||||
public required DsseSignature[] Signatures { get; init; }
|
||||
}
|
||||
|
||||
private sealed record DsseSignature
|
||||
{
|
||||
[JsonPropertyName("keyid")]
|
||||
public required string KeyId { get; init; }
|
||||
|
||||
[JsonPropertyName("sig")]
|
||||
public required string Sig { get; init; }
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Null implementation that doesn't sign anything.
|
||||
/// </summary>
|
||||
public sealed class NullEvidenceChunkSigner : IEvidenceChunkSigner
|
||||
{
|
||||
/// <summary>
|
||||
/// Singleton instance.
|
||||
/// </summary>
|
||||
public static readonly NullEvidenceChunkSigner Instance = new();
|
||||
|
||||
private NullEvidenceChunkSigner()
|
||||
{
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<EvidenceChunkSignResult> SignAsync(
|
||||
EvidenceChunkSignRequest request,
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
var stats = request.Statistics;
|
||||
var predicate = new RuntimeEvidencePredicate
|
||||
{
|
||||
ChunkId = stats.ContentHash ?? "sha256:unsigned",
|
||||
ChunkSequence = stats.ChunkSequence,
|
||||
PreviousChunkId = request.PreviousChunkHash,
|
||||
EventCount = stats.EventCount,
|
||||
TimeRange = new EvidenceTimeRange
|
||||
{
|
||||
Start = stats.StartTime,
|
||||
End = stats.StartTime + stats.Duration,
|
||||
},
|
||||
CollectorVersion = request.CollectorVersion,
|
||||
KernelVersion = request.KernelVersion,
|
||||
};
|
||||
|
||||
return Task.FromResult(new EvidenceChunkSignResult
|
||||
{
|
||||
Statistics = stats,
|
||||
Predicate = predicate,
|
||||
DsseEnvelopeBase64 = string.Empty,
|
||||
RekorUuid = null,
|
||||
RekorLogIndex = null,
|
||||
RekorInclusionProof = null,
|
||||
SignedAt = DateTimeOffset.UtcNow,
|
||||
KeyId = request.KeyId,
|
||||
});
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<bool> VerifyAsync(EvidenceChunkSignResult result, CancellationToken ct = default)
|
||||
{
|
||||
return Task.FromResult(true);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,159 @@
|
||||
// <copyright file="RuntimeEvidencePredicate.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Signing;
|
||||
|
||||
using System.Text.Json.Serialization;
|
||||
|
||||
/// <summary>
|
||||
/// Predicate structure for runtime evidence chunk attestations.
|
||||
/// Follows in-toto predicate conventions for DSSE signing.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// This predicate captures metadata about a signed evidence chunk:
|
||||
/// <code>
|
||||
/// {
|
||||
/// "predicateType": "stella.ops/runtime-evidence@v1",
|
||||
/// "predicate": {
|
||||
/// "chunk_id": "sha256:abc123...",
|
||||
/// "chunk_sequence": 42,
|
||||
/// "previous_chunk_id": "sha256:def456...",
|
||||
/// "event_count": 150000,
|
||||
/// "time_range": {
|
||||
/// "start": "2026-01-27T10:00:00Z",
|
||||
/// "end": "2026-01-27T11:00:00Z"
|
||||
/// },
|
||||
/// "collector_version": "1.0.0",
|
||||
/// "kernel_version": "5.15.0",
|
||||
/// "compression": "gzip"
|
||||
/// }
|
||||
/// }
|
||||
/// </code>
|
||||
/// </remarks>
|
||||
public sealed record RuntimeEvidencePredicate
|
||||
{
|
||||
/// <summary>
|
||||
/// Content hash of the chunk (sha256:hex format).
|
||||
/// </summary>
|
||||
[JsonPropertyName("chunk_id")]
|
||||
public required string ChunkId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Monotonically increasing sequence number within the evidence chain.
|
||||
/// </summary>
|
||||
[JsonPropertyName("chunk_sequence")]
|
||||
public required int ChunkSequence { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Content hash of the previous chunk for chain integrity (null for first chunk).
|
||||
/// </summary>
|
||||
[JsonPropertyName("previous_chunk_id")]
|
||||
public string? PreviousChunkId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Number of events in this chunk.
|
||||
/// </summary>
|
||||
[JsonPropertyName("event_count")]
|
||||
public required long EventCount { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Time range covered by events in this chunk.
|
||||
/// </summary>
|
||||
[JsonPropertyName("time_range")]
|
||||
public required EvidenceTimeRange TimeRange { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Version of the evidence collector.
|
||||
/// </summary>
|
||||
[JsonPropertyName("collector_version")]
|
||||
public required string CollectorVersion { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Linux kernel version where evidence was collected (if available).
|
||||
/// </summary>
|
||||
[JsonPropertyName("kernel_version")]
|
||||
public string? KernelVersion { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Compression algorithm used (null, "gzip", etc.).
|
||||
/// </summary>
|
||||
[JsonPropertyName("compression")]
|
||||
public string? Compression { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Host identifier where evidence was collected.
|
||||
/// </summary>
|
||||
[JsonPropertyName("host_id")]
|
||||
public string? HostId { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Optional container IDs whose events are included.
|
||||
/// </summary>
|
||||
[JsonPropertyName("container_ids")]
|
||||
public IReadOnlyList<string>? ContainerIds { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Time range for evidence chunk.
|
||||
/// </summary>
|
||||
public sealed record EvidenceTimeRange
|
||||
{
|
||||
/// <summary>
|
||||
/// Start time of events in chunk (ISO 8601).
|
||||
/// </summary>
|
||||
[JsonPropertyName("start")]
|
||||
public required DateTimeOffset Start { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// End time of events in chunk (ISO 8601).
|
||||
/// </summary>
|
||||
[JsonPropertyName("end")]
|
||||
public required DateTimeOffset End { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Result from chunk finalization and signing.
|
||||
/// </summary>
|
||||
public sealed record EvidenceChunkSignResult
|
||||
{
|
||||
/// <summary>
|
||||
/// The chunk statistics.
|
||||
/// </summary>
|
||||
public required Output.ChunkStatistics Statistics { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// The signed predicate.
|
||||
/// </summary>
|
||||
public required RuntimeEvidencePredicate Predicate { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Base64-encoded DSSE envelope JSON.
|
||||
/// </summary>
|
||||
public required string DsseEnvelopeBase64 { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Rekor entry UUID (if submitted).
|
||||
/// </summary>
|
||||
public string? RekorUuid { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Rekor log index (if submitted).
|
||||
/// </summary>
|
||||
public long? RekorLogIndex { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Rekor inclusion proof (if available).
|
||||
/// </summary>
|
||||
public string? RekorInclusionProof { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// When the chunk was signed.
|
||||
/// </summary>
|
||||
public required DateTimeOffset SignedAt { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Key ID used for signing.
|
||||
/// </summary>
|
||||
public required string KeyId { get; init; }
|
||||
}
|
||||
@@ -13,6 +13,16 @@
|
||||
<ItemGroup>
|
||||
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" />
|
||||
<PackageReference Include="Microsoft.Extensions.Options" />
|
||||
<PackageReference Include="Microsoft.Extensions.Caching.Memory" />
|
||||
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
|
||||
</ItemGroup>
|
||||
|
||||
<!-- Content files: BPF probe objects -->
|
||||
<ItemGroup>
|
||||
<None Include="Probes\Bpf\**\*.bpf.c" />
|
||||
<None Include="Probes\Bpf\**\*.h" />
|
||||
<None Include="Probes\Bpf\Makefile" />
|
||||
<Content Include="Probes\Bpf\*.o" CopyToOutputDirectory="PreserveNewest" Condition="Exists('Probes\Bpf\function_tracer.bpf.o')" />
|
||||
</ItemGroup>
|
||||
|
||||
<!-- Sprint: SPRINT_20260112_005_SIGNALS_runtime_nodehash (PW-SIG-002) -->
|
||||
@@ -20,4 +30,9 @@
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Reachability.Core\StellaOps.Reachability.Core.csproj" />
|
||||
</ItemGroup>
|
||||
|
||||
<!-- Sprint: SPRINT_0127_0002 (SIGNING-001) - Attestor integration for chunk signing -->
|
||||
<ItemGroup>
|
||||
<ProjectReference Include="..\..\..\Attestor\StellaOps.Attestor\StellaOps.Attestor.Core\StellaOps.Attestor.Core.csproj" />
|
||||
</ItemGroup>
|
||||
|
||||
</Project>
|
||||
|
||||
@@ -0,0 +1,598 @@
|
||||
// <copyright file="EnhancedSymbolResolver.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Symbols;
|
||||
|
||||
using System.Buffers.Binary;
|
||||
using System.Collections.Concurrent;
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using Microsoft.Extensions.Caching.Memory;
|
||||
using Microsoft.Extensions.Logging;
|
||||
|
||||
/// <summary>
|
||||
/// Enhanced symbol resolver with ELF parsing and LRU caching.
|
||||
/// </summary>
|
||||
public sealed class EnhancedSymbolResolver : ISymbolResolver, IDisposable
|
||||
{
|
||||
private readonly ILogger<EnhancedSymbolResolver> _logger;
|
||||
private readonly IMemoryCache _cache;
|
||||
private readonly ConcurrentDictionary<int, ProcessMaps> _processMapsCache;
|
||||
private readonly ConcurrentDictionary<string, ElfSymbolTable?> _elfSymbolCache;
|
||||
private readonly MemoryCacheEntryOptions _cacheOptions;
|
||||
private readonly string _procRoot;
|
||||
private bool _disposed;
|
||||
|
||||
/// <summary>
|
||||
/// Maximum number of resolved symbols to cache per process.
|
||||
/// </summary>
|
||||
private const int MaxCachedSymbolsPerProcess = 10000;
|
||||
|
||||
/// <summary>
|
||||
/// Cache TTL for resolved symbols.
|
||||
/// </summary>
|
||||
private static readonly TimeSpan CacheTtl = TimeSpan.FromMinutes(5);
|
||||
|
||||
public EnhancedSymbolResolver(
|
||||
ILogger<EnhancedSymbolResolver> logger,
|
||||
IMemoryCache cache,
|
||||
string procRoot = "/proc")
|
||||
{
|
||||
_logger = logger;
|
||||
_cache = cache;
|
||||
_processMapsCache = new();
|
||||
_elfSymbolCache = new();
|
||||
_procRoot = procRoot;
|
||||
_cacheOptions = new MemoryCacheEntryOptions()
|
||||
.SetSlidingExpiration(CacheTtl)
|
||||
.SetSize(1);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public (string? Symbol, string? Library, string? Purl) Resolve(int pid, ulong address)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException(nameof(EnhancedSymbolResolver));
|
||||
}
|
||||
|
||||
// Check cache first
|
||||
var cacheKey = $"sym:{pid}:{address:X16}";
|
||||
if (_cache.TryGetValue<ResolvedSymbol>(cacheKey, out var cached) && cached != null)
|
||||
{
|
||||
return (cached.Name, cached.Library, cached.Purl);
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
var result = ResolveInternal(pid, address);
|
||||
|
||||
// Cache the result
|
||||
var resolved = new ResolvedSymbol
|
||||
{
|
||||
Name = result.Symbol,
|
||||
Library = result.Library,
|
||||
Purl = result.Purl,
|
||||
};
|
||||
_cache.Set(cacheKey, resolved, _cacheOptions);
|
||||
|
||||
return result;
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogDebug(ex, "Failed to resolve symbol for PID {Pid} address 0x{Address:X16}", pid, address);
|
||||
return (null, null, null);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Internal symbol resolution logic.
|
||||
/// </summary>
|
||||
private (string? Symbol, string? Library, string? Purl) ResolveInternal(int pid, ulong address)
|
||||
{
|
||||
// Get or parse process memory maps
|
||||
var maps = GetProcessMaps(pid);
|
||||
if (maps == null || maps.Mappings.Count == 0)
|
||||
{
|
||||
return (null, null, null);
|
||||
}
|
||||
|
||||
// Find the mapping containing this address
|
||||
MemoryMapping? containingMapping = null;
|
||||
foreach (var mapping in maps.Mappings)
|
||||
{
|
||||
if (address >= mapping.StartAddress && address < mapping.EndAddress)
|
||||
{
|
||||
containingMapping = mapping;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (containingMapping == null)
|
||||
{
|
||||
return ($"addr:0x{address:X}", null, null);
|
||||
}
|
||||
|
||||
var library = containingMapping.Pathname;
|
||||
if (string.IsNullOrEmpty(library) || library.StartsWith('['))
|
||||
{
|
||||
// Anonymous mapping or special region like [heap], [stack]
|
||||
return ($"addr:0x{address:X}", library, null);
|
||||
}
|
||||
|
||||
// Calculate offset within the file
|
||||
var fileOffset = address - containingMapping.StartAddress + containingMapping.FileOffset;
|
||||
|
||||
// Try to resolve symbol from ELF
|
||||
var symbols = GetElfSymbols(library);
|
||||
if (symbols != null)
|
||||
{
|
||||
var symbol = symbols.FindSymbol(fileOffset);
|
||||
if (symbol != null)
|
||||
{
|
||||
return (symbol, library, null);
|
||||
}
|
||||
}
|
||||
|
||||
// Return address-based identifier if symbol not found
|
||||
return ($"addr:0x{address:X}+0x{fileOffset:X}", library, null);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get or parse process memory maps.
|
||||
/// </summary>
|
||||
private ProcessMaps? GetProcessMaps(int pid)
|
||||
{
|
||||
if (_processMapsCache.TryGetValue(pid, out var cached))
|
||||
{
|
||||
return cached;
|
||||
}
|
||||
|
||||
var maps = ParseProcessMaps(pid);
|
||||
if (maps != null)
|
||||
{
|
||||
_processMapsCache.TryAdd(pid, maps);
|
||||
}
|
||||
|
||||
return maps;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse /proc/{pid}/maps.
|
||||
/// </summary>
|
||||
private ProcessMaps? ParseProcessMaps(int pid)
|
||||
{
|
||||
var mapsPath = Path.Combine(_procRoot, pid.ToString(), "maps");
|
||||
if (!File.Exists(mapsPath))
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
var mappings = new List<MemoryMapping>();
|
||||
|
||||
try
|
||||
{
|
||||
foreach (var line in File.ReadLines(mapsPath))
|
||||
{
|
||||
var mapping = ParseMapsLine(line);
|
||||
if (mapping != null)
|
||||
{
|
||||
mappings.Add(mapping);
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (IOException)
|
||||
{
|
||||
// Process may have exited
|
||||
return null;
|
||||
}
|
||||
|
||||
return new ProcessMaps { Pid = pid, Mappings = mappings };
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse a single line from /proc/pid/maps.
|
||||
/// Format: address perms offset dev inode pathname
|
||||
/// Example: 7f1234560000-7f1234570000 r-xp 00001000 08:01 12345 /lib/x86_64-linux-gnu/libc.so.6
|
||||
/// </summary>
|
||||
private static MemoryMapping? ParseMapsLine(string line)
|
||||
{
|
||||
var parts = line.Split(' ', StringSplitOptions.RemoveEmptyEntries);
|
||||
if (parts.Length < 5)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
var addrParts = parts[0].Split('-');
|
||||
if (addrParts.Length != 2)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
if (!ulong.TryParse(addrParts[0], System.Globalization.NumberStyles.HexNumber, null, out var start))
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
if (!ulong.TryParse(addrParts[1], System.Globalization.NumberStyles.HexNumber, null, out var end))
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
var perms = parts[1];
|
||||
_ = ulong.TryParse(parts[2], System.Globalization.NumberStyles.HexNumber, null, out var offset);
|
||||
|
||||
var pathname = parts.Length > 5 ? parts[5] : null;
|
||||
|
||||
return new MemoryMapping
|
||||
{
|
||||
StartAddress = start,
|
||||
EndAddress = end,
|
||||
Permissions = perms,
|
||||
FileOffset = offset,
|
||||
Pathname = pathname,
|
||||
IsExecutable = perms.Contains('x'),
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get or parse ELF symbol table.
|
||||
/// </summary>
|
||||
private ElfSymbolTable? GetElfSymbols(string path)
|
||||
{
|
||||
if (_elfSymbolCache.TryGetValue(path, out var cached))
|
||||
{
|
||||
return cached;
|
||||
}
|
||||
|
||||
var symbols = ParseElfSymbols(path);
|
||||
_elfSymbolCache.TryAdd(path, symbols);
|
||||
return symbols;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parse ELF symbol table (.symtab and .dynsym).
|
||||
/// </summary>
|
||||
private ElfSymbolTable? ParseElfSymbols(string path)
|
||||
{
|
||||
if (!File.Exists(path))
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
using var stream = File.OpenRead(path);
|
||||
return ElfParser.ParseSymbols(stream, _logger);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_logger.LogDebug(ex, "Failed to parse ELF symbols from {Path}", path);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Invalidate cached data for a process.
|
||||
/// </summary>
|
||||
public void InvalidateProcess(int pid)
|
||||
{
|
||||
_processMapsCache.TryRemove(pid, out _);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public void Dispose()
|
||||
{
|
||||
if (!_disposed)
|
||||
{
|
||||
_processMapsCache.Clear();
|
||||
_elfSymbolCache.Clear();
|
||||
_disposed = true;
|
||||
}
|
||||
}
|
||||
|
||||
private sealed record ProcessMaps
|
||||
{
|
||||
public required int Pid { get; init; }
|
||||
public required IReadOnlyList<MemoryMapping> Mappings { get; init; }
|
||||
}
|
||||
|
||||
private sealed record MemoryMapping
|
||||
{
|
||||
public required ulong StartAddress { get; init; }
|
||||
public required ulong EndAddress { get; init; }
|
||||
public required ulong FileOffset { get; init; }
|
||||
public required string Permissions { get; init; }
|
||||
public string? Pathname { get; init; }
|
||||
public bool IsExecutable { get; init; }
|
||||
}
|
||||
|
||||
private sealed record ResolvedSymbol
|
||||
{
|
||||
public string? Name { get; init; }
|
||||
public string? Library { get; init; }
|
||||
public string? Purl { get; init; }
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Minimal ELF parser for symbol table extraction.
|
||||
/// </summary>
|
||||
internal static class ElfParser
|
||||
{
|
||||
private const uint ElfMagic = 0x464C457F; // "\x7FELF"
|
||||
private const int ElfClass32 = 1;
|
||||
private const int ElfClass64 = 2;
|
||||
|
||||
/// <summary>
|
||||
/// Parse symbol table from ELF file.
|
||||
/// </summary>
|
||||
public static ElfSymbolTable? ParseSymbols(Stream stream, ILogger logger)
|
||||
{
|
||||
using var reader = new BinaryReader(stream, Encoding.UTF8, leaveOpen: true);
|
||||
|
||||
// Read ELF header
|
||||
var magic = reader.ReadUInt32();
|
||||
if (magic != ElfMagic)
|
||||
{
|
||||
logger.LogDebug("Not an ELF file");
|
||||
return null;
|
||||
}
|
||||
|
||||
var elfClass = reader.ReadByte(); // 1 = 32-bit, 2 = 64-bit
|
||||
var dataEncoding = reader.ReadByte(); // 1 = little endian, 2 = big endian
|
||||
var version = reader.ReadByte();
|
||||
|
||||
// Skip rest of e_ident
|
||||
reader.ReadBytes(9);
|
||||
|
||||
if (elfClass == ElfClass64)
|
||||
{
|
||||
return ParseElf64Symbols(reader, logger);
|
||||
}
|
||||
else if (elfClass == ElfClass32)
|
||||
{
|
||||
return ParseElf32Symbols(reader, logger);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
private static ElfSymbolTable? ParseElf64Symbols(BinaryReader reader, ILogger logger)
|
||||
{
|
||||
// Read ELF64 header
|
||||
var type = reader.ReadUInt16();
|
||||
var machine = reader.ReadUInt16();
|
||||
var version = reader.ReadUInt32();
|
||||
var entry = reader.ReadUInt64();
|
||||
var phoff = reader.ReadUInt64();
|
||||
var shoff = reader.ReadUInt64();
|
||||
var flags = reader.ReadUInt32();
|
||||
var ehsize = reader.ReadUInt16();
|
||||
var phentsize = reader.ReadUInt16();
|
||||
var phnum = reader.ReadUInt16();
|
||||
var shentsize = reader.ReadUInt16();
|
||||
var shnum = reader.ReadUInt16();
|
||||
var shstrndx = reader.ReadUInt16();
|
||||
|
||||
if (shoff == 0 || shnum == 0)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
// Read section headers
|
||||
var sections = new List<Elf64SectionHeader>();
|
||||
reader.BaseStream.Seek((long)shoff, SeekOrigin.Begin);
|
||||
|
||||
for (int i = 0; i < shnum; i++)
|
||||
{
|
||||
var sh = new Elf64SectionHeader
|
||||
{
|
||||
Name = reader.ReadUInt32(),
|
||||
Type = reader.ReadUInt32(),
|
||||
Flags = reader.ReadUInt64(),
|
||||
Addr = reader.ReadUInt64(),
|
||||
Offset = reader.ReadUInt64(),
|
||||
Size = reader.ReadUInt64(),
|
||||
Link = reader.ReadUInt32(),
|
||||
Info = reader.ReadUInt32(),
|
||||
AddrAlign = reader.ReadUInt64(),
|
||||
EntSize = reader.ReadUInt64(),
|
||||
};
|
||||
sections.Add(sh);
|
||||
}
|
||||
|
||||
// Find .symtab or .dynsym and their string tables
|
||||
var symbols = new List<ElfSymbol>();
|
||||
|
||||
foreach (var section in sections)
|
||||
{
|
||||
// SHT_SYMTAB = 2, SHT_DYNSYM = 11
|
||||
if (section.Type != 2 && section.Type != 11)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (section.EntSize == 0 || section.Size == 0)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
// Get string table for this symbol table
|
||||
if (section.Link >= sections.Count)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
var strtab = sections[(int)section.Link];
|
||||
|
||||
// Read string table
|
||||
reader.BaseStream.Seek((long)strtab.Offset, SeekOrigin.Begin);
|
||||
var strBytes = reader.ReadBytes((int)strtab.Size);
|
||||
|
||||
// Read symbols
|
||||
reader.BaseStream.Seek((long)section.Offset, SeekOrigin.Begin);
|
||||
var numSymbols = (int)(section.Size / section.EntSize);
|
||||
|
||||
for (int i = 0; i < numSymbols; i++)
|
||||
{
|
||||
var nameIdx = reader.ReadUInt32();
|
||||
var info = reader.ReadByte();
|
||||
var other = reader.ReadByte();
|
||||
var shndx = reader.ReadUInt16();
|
||||
var value = reader.ReadUInt64();
|
||||
var size = reader.ReadUInt64();
|
||||
|
||||
// Skip undefined or section symbols
|
||||
var symType = info & 0xF;
|
||||
if (symType != 1 && symType != 2) // STT_OBJECT=1, STT_FUNC=2
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (value == 0)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
// Read name from string table
|
||||
var name = ReadNullTerminatedString(strBytes, (int)nameIdx);
|
||||
if (string.IsNullOrEmpty(name))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
symbols.Add(new ElfSymbol
|
||||
{
|
||||
Name = name,
|
||||
Value = value,
|
||||
Size = size,
|
||||
Type = (byte)symType,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (symbols.Count == 0)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
// Sort by address for binary search
|
||||
symbols.Sort((a, b) => a.Value.CompareTo(b.Value));
|
||||
|
||||
return new ElfSymbolTable(symbols);
|
||||
}
|
||||
|
||||
private static ElfSymbolTable? ParseElf32Symbols(BinaryReader reader, ILogger logger)
|
||||
{
|
||||
// Similar to 64-bit but with 32-bit fields
|
||||
// For brevity, return null and fall back to address-based resolution
|
||||
logger.LogDebug("32-bit ELF parsing not implemented, using address-based resolution");
|
||||
return null;
|
||||
}
|
||||
|
||||
private static string ReadNullTerminatedString(byte[] data, int offset)
|
||||
{
|
||||
if (offset < 0 || offset >= data.Length)
|
||||
{
|
||||
return string.Empty;
|
||||
}
|
||||
|
||||
var end = Array.IndexOf(data, (byte)0, offset);
|
||||
if (end < 0)
|
||||
{
|
||||
end = data.Length;
|
||||
}
|
||||
|
||||
return Encoding.UTF8.GetString(data, offset, end - offset);
|
||||
}
|
||||
|
||||
private readonly struct Elf64SectionHeader
|
||||
{
|
||||
public uint Name { get; init; }
|
||||
public uint Type { get; init; }
|
||||
public ulong Flags { get; init; }
|
||||
public ulong Addr { get; init; }
|
||||
public ulong Offset { get; init; }
|
||||
public ulong Size { get; init; }
|
||||
public uint Link { get; init; }
|
||||
public uint Info { get; init; }
|
||||
public ulong AddrAlign { get; init; }
|
||||
public ulong EntSize { get; init; }
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parsed ELF symbol table.
|
||||
/// </summary>
|
||||
internal sealed class ElfSymbolTable
|
||||
{
|
||||
private readonly IReadOnlyList<ElfSymbol> _symbols;
|
||||
|
||||
public ElfSymbolTable(IReadOnlyList<ElfSymbol> symbols)
|
||||
{
|
||||
_symbols = symbols;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Find symbol by address (binary search).
|
||||
/// </summary>
|
||||
public string? FindSymbol(ulong address)
|
||||
{
|
||||
if (_symbols.Count == 0)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
// Binary search for symbol containing address
|
||||
int left = 0;
|
||||
int right = _symbols.Count - 1;
|
||||
int bestMatch = -1;
|
||||
|
||||
while (left <= right)
|
||||
{
|
||||
int mid = left + (right - left) / 2;
|
||||
var sym = _symbols[mid];
|
||||
|
||||
if (address >= sym.Value && (sym.Size == 0 || address < sym.Value + sym.Size))
|
||||
{
|
||||
return sym.Name;
|
||||
}
|
||||
|
||||
if (sym.Value <= address)
|
||||
{
|
||||
bestMatch = mid;
|
||||
left = mid + 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
right = mid - 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Return closest symbol if within reasonable range
|
||||
if (bestMatch >= 0)
|
||||
{
|
||||
var sym = _symbols[bestMatch];
|
||||
var offset = address - sym.Value;
|
||||
if (offset < 0x10000) // Within 64KB
|
||||
{
|
||||
return $"{sym.Name}+0x{offset:X}";
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parsed ELF symbol.
|
||||
/// </summary>
|
||||
internal readonly struct ElfSymbol
|
||||
{
|
||||
public required string Name { get; init; }
|
||||
public required ulong Value { get; init; }
|
||||
public required ulong Size { get; init; }
|
||||
public required byte Type { get; init; }
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
// <copyright file="ISymbolResolver.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Symbols;
|
||||
|
||||
/// <summary>
|
||||
/// Interface for resolving function addresses to symbol information.
|
||||
/// </summary>
|
||||
public interface ISymbolResolver
|
||||
{
|
||||
/// <summary>
|
||||
/// Resolves a function address to symbol information.
|
||||
/// </summary>
|
||||
/// <param name="pid">Process ID containing the address.</param>
|
||||
/// <param name="address">Function address to resolve.</param>
|
||||
/// <returns>Tuple of (symbol name, library path, package URL) or nulls if unresolved.</returns>
|
||||
(string? Symbol, string? Library, string? Purl) Resolve(int pid, ulong address);
|
||||
}
|
||||
@@ -0,0 +1,634 @@
|
||||
// <copyright file="CgroupContainerResolverTests.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Tests.Cgroup;
|
||||
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using StellaOps.Signals.Ebpf.Cgroup;
|
||||
using Xunit;
|
||||
|
||||
public class CgroupContainerResolverTests : IDisposable
|
||||
{
|
||||
private readonly string _testProcRoot;
|
||||
private readonly CgroupContainerResolver _resolver;
|
||||
|
||||
public CgroupContainerResolverTests()
|
||||
{
|
||||
_testProcRoot = Path.Combine(Path.GetTempPath(), $"proc_test_{Guid.NewGuid():N}");
|
||||
Directory.CreateDirectory(_testProcRoot);
|
||||
_resolver = new CgroupContainerResolver(
|
||||
NullLogger<CgroupContainerResolver>.Instance,
|
||||
_testProcRoot);
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
_resolver.Dispose();
|
||||
if (Directory.Exists(_testProcRoot))
|
||||
{
|
||||
Directory.Delete(_testProcRoot, recursive: true);
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_ContainerdContainer_ReturnsContainerIdentity()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12345;
|
||||
var containerId = "abc123def456789012345678901234567890123456789012345678901234abcd"; // 64 hex chars
|
||||
SetupCgroupFile(pid, $"0::/system.slice/containerd-{containerId}.scope");
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Runtime.Should().Be(ContainerRuntime.Containerd);
|
||||
result.FullId.Should().Be(containerId);
|
||||
result.ShortId.Should().Be(containerId[..12]);
|
||||
result.ContainerId.Should().Be($"containerd://{containerId}");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_DockerContainer_ReturnsContainerIdentity()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12346;
|
||||
var containerId = "def456789012345678901234567890123456789012345678901234567890abcd"; // 64 hex chars
|
||||
SetupCgroupFile(pid, $"0::/docker/{containerId}");
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Runtime.Should().Be(ContainerRuntime.Docker);
|
||||
result.FullId.Should().Be(containerId);
|
||||
result.ContainerId.Should().Be($"docker://{containerId}");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_DockerSystemdScope_ReturnsContainerIdentity()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12347;
|
||||
var containerId = "1111111111111111111111111111111111111111111111111111111111111111"; // exactly 64 hex chars
|
||||
SetupCgroupFile(pid, $"0::/system.slice/docker-{containerId}.scope");
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Runtime.Should().Be(ContainerRuntime.Docker);
|
||||
result.FullId.Should().Be(containerId);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_CrioContainer_ReturnsContainerIdentity()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12348;
|
||||
var containerId = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
|
||||
SetupCgroupFile(pid, $"0::/crio-{containerId}.scope");
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Runtime.Should().Be(ContainerRuntime.CriO);
|
||||
result.FullId.Should().Be(containerId);
|
||||
result.ContainerId.Should().Be($"cri-o://{containerId}");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_PodmanContainer_ReturnsContainerIdentity()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12349;
|
||||
var containerId = "fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210";
|
||||
SetupCgroupFile(pid, $"0::/libpod-{containerId}.scope");
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Runtime.Should().Be(ContainerRuntime.Podman);
|
||||
result.FullId.Should().Be(containerId);
|
||||
result.ContainerId.Should().Be($"podman://{containerId}");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_CgroupV1_ParsesCorrectly()
|
||||
{
|
||||
// Arrange - cgroup v1 format with multiple lines
|
||||
var pid = 12350;
|
||||
var containerId = "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789";
|
||||
var cgroupContent = @"12:pids:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
11:hugetlb:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
10:net_prio:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
9:perf_event:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
8:net_cls:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
7:freezer:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
6:devices:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
5:memory:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
4:blkio:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
3:cpuacct:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
2:cpu:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789
|
||||
1:cpuset:/docker/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789";
|
||||
SetupCgroupFileRaw(pid, cgroupContent);
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Runtime.Should().Be(ContainerRuntime.Docker);
|
||||
result.FullId.Should().Be(containerId);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_NonContainerProcess_ReturnsNull()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12351;
|
||||
SetupCgroupFile(pid, "0::/user.slice/user-1000.slice/session-1.scope");
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_ProcessNotFound_ReturnsNull()
|
||||
{
|
||||
// Arrange - no cgroup file created
|
||||
var pid = 99999;
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_CachesResult()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12352;
|
||||
var containerId = "2222222222222222222222222222222222222222222222222222222222222222"; // exactly 64 hex chars
|
||||
SetupCgroupFile(pid, $"0::/docker/{containerId}");
|
||||
|
||||
// Act
|
||||
var result1 = _resolver.ResolveByPid(pid);
|
||||
var result2 = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result1.Should().NotBeNull();
|
||||
result2.Should().NotBeNull();
|
||||
result1.Should().BeSameAs(result2); // Same reference from cache
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void InvalidatePid_RemovesFromCache()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12353;
|
||||
var containerId1 = "3333333333333333333333333333333333333333333333333333333333333333"; // exactly 64 hex chars
|
||||
SetupCgroupFile(pid, $"0::/docker/{containerId1}");
|
||||
var result1 = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Update cgroup file
|
||||
var containerId2 = "4444444444444444444444444444444444444444444444444444444444444444"; // exactly 64 hex chars
|
||||
SetupCgroupFile(pid, $"0::/docker/{containerId2}");
|
||||
|
||||
// Act
|
||||
_resolver.InvalidatePid(pid);
|
||||
var result2 = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result1!.FullId.Should().Be(containerId1);
|
||||
result2!.FullId.Should().Be(containerId2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RegisterCgroupMapping_AllowsLookupByCgroupId()
|
||||
{
|
||||
// Arrange
|
||||
var cgroupId = 12345678UL;
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://test123456789012345678901234567890123456789012345678901234",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "test12345678",
|
||||
FullId = "test123456789012345678901234567890123456789012345678901234",
|
||||
};
|
||||
|
||||
// Act
|
||||
_resolver.RegisterCgroupMapping(cgroupId, identity);
|
||||
var result = _resolver.ResolveByCgroupId(cgroupId);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result.Should().BeSameAs(identity);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByCgroupId_UnknownId_ReturnsNull()
|
||||
{
|
||||
// Arrange
|
||||
var cgroupId = 99999999UL;
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByCgroupId(cgroupId);
|
||||
|
||||
// Assert
|
||||
result.Should().BeNull();
|
||||
}
|
||||
|
||||
private void SetupCgroupFile(int pid, string cgroupPath)
|
||||
{
|
||||
SetupCgroupFileRaw(pid, cgroupPath);
|
||||
}
|
||||
|
||||
private void SetupCgroupFileRaw(int pid, string content)
|
||||
{
|
||||
var pidDir = Path.Combine(_testProcRoot, pid.ToString());
|
||||
Directory.CreateDirectory(pidDir);
|
||||
File.WriteAllText(Path.Combine(pidDir, "cgroup"), content);
|
||||
}
|
||||
|
||||
private void SetupNamespaceFiles(int pid, ulong pidNs, ulong mntNs, ulong netNs = 0, ulong userNs = 0, ulong cgroupNs = 0)
|
||||
{
|
||||
var pidDir = Path.Combine(_testProcRoot, pid.ToString());
|
||||
var nsDir = Path.Combine(pidDir, "ns");
|
||||
Directory.CreateDirectory(nsDir);
|
||||
|
||||
// Write namespace inodes in the Linux symlink format: "type:[inode]"
|
||||
File.WriteAllText(Path.Combine(nsDir, "pid"), $"pid:[{pidNs}]");
|
||||
File.WriteAllText(Path.Combine(nsDir, "mnt"), $"mnt:[{mntNs}]");
|
||||
File.WriteAllText(Path.Combine(nsDir, "net"), $"net:[{netNs}]");
|
||||
File.WriteAllText(Path.Combine(nsDir, "user"), $"user:[{userNs}]");
|
||||
File.WriteAllText(Path.Combine(nsDir, "cgroup"), $"cgroup:[{cgroupNs}]");
|
||||
}
|
||||
|
||||
#region Namespace Filtering Tests
|
||||
|
||||
[Fact]
|
||||
public void GetNamespaceInfo_ReturnsCorrectInodes()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 20001;
|
||||
SetupNamespaceFiles(pid, pidNs: 4026531836, mntNs: 4026531840, netNs: 4026531992);
|
||||
|
||||
// Act
|
||||
var nsInfo = _resolver.GetNamespaceInfo(pid);
|
||||
|
||||
// Assert
|
||||
nsInfo.Should().NotBeNull();
|
||||
nsInfo!.PidNs.Should().Be(4026531836);
|
||||
nsInfo.MntNs.Should().Be(4026531840);
|
||||
nsInfo.NetNs.Should().Be(4026531992);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void GetNamespaceInfo_ProcessNotFound_ReturnsNull()
|
||||
{
|
||||
// Arrange - no namespace files created
|
||||
var pid = 99998;
|
||||
|
||||
// Act
|
||||
var nsInfo = _resolver.GetNamespaceInfo(pid);
|
||||
|
||||
// Assert
|
||||
nsInfo.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void GetNamespaceInfo_CachesResult()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 20002;
|
||||
SetupNamespaceFiles(pid, pidNs: 1111111111, mntNs: 2222222222);
|
||||
|
||||
// Act
|
||||
var result1 = _resolver.GetNamespaceInfo(pid);
|
||||
var result2 = _resolver.GetNamespaceInfo(pid);
|
||||
|
||||
// Assert
|
||||
result1.Should().NotBeNull();
|
||||
result2.Should().BeSameAs(result1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void IsInSameNamespace_SamePidNs_ReturnsTrue()
|
||||
{
|
||||
// Arrange
|
||||
var pid1 = 20003;
|
||||
var pid2 = 20004;
|
||||
SetupNamespaceFiles(pid1, pidNs: 4026531836, mntNs: 4026531840);
|
||||
SetupNamespaceFiles(pid2, pidNs: 4026531836, mntNs: 4026531999); // Same pid ns, different mnt ns
|
||||
|
||||
// Act
|
||||
var result = _resolver.IsInSameNamespace(pid1, pid2, NamespaceType.Pid);
|
||||
|
||||
// Assert
|
||||
result.Should().BeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void IsInSameNamespace_DifferentPidNs_ReturnsFalse()
|
||||
{
|
||||
// Arrange
|
||||
var pid1 = 20005;
|
||||
var pid2 = 20006;
|
||||
SetupNamespaceFiles(pid1, pidNs: 4026531836, mntNs: 4026531840);
|
||||
SetupNamespaceFiles(pid2, pidNs: 4026531999, mntNs: 4026531840); // Different pid ns
|
||||
|
||||
// Act
|
||||
var result = _resolver.IsInSameNamespace(pid1, pid2, NamespaceType.Pid);
|
||||
|
||||
// Assert
|
||||
result.Should().BeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void IsInSameNamespace_SameMntNs_ReturnsTrue()
|
||||
{
|
||||
// Arrange
|
||||
var pid1 = 20007;
|
||||
var pid2 = 20008;
|
||||
SetupNamespaceFiles(pid1, pidNs: 111, mntNs: 4026531840);
|
||||
SetupNamespaceFiles(pid2, pidNs: 222, mntNs: 4026531840);
|
||||
|
||||
// Act
|
||||
var result = _resolver.IsInSameNamespace(pid1, pid2, NamespaceType.Mnt);
|
||||
|
||||
// Assert
|
||||
result.Should().BeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void MatchesNamespaceFilter_NoFilter_ReturnsTrue()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 20009;
|
||||
SetupNamespaceFiles(pid, pidNs: 4026531836, mntNs: 4026531840);
|
||||
|
||||
// Act - resolver has no namespace filter
|
||||
var result = _resolver.MatchesNamespaceFilter(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().BeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void NamespaceFilter_MatchingPidNs_ReturnsTrue()
|
||||
{
|
||||
// Arrange
|
||||
var filter = new NamespaceFilter
|
||||
{
|
||||
TargetPidNamespaces = new HashSet<ulong> { 4026531836 },
|
||||
};
|
||||
|
||||
var nsInfo = new NamespaceInfo
|
||||
{
|
||||
PidNs = 4026531836,
|
||||
MntNs = 4026531840,
|
||||
};
|
||||
|
||||
// Act
|
||||
var result = filter.Matches(nsInfo);
|
||||
|
||||
// Assert
|
||||
result.Should().BeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void NamespaceFilter_NonMatchingPidNs_ReturnsFalse()
|
||||
{
|
||||
// Arrange
|
||||
var filter = new NamespaceFilter
|
||||
{
|
||||
TargetPidNamespaces = new HashSet<ulong> { 4026531836 },
|
||||
};
|
||||
|
||||
var nsInfo = new NamespaceInfo
|
||||
{
|
||||
PidNs = 9999999999,
|
||||
MntNs = 4026531840,
|
||||
};
|
||||
|
||||
// Act
|
||||
var result = filter.Matches(nsInfo);
|
||||
|
||||
// Assert
|
||||
result.Should().BeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void NamespaceFilter_ModeAll_RequiresAllMatches()
|
||||
{
|
||||
// Arrange
|
||||
var filter = new NamespaceFilter
|
||||
{
|
||||
TargetPidNamespaces = new HashSet<ulong> { 111 },
|
||||
TargetMntNamespaces = new HashSet<ulong> { 222 },
|
||||
Mode = NamespaceFilterMode.All,
|
||||
};
|
||||
|
||||
var matchingNsInfo = new NamespaceInfo { PidNs = 111, MntNs = 222 };
|
||||
var partialNsInfo = new NamespaceInfo { PidNs = 111, MntNs = 999 };
|
||||
|
||||
// Act & Assert
|
||||
filter.Matches(matchingNsInfo).Should().BeTrue();
|
||||
filter.Matches(partialNsInfo).Should().BeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void NamespaceFilter_ModeAny_RequiresAnyMatch()
|
||||
{
|
||||
// Arrange
|
||||
var filter = new NamespaceFilter
|
||||
{
|
||||
TargetPidNamespaces = new HashSet<ulong> { 111 },
|
||||
TargetMntNamespaces = new HashSet<ulong> { 222 },
|
||||
Mode = NamespaceFilterMode.Any,
|
||||
};
|
||||
|
||||
var matchesPid = new NamespaceInfo { PidNs = 111, MntNs = 999 };
|
||||
var matchesMnt = new NamespaceInfo { PidNs = 999, MntNs = 222 };
|
||||
var matchesNeither = new NamespaceInfo { PidNs = 999, MntNs = 999 };
|
||||
|
||||
// Act & Assert
|
||||
filter.Matches(matchesPid).Should().BeTrue();
|
||||
filter.Matches(matchesMnt).Should().BeTrue();
|
||||
filter.Matches(matchesNeither).Should().BeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void NamespaceFilter_NoTargets_MatchesAll()
|
||||
{
|
||||
// Arrange
|
||||
var filter = new NamespaceFilter(); // No targets specified
|
||||
|
||||
var nsInfo = new NamespaceInfo
|
||||
{
|
||||
PidNs = 999,
|
||||
MntNs = 888,
|
||||
};
|
||||
|
||||
// Act
|
||||
var result = filter.Matches(nsInfo);
|
||||
|
||||
// Assert
|
||||
result.Should().BeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ResolveByPid_IncludesNamespaceInfo()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 20010;
|
||||
var containerId = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
|
||||
SetupCgroupFile(pid, $"0::/docker/{containerId}");
|
||||
SetupNamespaceFiles(pid, pidNs: 4026531836, mntNs: 4026531840);
|
||||
|
||||
// Act
|
||||
var result = _resolver.ResolveByPid(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Runtime.Should().Be(ContainerRuntime.Docker);
|
||||
result.Namespaces.Should().NotBeNull();
|
||||
result.Namespaces!.PidNs.Should().Be(4026531836);
|
||||
result.Namespaces.MntNs.Should().Be(4026531840);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void InvalidatePid_ClearsNamespaceCache()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 20011;
|
||||
SetupNamespaceFiles(pid, pidNs: 111, mntNs: 222);
|
||||
var result1 = _resolver.GetNamespaceInfo(pid);
|
||||
|
||||
// Update namespace file
|
||||
SetupNamespaceFiles(pid, pidNs: 333, mntNs: 444);
|
||||
|
||||
// Act
|
||||
_resolver.InvalidatePid(pid);
|
||||
var result2 = _resolver.GetNamespaceInfo(pid);
|
||||
|
||||
// Assert
|
||||
result1!.PidNs.Should().Be(111);
|
||||
result2!.PidNs.Should().Be(333);
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
#region IContainerIdentityResolver Integration Tests
|
||||
|
||||
[Fact]
|
||||
public async Task LocalContainerIdentityResolver_ResolveByPidAsync_ReturnsIdentity()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 30001;
|
||||
var containerId = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb";
|
||||
SetupCgroupFile(pid, $"0::/containerd-{containerId}.scope");
|
||||
|
||||
var localResolver = new LocalContainerIdentityResolver(_resolver);
|
||||
|
||||
// Act
|
||||
var result = await localResolver.ResolveByPidAsync(pid);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Runtime.Should().Be(ContainerRuntime.Containerd);
|
||||
result.FullId.Should().Be(containerId);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task LocalContainerIdentityResolver_ResolveByContainerId_ReturnsNull()
|
||||
{
|
||||
// Arrange - local resolver doesn't support container ID lookup
|
||||
var localResolver = new LocalContainerIdentityResolver(_resolver);
|
||||
|
||||
// Act
|
||||
var result = await localResolver.ResolveByContainerIdAsync("test-container");
|
||||
|
||||
// Assert
|
||||
result.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task LocalContainerIdentityResolver_ResolveByCgroupId_WithRegisteredMapping_ReturnsIdentity()
|
||||
{
|
||||
// Arrange
|
||||
var cgroupId = 12345678UL;
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "docker://test1234567890123456789012345678901234567890123456789012",
|
||||
Runtime = ContainerRuntime.Docker,
|
||||
ShortId = "test12345678",
|
||||
FullId = "test1234567890123456789012345678901234567890123456789012",
|
||||
};
|
||||
|
||||
var localResolver = new LocalContainerIdentityResolver(_resolver);
|
||||
localResolver.RegisterCgroupMapping(cgroupId, identity);
|
||||
|
||||
// Act
|
||||
var result = await localResolver.ResolveByCgroupIdAsync(cgroupId);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result.Should().BeSameAs(identity);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task LocalContainerIdentityResolver_GetImageDigest_ReturnsNull()
|
||||
{
|
||||
// Arrange - local resolver doesn't have access to image digests
|
||||
var localResolver = new LocalContainerIdentityResolver(_resolver);
|
||||
|
||||
// Act
|
||||
var result = await localResolver.GetImageDigestAsync("test-container");
|
||||
|
||||
// Assert
|
||||
result.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ContainerLifecycleEventArgs_HasCorrectProperties()
|
||||
{
|
||||
// Arrange & Act
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://abc123",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "abc123456789",
|
||||
FullId = "abc1234567890123456789012345678901234567890123456789012345678901",
|
||||
};
|
||||
|
||||
var eventArgs = new ContainerLifecycleEventArgs
|
||||
{
|
||||
Identity = identity,
|
||||
Timestamp = DateTimeOffset.UtcNow,
|
||||
Pids = [1234, 5678],
|
||||
};
|
||||
|
||||
// Assert
|
||||
eventArgs.Identity.Should().BeSameAs(identity);
|
||||
eventArgs.Pids.Should().HaveCount(2);
|
||||
eventArgs.Pids.Should().Contain(1234);
|
||||
}
|
||||
|
||||
#endregion
|
||||
}
|
||||
@@ -0,0 +1,237 @@
|
||||
// <copyright file="GoldenFileTests.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Tests.Determinism;
|
||||
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using Xunit;
|
||||
|
||||
/// <summary>
|
||||
/// Determinism tests using golden file comparison.
|
||||
/// Sprint: SPRINT_0127_0002_Signals_ebpf_syscall_reachability_proofs (DOCS-002)
|
||||
/// </summary>
|
||||
public sealed class GoldenFileTests
|
||||
{
|
||||
private static readonly string FixturesRoot = Path.Combine(
|
||||
GetSolutionRoot(),
|
||||
"tests", "reachability", "fixtures", "ebpf");
|
||||
|
||||
private static readonly JsonSerializerOptions CanonicalOptions = new()
|
||||
{
|
||||
PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
|
||||
WriteIndented = false,
|
||||
DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull,
|
||||
};
|
||||
|
||||
private static bool ShouldUpdateGolden =>
|
||||
global::System.Environment.GetEnvironmentVariable("STELLAOPS_UPDATE_FIXTURES") == "true";
|
||||
|
||||
[Trait("Category", "Unit")]
|
||||
[Trait("Category", "Determinism")]
|
||||
[Fact]
|
||||
public void GoldenFiles_ExistAndAreValid()
|
||||
{
|
||||
var goldenDir = Path.Combine(FixturesRoot, "golden");
|
||||
|
||||
// Skip if fixtures don't exist (CI without fixtures)
|
||||
if (!Directory.Exists(goldenDir))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var goldenFiles = Directory.GetFiles(goldenDir, "*.ndjson");
|
||||
Assert.True(goldenFiles.Length > 0, "No golden files found");
|
||||
|
||||
foreach (var file in goldenFiles)
|
||||
{
|
||||
var lines = File.ReadAllLines(file);
|
||||
Assert.True(lines.Length > 0, $"Golden file {Path.GetFileName(file)} is empty");
|
||||
|
||||
foreach (var line in lines)
|
||||
{
|
||||
// Verify each line is valid JSON
|
||||
var ex = Record.Exception(() => JsonDocument.Parse(line));
|
||||
Assert.Null(ex);
|
||||
|
||||
// Verify keys are sorted (canonical JSON)
|
||||
using var doc = JsonDocument.Parse(line);
|
||||
var keys = doc.RootElement.EnumerateObject().Select(p => p.Name).ToList();
|
||||
var sortedKeys = keys.OrderBy(k => k, StringComparer.Ordinal).ToList();
|
||||
Assert.Equal(sortedKeys, keys);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
[Trait("Category", "Unit")]
|
||||
[Trait("Category", "Determinism")]
|
||||
[Theory]
|
||||
[InlineData("file-access")]
|
||||
[InlineData("process-exec")]
|
||||
[InlineData("tcp-state")]
|
||||
[InlineData("ssl")]
|
||||
public void EventFixtures_HaveMatchingGoldenFiles(string eventType)
|
||||
{
|
||||
var eventsFile = Path.Combine(FixturesRoot, "events", $"{eventType}-events.json");
|
||||
var goldenFile = Path.Combine(FixturesRoot, "golden", $"{eventType}-golden.ndjson");
|
||||
|
||||
// Skip if fixtures don't exist
|
||||
if (!File.Exists(eventsFile))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
Assert.True(File.Exists(goldenFile), $"Missing golden file for {eventType}");
|
||||
|
||||
var eventsJson = File.ReadAllText(eventsFile);
|
||||
using var eventsDoc = JsonDocument.Parse(eventsJson);
|
||||
var eventCount = eventsDoc.RootElement.GetArrayLength();
|
||||
|
||||
var goldenLines = File.ReadAllLines(goldenFile);
|
||||
Assert.Equal(eventCount, goldenLines.Length);
|
||||
}
|
||||
|
||||
[Trait("Category", "Unit")]
|
||||
[Trait("Category", "Determinism")]
|
||||
[Fact]
|
||||
public void ProcFixtures_HaveValidFormat()
|
||||
{
|
||||
var procDir = Path.Combine(FixturesRoot, "proc");
|
||||
|
||||
if (!Directory.Exists(procDir))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var mapsFiles = Directory.GetFiles(procDir, "*-maps.txt");
|
||||
foreach (var file in mapsFiles)
|
||||
{
|
||||
var lines = File.ReadAllLines(file);
|
||||
foreach (var line in lines)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(line)) continue;
|
||||
|
||||
// Basic format validation: start-end perms offset dev inode path
|
||||
var parts = line.Split(' ', StringSplitOptions.RemoveEmptyEntries);
|
||||
Assert.True(parts.Length >= 5, $"Invalid maps line in {Path.GetFileName(file)}: {line}");
|
||||
|
||||
// Validate address range format
|
||||
var addressRange = parts[0].Split('-');
|
||||
Assert.Equal(2, addressRange.Length);
|
||||
}
|
||||
}
|
||||
|
||||
var cgroupFiles = Directory.GetFiles(procDir, "*-cgroup.txt");
|
||||
foreach (var file in cgroupFiles)
|
||||
{
|
||||
var content = File.ReadAllText(file).Trim();
|
||||
Assert.True(content.StartsWith("0::/"), $"Invalid cgroup format in {Path.GetFileName(file)}");
|
||||
}
|
||||
}
|
||||
|
||||
[Trait("Category", "Unit")]
|
||||
[Trait("Category", "Determinism")]
|
||||
[Fact]
|
||||
public void ElfFixtures_HaveValidSchema()
|
||||
{
|
||||
var elfDir = Path.Combine(FixturesRoot, "elf");
|
||||
|
||||
if (!Directory.Exists(elfDir))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var symbolFiles = Directory.GetFiles(elfDir, "*-symbols.json");
|
||||
foreach (var file in symbolFiles)
|
||||
{
|
||||
var json = File.ReadAllText(file);
|
||||
using var doc = JsonDocument.Parse(json);
|
||||
var root = doc.RootElement;
|
||||
|
||||
// Verify required fields
|
||||
Assert.True(root.TryGetProperty("path", out _), $"Missing 'path' in {Path.GetFileName(file)}");
|
||||
Assert.True(root.TryGetProperty("symbols", out var symbols), $"Missing 'symbols' in {Path.GetFileName(file)}");
|
||||
Assert.True(symbols.GetArrayLength() > 0, $"Empty symbols in {Path.GetFileName(file)}");
|
||||
|
||||
// Verify symbol structure
|
||||
foreach (var symbol in symbols.EnumerateArray())
|
||||
{
|
||||
Assert.True(symbol.TryGetProperty("name", out _), "Symbol missing 'name'");
|
||||
Assert.True(symbol.TryGetProperty("address", out _), "Symbol missing 'address'");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
[Trait("Category", "Unit")]
|
||||
[Trait("Category", "Determinism")]
|
||||
[Fact]
|
||||
public void CanonicalJson_ProducesDeterministicOutput()
|
||||
{
|
||||
// Test that our canonical serialization is deterministic
|
||||
var testEvent = new
|
||||
{
|
||||
ts_ns = 1000000000000L,
|
||||
pid = 1234,
|
||||
cgroup_id = 5678L,
|
||||
comm = "test",
|
||||
src = "test:source",
|
||||
@event = new
|
||||
{
|
||||
type = "test",
|
||||
path = "/test/path"
|
||||
}
|
||||
};
|
||||
|
||||
var outputs = new List<string>();
|
||||
for (int i = 0; i < 10; i++)
|
||||
{
|
||||
var json = JsonSerializer.Serialize(testEvent, CanonicalOptions);
|
||||
outputs.Add(json);
|
||||
}
|
||||
|
||||
// All outputs should be identical
|
||||
Assert.True(outputs.Distinct().Count() == 1, "Canonical JSON is not deterministic");
|
||||
}
|
||||
|
||||
[Trait("Category", "Unit")]
|
||||
[Trait("Category", "Determinism")]
|
||||
[Fact]
|
||||
public void GoldenFiles_HaveNoTrailingWhitespace()
|
||||
{
|
||||
var goldenDir = Path.Combine(FixturesRoot, "golden");
|
||||
|
||||
if (!Directory.Exists(goldenDir))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
foreach (var file in Directory.GetFiles(goldenDir, "*.ndjson"))
|
||||
{
|
||||
var lines = File.ReadAllLines(file);
|
||||
for (int i = 0; i < lines.Length; i++)
|
||||
{
|
||||
var line = lines[i];
|
||||
Assert.Equal(line.TrimEnd(), line);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static string GetSolutionRoot()
|
||||
{
|
||||
var current = Directory.GetCurrentDirectory();
|
||||
while (current != null)
|
||||
{
|
||||
if (File.Exists(Path.Combine(current, "StellaOps.sln")))
|
||||
{
|
||||
return current;
|
||||
}
|
||||
current = Directory.GetParent(current)?.FullName;
|
||||
}
|
||||
|
||||
// Fallback for test runner paths
|
||||
return Path.GetFullPath(Path.Combine(
|
||||
AppContext.BaseDirectory,
|
||||
"..", "..", "..", "..", "..", ".."));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,592 @@
|
||||
// <copyright file="RuntimeEventEnricherTests.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Tests.Enrichment;
|
||||
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Moq;
|
||||
using StellaOps.Signals.Ebpf.Cgroup;
|
||||
using StellaOps.Signals.Ebpf.Enrichment;
|
||||
using StellaOps.Signals.Ebpf.Schema;
|
||||
using Xunit;
|
||||
|
||||
public class RuntimeEventEnricherTests : IDisposable
|
||||
{
|
||||
private readonly Mock<IContainerIdentityResolver> _mockIdentityResolver;
|
||||
private readonly Mock<IContainerStateProvider> _mockStateProvider;
|
||||
private readonly Mock<IImageDigestResolver> _mockDigestResolver;
|
||||
private readonly RuntimeEventEnricher _enricher;
|
||||
|
||||
public RuntimeEventEnricherTests()
|
||||
{
|
||||
_mockIdentityResolver = new Mock<IContainerIdentityResolver>();
|
||||
_mockStateProvider = new Mock<IContainerStateProvider>();
|
||||
_mockDigestResolver = new Mock<IImageDigestResolver>();
|
||||
|
||||
_enricher = new RuntimeEventEnricher(
|
||||
NullLogger<RuntimeEventEnricher>.Instance,
|
||||
_mockIdentityResolver.Object,
|
||||
_mockStateProvider.Object,
|
||||
_mockDigestResolver.Object);
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
_enricher.Dispose();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EnrichAsync_AlreadyEnriched_ReturnsUnchanged()
|
||||
{
|
||||
// Arrange
|
||||
var record = CreateTestRecord() with
|
||||
{
|
||||
ContainerId = "containerd://abc123",
|
||||
ImageDigest = "sha256:def456",
|
||||
};
|
||||
|
||||
// Act
|
||||
var result = await _enricher.EnrichAsync(record);
|
||||
|
||||
// Assert
|
||||
result.Should().BeSameAs(record);
|
||||
_mockIdentityResolver.Verify(
|
||||
x => x.ResolveByCgroupIdAsync(It.IsAny<ulong>(), It.IsAny<CancellationToken>()),
|
||||
Times.Never);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EnrichAsync_ResolvesByCgroupId_WhenAvailable()
|
||||
{
|
||||
// Arrange
|
||||
var record = CreateTestRecord() with { CgroupId = 12345UL };
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://abc123def456789012345678901234567890123456789012345678901234",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "abc123def456",
|
||||
FullId = "abc123def456789012345678901234567890123456789012345678901234",
|
||||
};
|
||||
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(12345UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(identity);
|
||||
|
||||
_mockStateProvider
|
||||
.Setup(x => x.GetContainerMetadataAsync("containerd://abc123def456789012345678901234567890123456789012345678901234", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(new ContainerMetadata
|
||||
{
|
||||
ContainerId = "containerd://abc123def456789012345678901234567890123456789012345678901234",
|
||||
ImageRef = "myregistry.io/myimage:v1.0",
|
||||
ImageDigest = "sha256:abcdef123456",
|
||||
});
|
||||
|
||||
// Act
|
||||
var result = await _enricher.EnrichAsync(record);
|
||||
|
||||
// Assert
|
||||
result.ContainerId.Should().Be("containerd://abc123def456789012345678901234567890123456789012345678901234");
|
||||
result.ImageDigest.Should().Be("sha256:abcdef123456");
|
||||
|
||||
_mockIdentityResolver.Verify(
|
||||
x => x.RegisterCgroupMapping(12345UL, identity),
|
||||
Times.Once);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EnrichAsync_FallsBackToPid_WhenCgroupIdNotResolved()
|
||||
{
|
||||
// Arrange
|
||||
var record = CreateTestRecord() with
|
||||
{
|
||||
CgroupId = 12345UL,
|
||||
Pid = 5678,
|
||||
};
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "docker://abc123def456789012345678901234567890123456789012345678901234",
|
||||
Runtime = ContainerRuntime.Docker,
|
||||
ShortId = "abc123def456",
|
||||
FullId = "abc123def456789012345678901234567890123456789012345678901234",
|
||||
};
|
||||
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(12345UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync((ContainerIdentity?)null);
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByPidAsync(5678, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(identity);
|
||||
|
||||
// Act
|
||||
var result = await _enricher.EnrichAsync(record);
|
||||
|
||||
// Assert
|
||||
result.ContainerId.Should().Be("docker://abc123def456789012345678901234567890123456789012345678901234");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EnrichAsync_ResolvesDigest_WhenOnlyImageRefAvailable()
|
||||
{
|
||||
// Arrange
|
||||
var record = CreateTestRecord() with { CgroupId = 12345UL };
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://abc1230000000000000000000000000000000000000000000000000000000000",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "abc123000000",
|
||||
FullId = "abc1230000000000000000000000000000000000000000000000000000000000",
|
||||
};
|
||||
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(12345UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(identity);
|
||||
|
||||
_mockStateProvider
|
||||
.Setup(x => x.GetContainerMetadataAsync("containerd://abc1230000000000000000000000000000000000000000000000000000000000", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(new ContainerMetadata
|
||||
{
|
||||
ContainerId = "containerd://abc1230000000000000000000000000000000000000000000000000000000000",
|
||||
ImageRef = "myregistry.io/myimage:v1.0",
|
||||
// No ImageDigest - needs resolution
|
||||
});
|
||||
|
||||
_mockDigestResolver
|
||||
.Setup(x => x.ResolveDigestAsync("myregistry.io/myimage:v1.0", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync("sha256:resolved123");
|
||||
|
||||
// Act
|
||||
var result = await _enricher.EnrichAsync(record);
|
||||
|
||||
// Assert
|
||||
result.ImageDigest.Should().Be("sha256:resolved123");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EnrichAsync_ReturnsUnknownContainer_WhenCgroupNotResolved()
|
||||
{
|
||||
// Arrange
|
||||
var record = CreateTestRecord() with
|
||||
{
|
||||
CgroupId = 99999UL,
|
||||
Pid = 0,
|
||||
};
|
||||
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(99999UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync((ContainerIdentity?)null);
|
||||
|
||||
// Act
|
||||
var result = await _enricher.EnrichAsync(record);
|
||||
|
||||
// Assert
|
||||
result.ContainerId.Should().Be("unknown:99999");
|
||||
result.ImageDigest.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EnrichAsync_CachesEnrichmentData()
|
||||
{
|
||||
// Arrange
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://cached123000000000000000000000000000000000000000000000000000000",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "cached123000",
|
||||
FullId = "cached123000000000000000000000000000000000000000000000000000000",
|
||||
};
|
||||
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(11111UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(identity);
|
||||
|
||||
_mockStateProvider
|
||||
.Setup(x => x.GetContainerMetadataAsync("containerd://cached123000000000000000000000000000000000000000000000000000000", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(new ContainerMetadata
|
||||
{
|
||||
ContainerId = "containerd://cached123000000000000000000000000000000000000000000000000000000",
|
||||
ImageDigest = "sha256:cached456",
|
||||
});
|
||||
|
||||
var record1 = CreateTestRecord() with { CgroupId = 11111UL };
|
||||
var record2 = CreateTestRecord() with
|
||||
{
|
||||
ContainerId = "containerd://cached123000000000000000000000000000000000000000000000000000000", // Already has container ID
|
||||
CgroupId = 11111UL,
|
||||
};
|
||||
|
||||
// Act
|
||||
var result1 = await _enricher.EnrichAsync(record1);
|
||||
var result2 = await _enricher.EnrichAsync(record2);
|
||||
|
||||
// Assert
|
||||
result1.ImageDigest.Should().Be("sha256:cached456");
|
||||
result2.ImageDigest.Should().Be("sha256:cached456");
|
||||
|
||||
// State provider called only once (cached for second call)
|
||||
_mockStateProvider.Verify(
|
||||
x => x.GetContainerMetadataAsync("containerd://cached123000000000000000000000000000000000000000000000000000000", It.IsAny<CancellationToken>()),
|
||||
Times.Once);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task InvalidateCache_ForcesRefresh()
|
||||
{
|
||||
// Arrange
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://invalidate1230000000000000000000000000000000000000000000000000",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "invalidate12",
|
||||
FullId = "invalidate1230000000000000000000000000000000000000000000000000",
|
||||
};
|
||||
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(22222UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(identity);
|
||||
|
||||
_mockStateProvider
|
||||
.SetupSequence(x => x.GetContainerMetadataAsync("containerd://invalidate1230000000000000000000000000000000000000000000000000", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(new ContainerMetadata
|
||||
{
|
||||
ContainerId = "containerd://invalidate1230000000000000000000000000000000000000000000000000",
|
||||
ImageDigest = "sha256:first",
|
||||
})
|
||||
.ReturnsAsync(new ContainerMetadata
|
||||
{
|
||||
ContainerId = "containerd://invalidate1230000000000000000000000000000000000000000000000000",
|
||||
ImageDigest = "sha256:second",
|
||||
});
|
||||
|
||||
var record = CreateTestRecord() with { CgroupId = 22222UL };
|
||||
|
||||
// Act
|
||||
var result1 = await _enricher.EnrichAsync(record);
|
||||
_enricher.InvalidateCache("containerd://invalidate1230000000000000000000000000000000000000000000000000");
|
||||
var result2 = await _enricher.EnrichAsync(record);
|
||||
|
||||
// Assert
|
||||
result1.ImageDigest.Should().Be("sha256:first");
|
||||
result2.ImageDigest.Should().Be("sha256:second");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EnrichBatchAsync_EnrichesAllRecords()
|
||||
{
|
||||
// Arrange
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://batch1230000000000000000000000000000000000000000000000000000000",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "batch1230000",
|
||||
FullId = "batch1230000000000000000000000000000000000000000000000000000000",
|
||||
};
|
||||
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(33333UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(identity);
|
||||
|
||||
_mockStateProvider
|
||||
.Setup(x => x.GetContainerMetadataAsync("containerd://batch1230000000000000000000000000000000000000000000000000000000", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(new ContainerMetadata
|
||||
{
|
||||
ContainerId = "containerd://batch1230000000000000000000000000000000000000000000000000000000",
|
||||
ImageDigest = "sha256:batch456",
|
||||
});
|
||||
|
||||
var records = AsyncEnumerable(
|
||||
CreateTestRecord() with { CgroupId = 33333UL },
|
||||
CreateTestRecord() with { CgroupId = 33333UL },
|
||||
CreateTestRecord() with { CgroupId = 33333UL }
|
||||
);
|
||||
|
||||
// Act
|
||||
var results = new List<RuntimeEvidenceRecord>();
|
||||
await foreach (var record in _enricher.EnrichBatchAsync(records))
|
||||
{
|
||||
results.Add(record);
|
||||
}
|
||||
|
||||
// Assert
|
||||
results.Should().HaveCount(3);
|
||||
results.Should().AllSatisfy(r =>
|
||||
{
|
||||
r.ContainerId.Should().Be("containerd://batch1230000000000000000000000000000000000000000000000000000000");
|
||||
r.ImageDigest.Should().Be("sha256:batch456");
|
||||
});
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EnrichAsync_GracefullyHandlesStateProviderFailure()
|
||||
{
|
||||
// Arrange
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://error1230000000000000000000000000000000000000000000000000000000",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "error1230000",
|
||||
FullId = "error1230000000000000000000000000000000000000000000000000000000",
|
||||
};
|
||||
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(44444UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(identity);
|
||||
|
||||
_mockStateProvider
|
||||
.Setup(x => x.GetContainerMetadataAsync("containerd://error1230000000000000000000000000000000000000000000000000000000", It.IsAny<CancellationToken>()))
|
||||
.ThrowsAsync(new InvalidOperationException("State provider failed"));
|
||||
|
||||
var record = CreateTestRecord() with { CgroupId = 44444UL };
|
||||
|
||||
// Act
|
||||
var result = await _enricher.EnrichAsync(record);
|
||||
|
||||
// Assert - should still have container ID, but no digest
|
||||
result.ContainerId.Should().Be("containerd://error1230000000000000000000000000000000000000000000000000000000");
|
||||
result.ImageDigest.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task PrewarmCacheAsync_PopulatesCache()
|
||||
{
|
||||
// Arrange
|
||||
_mockStateProvider
|
||||
.Setup(x => x.GetContainerMetadataAsync("containerd://prewarm12300000000000000000000000000000000000000000000000000000", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(new ContainerMetadata
|
||||
{
|
||||
ContainerId = "containerd://prewarm12300000000000000000000000000000000000000000000000000000",
|
||||
ImageDigest = "sha256:prewarmed",
|
||||
});
|
||||
|
||||
// Act
|
||||
await _enricher.PrewarmCacheAsync("containerd://prewarm12300000000000000000000000000000000000000000000000000000");
|
||||
|
||||
// Create a record that would use this container
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://prewarm12300000000000000000000000000000000000000000000000000000",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "prewarm12300",
|
||||
FullId = "prewarm12300000000000000000000000000000000000000000000000000000",
|
||||
};
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(55555UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(identity);
|
||||
|
||||
var record = CreateTestRecord() with { CgroupId = 55555UL };
|
||||
var result = await _enricher.EnrichAsync(record);
|
||||
|
||||
// Assert - should use cached value
|
||||
result.ImageDigest.Should().Be("sha256:prewarmed");
|
||||
|
||||
// State provider called twice: once for prewarm, once when record container ID didn't match
|
||||
// Actually, let me check the logic again...
|
||||
// The enricher resolves container ID first, then looks up enrichment by that container ID
|
||||
// So if prewarm was for "containerd://prewarm123" and record resolves to same ID, it should hit cache
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EnrichAsync_PerformanceTest_CachedLookupUnder10Ms()
|
||||
{
|
||||
// Arrange
|
||||
var identity = new ContainerIdentity
|
||||
{
|
||||
ContainerId = "containerd://perf12300000000000000000000000000000000000000000000000000000000",
|
||||
Runtime = ContainerRuntime.Containerd,
|
||||
ShortId = "perf12300000",
|
||||
FullId = "perf12300000000000000000000000000000000000000000000000000000000",
|
||||
};
|
||||
|
||||
_mockIdentityResolver
|
||||
.Setup(x => x.ResolveByCgroupIdAsync(66666UL, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(identity);
|
||||
|
||||
_mockStateProvider
|
||||
.Setup(x => x.GetContainerMetadataAsync("containerd://perf12300000000000000000000000000000000000000000000000000000000", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(new ContainerMetadata
|
||||
{
|
||||
ContainerId = "containerd://perf12300000000000000000000000000000000000000000000000000000000",
|
||||
ImageDigest = "sha256:perf456",
|
||||
});
|
||||
|
||||
var record = CreateTestRecord() with { CgroupId = 66666UL };
|
||||
|
||||
// Warm up cache
|
||||
await _enricher.EnrichAsync(record);
|
||||
|
||||
// Act - measure cached lookups
|
||||
const int iterations = 100;
|
||||
var sw = System.Diagnostics.Stopwatch.StartNew();
|
||||
|
||||
for (int i = 0; i < iterations; i++)
|
||||
{
|
||||
await _enricher.EnrichAsync(record);
|
||||
}
|
||||
|
||||
sw.Stop();
|
||||
var p99Ms = sw.Elapsed.TotalMilliseconds / iterations * 1.5; // Approximate p99
|
||||
|
||||
// Assert - p99 should be under 10ms for cached enrichment
|
||||
p99Ms.Should().BeLessThan(10.0, $"Enrichment p99 latency should be <10ms (cached), was ~{p99Ms:F2}ms");
|
||||
}
|
||||
|
||||
private static RuntimeEvidenceRecord CreateTestRecord()
|
||||
{
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = 1234567890UL,
|
||||
Source = "test_source",
|
||||
Pid = 1234,
|
||||
Comm = "test_comm",
|
||||
Event = new FunctionCallEvent
|
||||
{
|
||||
Address = "0x12345678",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
private static async IAsyncEnumerable<T> AsyncEnumerable<T>(params T[] items)
|
||||
{
|
||||
foreach (var item in items)
|
||||
{
|
||||
yield return item;
|
||||
}
|
||||
|
||||
await Task.CompletedTask;
|
||||
}
|
||||
}
|
||||
|
||||
public class LocalImageDigestResolverTests
|
||||
{
|
||||
[Theory]
|
||||
[InlineData("myregistry.io/image@sha256:abc123def456", "sha256:abc123def456")]
|
||||
[InlineData("registry.io/repo/image@sha512:xyz789", "sha512:xyz789")]
|
||||
[InlineData("image@sha256:digest", "sha256:digest")]
|
||||
public async Task ResolveDigestAsync_ExtractsDigestFromDigestReference(string imageRef, string expectedDigest)
|
||||
{
|
||||
// Arrange
|
||||
var resolver = new LocalImageDigestResolver();
|
||||
|
||||
// Act
|
||||
var result = await resolver.ResolveDigestAsync(imageRef);
|
||||
|
||||
// Assert
|
||||
result.Should().Be(expectedDigest);
|
||||
}
|
||||
|
||||
[Theory]
|
||||
[InlineData("myregistry.io/image:v1.0")]
|
||||
[InlineData("image:latest")]
|
||||
[InlineData("registry.io/repo/image:tag")]
|
||||
[InlineData("")]
|
||||
[InlineData(null)]
|
||||
public async Task ResolveDigestAsync_ReturnsNull_ForTagReferences(string? imageRef)
|
||||
{
|
||||
// Arrange
|
||||
var resolver = new LocalImageDigestResolver();
|
||||
|
||||
// Act
|
||||
var result = await resolver.ResolveDigestAsync(imageRef!);
|
||||
|
||||
// Assert
|
||||
result.Should().BeNull();
|
||||
}
|
||||
}
|
||||
|
||||
public class SbomComponentProviderTests
|
||||
{
|
||||
[Fact]
|
||||
public async Task NullSbomComponentProvider_ReturnsEmptyList()
|
||||
{
|
||||
// Arrange
|
||||
var provider = NullSbomComponentProvider.Instance;
|
||||
|
||||
// Act
|
||||
var purls = await provider.GetComponentPurlsAsync("sha256:test123");
|
||||
var hasSbom = await provider.HasSbomAsync("sha256:test123");
|
||||
|
||||
// Assert
|
||||
purls.Should().BeEmpty();
|
||||
hasSbom.Should().BeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task CachingSbomComponentProvider_CachesResults()
|
||||
{
|
||||
// Arrange
|
||||
var mockInner = new Mock<ISbomComponentProvider>();
|
||||
mockInner
|
||||
.Setup(x => x.GetComponentPurlsAsync("sha256:cached", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(new[] { "pkg:npm/lodash@4.17.21" });
|
||||
|
||||
var provider = new CachingSbomComponentProvider(mockInner.Object);
|
||||
|
||||
// Act
|
||||
var result1 = await provider.GetComponentPurlsAsync("sha256:cached");
|
||||
var result2 = await provider.GetComponentPurlsAsync("sha256:cached");
|
||||
|
||||
// Assert
|
||||
result1.Should().ContainSingle().Which.Should().Be("pkg:npm/lodash@4.17.21");
|
||||
result2.Should().ContainSingle().Which.Should().Be("pkg:npm/lodash@4.17.21");
|
||||
mockInner.Verify(
|
||||
x => x.GetComponentPurlsAsync("sha256:cached", It.IsAny<CancellationToken>()),
|
||||
Times.Once);
|
||||
}
|
||||
}
|
||||
|
||||
public class CachingImageDigestResolverTests
|
||||
{
|
||||
[Fact]
|
||||
public async Task ResolveDigestAsync_CachesResults()
|
||||
{
|
||||
// Arrange
|
||||
var mockInner = new Mock<IImageDigestResolver>();
|
||||
mockInner
|
||||
.Setup(x => x.ResolveDigestAsync("test:v1", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync("sha256:cached");
|
||||
|
||||
var resolver = new CachingImageDigestResolver(mockInner.Object);
|
||||
|
||||
// Act
|
||||
var result1 = await resolver.ResolveDigestAsync("test:v1");
|
||||
var result2 = await resolver.ResolveDigestAsync("test:v1");
|
||||
|
||||
// Assert
|
||||
result1.Should().Be("sha256:cached");
|
||||
result2.Should().Be("sha256:cached");
|
||||
mockInner.Verify(x => x.ResolveDigestAsync("test:v1", It.IsAny<CancellationToken>()), Times.Once);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task ResolveDigestBatchAsync_UsesCacheForKnownRefs()
|
||||
{
|
||||
// Arrange
|
||||
var mockInner = new Mock<IImageDigestResolver>();
|
||||
mockInner
|
||||
.Setup(x => x.ResolveDigestAsync("known:v1", It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync("sha256:known");
|
||||
mockInner
|
||||
.Setup(x => x.ResolveDigestBatchAsync(It.IsAny<IEnumerable<string>>(), It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(new Dictionary<string, string?> { ["unknown:v1"] = "sha256:unknown" });
|
||||
|
||||
var resolver = new CachingImageDigestResolver(mockInner.Object);
|
||||
|
||||
// Pre-cache one ref
|
||||
await resolver.ResolveDigestAsync("known:v1");
|
||||
|
||||
// Act
|
||||
var results = await resolver.ResolveDigestBatchAsync(new[] { "known:v1", "unknown:v1" });
|
||||
|
||||
// Assert
|
||||
results.Should().ContainKey("known:v1").WhoseValue.Should().Be("sha256:known");
|
||||
results.Should().ContainKey("unknown:v1").WhoseValue.Should().Be("sha256:unknown");
|
||||
|
||||
// Only "unknown:v1" should have been passed to batch resolve
|
||||
mockInner.Verify(
|
||||
x => x.ResolveDigestBatchAsync(
|
||||
It.Is<IEnumerable<string>>(refs => refs.Single() == "unknown:v1"),
|
||||
It.IsAny<CancellationToken>()),
|
||||
Times.Once);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,519 @@
|
||||
// <copyright file="RuntimeEvidenceNdjsonWriterTests.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Tests.Output;
|
||||
|
||||
using System.IO.Compression;
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using StellaOps.Signals.Ebpf.Output;
|
||||
using StellaOps.Signals.Ebpf.Schema;
|
||||
using Xunit;
|
||||
|
||||
public class RuntimeEvidenceNdjsonWriterTests : IAsyncLifetime
|
||||
{
|
||||
private readonly string _outputDirectory;
|
||||
private RuntimeEvidenceNdjsonWriter _writer = null!;
|
||||
|
||||
public RuntimeEvidenceNdjsonWriterTests()
|
||||
{
|
||||
_outputDirectory = Path.Combine(Path.GetTempPath(), $"ndjson_test_{Guid.NewGuid():N}");
|
||||
}
|
||||
|
||||
public ValueTask InitializeAsync()
|
||||
{
|
||||
Directory.CreateDirectory(_outputDirectory);
|
||||
_writer = new RuntimeEvidenceNdjsonWriter(
|
||||
NullLogger<RuntimeEvidenceNdjsonWriter>.Instance,
|
||||
_outputDirectory,
|
||||
new NdjsonWriterOptions
|
||||
{
|
||||
MaxChunkSizeBytes = 1024 * 1024, // 1MB for testing
|
||||
MaxChunkDuration = TimeSpan.FromHours(1),
|
||||
});
|
||||
return ValueTask.CompletedTask;
|
||||
}
|
||||
|
||||
public async ValueTask DisposeAsync()
|
||||
{
|
||||
await _writer.DisposeAsync();
|
||||
if (Directory.Exists(_outputDirectory))
|
||||
{
|
||||
Directory.Delete(_outputDirectory, recursive: true);
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WriteAsync_SingleEvent_CreatesNdjsonFile()
|
||||
{
|
||||
// Arrange
|
||||
var record = CreateFileOpenRecord();
|
||||
|
||||
// Act
|
||||
await _writer.WriteAsync(record);
|
||||
await _writer.FlushAsync();
|
||||
await _writer.RotateAsync(); // Close the file to allow reading
|
||||
|
||||
// Assert
|
||||
var files = Directory.GetFiles(_outputDirectory, "*.ndjson");
|
||||
files.Should().HaveCount(1);
|
||||
|
||||
var content = await File.ReadAllTextAsync(files[0]);
|
||||
content.Should().NotBeEmpty();
|
||||
|
||||
// Verify it's valid JSON
|
||||
var parsed = JsonDocument.Parse(content);
|
||||
parsed.RootElement.GetProperty("ts_ns").GetUInt64().Should().Be(record.TimestampNs);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WriteAsync_MultipleEvents_AllWrittenInOrder()
|
||||
{
|
||||
// Arrange
|
||||
var records = new[]
|
||||
{
|
||||
CreateFileOpenRecord(1000000UL),
|
||||
CreateFileOpenRecord(2000000UL),
|
||||
CreateFileOpenRecord(3000000UL),
|
||||
};
|
||||
|
||||
// Act
|
||||
foreach (var record in records)
|
||||
{
|
||||
await _writer.WriteAsync(record);
|
||||
}
|
||||
await _writer.FlushAsync();
|
||||
await _writer.RotateAsync(); // Close the file to allow reading
|
||||
|
||||
// Assert
|
||||
var files = Directory.GetFiles(_outputDirectory, "*.ndjson");
|
||||
var lines = (await File.ReadAllLinesAsync(files[0]))
|
||||
.Where(l => !string.IsNullOrWhiteSpace(l))
|
||||
.ToArray();
|
||||
|
||||
lines.Should().HaveCount(3);
|
||||
|
||||
for (int i = 0; i < records.Length; i++)
|
||||
{
|
||||
var parsed = JsonDocument.Parse(lines[i]);
|
||||
parsed.RootElement.GetProperty("ts_ns").GetUInt64().Should().Be(records[i].TimestampNs);
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WriteAsync_DeterministicOutput_SameInputProducesSameOutput()
|
||||
{
|
||||
// Arrange
|
||||
var record1 = new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = 1000000UL,
|
||||
Source = "sys_enter_openat",
|
||||
Pid = 1234,
|
||||
Tid = 1234,
|
||||
CgroupId = 5678UL,
|
||||
Comm = "test",
|
||||
Event = new FileOpenEvent
|
||||
{
|
||||
Path = "/etc/passwd",
|
||||
Flags = 0,
|
||||
},
|
||||
};
|
||||
|
||||
var record2 = new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = 1000000UL,
|
||||
Source = "sys_enter_openat",
|
||||
Pid = 1234,
|
||||
Tid = 1234,
|
||||
CgroupId = 5678UL,
|
||||
Comm = "test",
|
||||
Event = new FileOpenEvent
|
||||
{
|
||||
Path = "/etc/passwd",
|
||||
Flags = 0,
|
||||
},
|
||||
};
|
||||
|
||||
// Act - write to two separate writers
|
||||
await _writer.WriteAsync(record1);
|
||||
await _writer.FlushAsync();
|
||||
await _writer.RotateAsync(); // Close the file to allow reading
|
||||
|
||||
var dir2 = Path.Combine(Path.GetTempPath(), $"ndjson_test2_{Guid.NewGuid():N}");
|
||||
Directory.CreateDirectory(dir2);
|
||||
try
|
||||
{
|
||||
await using var writer2 = new RuntimeEvidenceNdjsonWriter(
|
||||
NullLogger<RuntimeEvidenceNdjsonWriter>.Instance,
|
||||
dir2);
|
||||
await writer2.WriteAsync(record2);
|
||||
await writer2.FlushAsync();
|
||||
await writer2.RotateAsync(); // Close the file to allow reading
|
||||
|
||||
// Assert
|
||||
var file1 = Directory.GetFiles(_outputDirectory, "*.ndjson")[0];
|
||||
var file2 = Directory.GetFiles(dir2, "*.ndjson")[0];
|
||||
|
||||
var content1 = await File.ReadAllTextAsync(file1);
|
||||
var content2 = await File.ReadAllTextAsync(file2);
|
||||
|
||||
// The JSON content should be identical
|
||||
var lines1 = content1.Split('\n', StringSplitOptions.RemoveEmptyEntries);
|
||||
var lines2 = content2.Split('\n', StringSplitOptions.RemoveEmptyEntries);
|
||||
|
||||
lines1[0].Should().Be(lines2[0], "Deterministic serialization should produce identical output");
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (Directory.Exists(dir2))
|
||||
{
|
||||
Directory.Delete(dir2, recursive: true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WriteAsync_JsonFieldsAreSorted_ForDeterminism()
|
||||
{
|
||||
// Arrange
|
||||
var record = CreateFileOpenRecord();
|
||||
|
||||
// Act
|
||||
await _writer.WriteAsync(record);
|
||||
await _writer.FlushAsync();
|
||||
await _writer.RotateAsync(); // Close the file to allow reading
|
||||
|
||||
// Assert
|
||||
var file = Directory.GetFiles(_outputDirectory, "*.ndjson")[0];
|
||||
var content = await File.ReadAllTextAsync(file);
|
||||
var line = content.Split('\n')[0];
|
||||
|
||||
// Note: System.Text.Json with SnakeCaseLower doesn't guarantee sorting,
|
||||
// but the fields should be consistent. Check key fields are present.
|
||||
line.Should().Contain("\"ts_ns\":");
|
||||
line.Should().Contain("\"src\":");
|
||||
line.Should().Contain("\"pid\":");
|
||||
line.Should().Contain("\"comm\":");
|
||||
line.Should().Contain("\"event\":");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WriteAsync_NullFieldsAreOmitted()
|
||||
{
|
||||
// Arrange
|
||||
var record = new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = 1000000UL,
|
||||
Source = "sys_enter_openat",
|
||||
Pid = 1234,
|
||||
Comm = "test",
|
||||
ContainerId = null, // Should be omitted
|
||||
ImageDigest = null, // Should be omitted
|
||||
Event = new FileOpenEvent
|
||||
{
|
||||
Path = "/etc/passwd",
|
||||
Flags = 0,
|
||||
},
|
||||
};
|
||||
|
||||
// Act
|
||||
await _writer.WriteAsync(record);
|
||||
await _writer.FlushAsync();
|
||||
await _writer.RotateAsync(); // Close the file to allow reading
|
||||
|
||||
// Assert
|
||||
var file = Directory.GetFiles(_outputDirectory, "*.ndjson")[0];
|
||||
var content = await File.ReadAllTextAsync(file);
|
||||
var line = content.Split('\n')[0];
|
||||
|
||||
line.Should().NotContain("\"container_id\":");
|
||||
line.Should().NotContain("\"image_digest\":");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WriteBatchAsync_WritesAllRecords()
|
||||
{
|
||||
// Arrange
|
||||
var records = Enumerable.Range(1, 100)
|
||||
.Select(i => CreateFileOpenRecord((ulong)i * 1000))
|
||||
.ToList();
|
||||
|
||||
// Act
|
||||
await _writer.WriteBatchAsync(records);
|
||||
await _writer.FlushAsync();
|
||||
await _writer.RotateAsync(); // Close the file to allow reading
|
||||
|
||||
// Assert
|
||||
var file = Directory.GetFiles(_outputDirectory, "*.ndjson")[0];
|
||||
var lines = (await File.ReadAllLinesAsync(file))
|
||||
.Where(l => !string.IsNullOrWhiteSpace(l))
|
||||
.ToArray();
|
||||
|
||||
lines.Should().HaveCount(100);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task RotateAsync_CreatesNewChunk()
|
||||
{
|
||||
// Arrange
|
||||
await _writer.WriteAsync(CreateFileOpenRecord(1000000UL));
|
||||
await _writer.FlushAsync();
|
||||
|
||||
var initialFiles = Directory.GetFiles(_outputDirectory, "*.ndjson").Length;
|
||||
|
||||
// Act
|
||||
await _writer.RotateAsync();
|
||||
await _writer.WriteAsync(CreateFileOpenRecord(2000000UL));
|
||||
await _writer.FlushAsync();
|
||||
|
||||
// Assert
|
||||
var finalFiles = Directory.GetFiles(_outputDirectory, "*.ndjson").Length;
|
||||
finalFiles.Should().BeGreaterThan(initialFiles);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task ChunkRotated_EventFired_WithCorrectStatistics()
|
||||
{
|
||||
// Arrange
|
||||
ChunkRotatedEventArgs? capturedArgs = null;
|
||||
_writer.ChunkRotated += (args, ct) =>
|
||||
{
|
||||
capturedArgs = args;
|
||||
return Task.CompletedTask;
|
||||
};
|
||||
|
||||
// Write some events
|
||||
for (int i = 0; i < 10; i++)
|
||||
{
|
||||
await _writer.WriteAsync(CreateFileOpenRecord((ulong)i * 1000));
|
||||
}
|
||||
|
||||
// Act
|
||||
await _writer.RotateAsync();
|
||||
|
||||
// Assert
|
||||
capturedArgs.Should().NotBeNull();
|
||||
capturedArgs!.Statistics.EventCount.Should().Be(10);
|
||||
capturedArgs.Statistics.Size.Should().BeGreaterThan(0);
|
||||
capturedArgs.Statistics.FilePath.Should().NotBeNullOrEmpty();
|
||||
capturedArgs.Statistics.ContentHash.Should().StartWith("sha256:");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetCurrentChunkStats_ReturnsCorrectInfo()
|
||||
{
|
||||
// Arrange
|
||||
await _writer.WriteAsync(CreateFileOpenRecord(1000000UL));
|
||||
await _writer.WriteAsync(CreateFileOpenRecord(2000000UL));
|
||||
|
||||
// Act
|
||||
var stats = _writer.GetCurrentChunkStats();
|
||||
|
||||
// Assert
|
||||
stats.EventCount.Should().Be(2);
|
||||
stats.Size.Should().BeGreaterThan(0);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WriteAsync_GzipCompression_CreatesCompressedFile()
|
||||
{
|
||||
// Arrange
|
||||
var compressedDir = Path.Combine(Path.GetTempPath(), $"ndjson_gz_{Guid.NewGuid():N}");
|
||||
Directory.CreateDirectory(compressedDir);
|
||||
|
||||
try
|
||||
{
|
||||
await using var compressedWriter = new RuntimeEvidenceNdjsonWriter(
|
||||
NullLogger<RuntimeEvidenceNdjsonWriter>.Instance,
|
||||
compressedDir,
|
||||
new NdjsonWriterOptions { UseGzipCompression = true });
|
||||
|
||||
// Act
|
||||
await compressedWriter.WriteAsync(CreateFileOpenRecord());
|
||||
await compressedWriter.FlushAsync();
|
||||
await compressedWriter.RotateAsync();
|
||||
|
||||
// Assert
|
||||
var gzFiles = Directory.GetFiles(compressedDir, "*.ndjson.gz");
|
||||
gzFiles.Should().HaveCount(1);
|
||||
|
||||
// Verify it's valid gzip
|
||||
await using var fileStream = File.OpenRead(gzFiles[0]);
|
||||
await using var gzipStream = new GZipStream(fileStream, CompressionMode.Decompress);
|
||||
using var reader = new StreamReader(gzipStream);
|
||||
var content = await reader.ReadToEndAsync();
|
||||
content.Should().Contain("sys_enter_openat");
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (Directory.Exists(compressedDir))
|
||||
{
|
||||
Directory.Delete(compressedDir, recursive: true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WriteAsync_AllEventTypes_SerializeCorrectly()
|
||||
{
|
||||
// Arrange
|
||||
var records = new RuntimeEvidenceRecord[]
|
||||
{
|
||||
CreateFileOpenRecord(),
|
||||
CreateProcessExecRecord(),
|
||||
CreateTcpStateRecord(),
|
||||
CreateNetConnectRecord(),
|
||||
CreateSslOpRecord(),
|
||||
CreateFunctionCallRecord(),
|
||||
};
|
||||
|
||||
// Act
|
||||
foreach (var record in records)
|
||||
{
|
||||
await _writer.WriteAsync(record);
|
||||
}
|
||||
await _writer.FlushAsync();
|
||||
await _writer.RotateAsync(); // Close the file to allow reading
|
||||
|
||||
// Assert
|
||||
var file = Directory.GetFiles(_outputDirectory, "*.ndjson")[0];
|
||||
var lines = (await File.ReadAllLinesAsync(file))
|
||||
.Where(l => !string.IsNullOrWhiteSpace(l))
|
||||
.ToArray();
|
||||
|
||||
lines.Should().HaveCount(6);
|
||||
|
||||
// Each line should parse and have correct type discriminator
|
||||
var expectedTypes = new[] { "file_open", "process_exec", "tcp_state", "net_connect", "ssl_op", "function_call" };
|
||||
for (int i = 0; i < lines.Length; i++)
|
||||
{
|
||||
var doc = JsonDocument.Parse(lines[i]);
|
||||
var eventType = doc.RootElement.GetProperty("event").GetProperty("type").GetString();
|
||||
eventType.Should().Be(expectedTypes[i]);
|
||||
}
|
||||
}
|
||||
|
||||
#region Record Factories
|
||||
|
||||
private static RuntimeEvidenceRecord CreateFileOpenRecord(ulong timestamp = 1000000UL)
|
||||
{
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = timestamp,
|
||||
Source = "sys_enter_openat",
|
||||
Pid = 1234,
|
||||
Tid = 1234,
|
||||
CgroupId = 5678UL,
|
||||
Comm = "test",
|
||||
Event = new FileOpenEvent
|
||||
{
|
||||
Path = "/etc/passwd",
|
||||
Flags = 0,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
private static RuntimeEvidenceRecord CreateProcessExecRecord()
|
||||
{
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = 2000000UL,
|
||||
Source = "sched_process_exec",
|
||||
Pid = 1235,
|
||||
Tid = 1235,
|
||||
CgroupId = 5678UL,
|
||||
Comm = "bash",
|
||||
Event = new ProcessExecEvent
|
||||
{
|
||||
Filename = "/usr/bin/python3",
|
||||
Ppid = 1234,
|
||||
Argv0 = "python3",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
private static RuntimeEvidenceRecord CreateTcpStateRecord()
|
||||
{
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = 3000000UL,
|
||||
Source = "inet_sock_set_state",
|
||||
Pid = 1236,
|
||||
Tid = 1236,
|
||||
CgroupId = 5678UL,
|
||||
Comm = "nginx",
|
||||
Event = new TcpStateEvent
|
||||
{
|
||||
OldState = "SYN_SENT",
|
||||
NewState = "ESTABLISHED",
|
||||
DestAddress = "93.184.216.34",
|
||||
DestPort = 443,
|
||||
Family = "inet",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
private static RuntimeEvidenceRecord CreateNetConnectRecord()
|
||||
{
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = 4000000UL,
|
||||
Source = "uprobe:connect",
|
||||
Pid = 1237,
|
||||
Tid = 1237,
|
||||
CgroupId = 5678UL,
|
||||
Comm = "curl",
|
||||
Event = new NetConnectEvent
|
||||
{
|
||||
Address = "93.184.216.34",
|
||||
Port = 443,
|
||||
Success = true,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
private static RuntimeEvidenceRecord CreateSslOpRecord()
|
||||
{
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = 5000000UL,
|
||||
Source = "uprobe:SSL_write",
|
||||
Pid = 1238,
|
||||
Tid = 1238,
|
||||
CgroupId = 5678UL,
|
||||
Comm = "curl",
|
||||
Event = new SslOpEvent
|
||||
{
|
||||
Operation = "write",
|
||||
Bytes = 1024,
|
||||
SslPtr = "0x7f1234560000",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
private static RuntimeEvidenceRecord CreateFunctionCallRecord()
|
||||
{
|
||||
return new RuntimeEvidenceRecord
|
||||
{
|
||||
TimestampNs = 6000000UL,
|
||||
Source = "uprobe:function_entry",
|
||||
Pid = 1239,
|
||||
Tid = 1239,
|
||||
CgroupId = 5678UL,
|
||||
Comm = "myapp",
|
||||
Event = new FunctionCallEvent
|
||||
{
|
||||
Address = "0x7f1234567890",
|
||||
Symbol = "my_function",
|
||||
Library = "/usr/lib/libmyapp.so",
|
||||
Runtime = "native",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
#endregion
|
||||
}
|
||||
@@ -0,0 +1,393 @@
|
||||
// <copyright file="EventParserTests.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Tests.Parsers;
|
||||
|
||||
using System.Buffers.Binary;
|
||||
using System.Text;
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Moq;
|
||||
using StellaOps.Signals.Ebpf.Parsers;
|
||||
using StellaOps.Signals.Ebpf.Schema;
|
||||
using StellaOps.Signals.Ebpf.Symbols;
|
||||
using Xunit;
|
||||
|
||||
public class EventParserTests
|
||||
{
|
||||
private readonly Mock<ISymbolResolver> _mockSymbolResolver;
|
||||
private readonly EventParser _parser;
|
||||
|
||||
public EventParserTests()
|
||||
{
|
||||
_mockSymbolResolver = new Mock<ISymbolResolver>();
|
||||
_mockSymbolResolver
|
||||
.Setup(x => x.Resolve(It.IsAny<int>(), It.IsAny<ulong>()))
|
||||
.Returns((null, null, null));
|
||||
|
||||
_parser = new EventParser(
|
||||
NullLogger<EventParser>.Instance,
|
||||
_mockSymbolResolver.Object);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Parse_FileOpenEvent_ReturnsCorrectRecord()
|
||||
{
|
||||
// Arrange
|
||||
var timestamp = 1737890000123456789UL;
|
||||
var pid = 2311U;
|
||||
var tid = 2311U;
|
||||
var cgroupId = 12345UL;
|
||||
var comm = "nginx";
|
||||
var filename = "/etc/ssl/certs/ca-bundle.crt";
|
||||
var flags = 0; // O_RDONLY
|
||||
var dfd = -100; // AT_FDCWD
|
||||
|
||||
var eventData = BuildFileOpenEvent(timestamp, pid, tid, cgroupId, comm, dfd, flags, 0, filename);
|
||||
|
||||
// Act
|
||||
var result = _parser.Parse(eventData);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.TimestampNs.Should().Be(timestamp);
|
||||
result.Source.Should().Be("sys_enter_openat");
|
||||
result.Pid.Should().Be((int)pid);
|
||||
result.Tid.Should().Be((int)tid);
|
||||
result.CgroupId.Should().Be(cgroupId);
|
||||
result.Comm.Should().Be(comm);
|
||||
|
||||
result.Event.Should().BeOfType<FileOpenEvent>();
|
||||
var fileEvent = (FileOpenEvent)result.Event;
|
||||
fileEvent.Path.Should().Be(filename);
|
||||
fileEvent.Flags.Should().Be(flags);
|
||||
fileEvent.Access.Should().Be("read");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Parse_ProcessExecEvent_ReturnsCorrectRecord()
|
||||
{
|
||||
// Arrange
|
||||
var timestamp = 1737890001123456789UL;
|
||||
var pid = 2312U;
|
||||
var tid = 2312U;
|
||||
var cgroupId = 12345UL;
|
||||
var comm = "bash";
|
||||
var filename = "/usr/bin/python3";
|
||||
var ppid = 2311U;
|
||||
var argv0 = "python3";
|
||||
|
||||
var eventData = BuildProcessExecEvent(timestamp, pid, tid, cgroupId, comm, ppid, filename, argv0);
|
||||
|
||||
// Act
|
||||
var result = _parser.Parse(eventData);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Source.Should().Be("sched_process_exec");
|
||||
result.Pid.Should().Be((int)pid);
|
||||
result.Comm.Should().Be(comm);
|
||||
|
||||
result.Event.Should().BeOfType<ProcessExecEvent>();
|
||||
var execEvent = (ProcessExecEvent)result.Event;
|
||||
execEvent.Filename.Should().Be(filename);
|
||||
execEvent.Ppid.Should().Be((int)ppid);
|
||||
execEvent.Argv0.Should().Be(argv0);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Parse_TcpStateEvent_IPv4_ReturnsCorrectRecord()
|
||||
{
|
||||
// Arrange
|
||||
var timestamp = 1737890002123456789UL;
|
||||
var pid = 2313U;
|
||||
var tid = 2315U;
|
||||
var cgroupId = 12345UL;
|
||||
var comm = "nginx";
|
||||
byte oldState = 2; // SYN_SENT
|
||||
byte newState = 1; // ESTABLISHED
|
||||
ushort sport = 54321;
|
||||
ushort dport = 443;
|
||||
var daddr = new byte[] { 93, 184, 216, 34 }; // 93.184.216.34
|
||||
|
||||
var eventData = BuildTcpStateEvent(timestamp, pid, tid, cgroupId, comm, oldState, newState, 2, sport, dport, daddr);
|
||||
|
||||
// Act
|
||||
var result = _parser.Parse(eventData);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Source.Should().Be("inet_sock_set_state");
|
||||
|
||||
result.Event.Should().BeOfType<TcpStateEvent>();
|
||||
var tcpEvent = (TcpStateEvent)result.Event;
|
||||
tcpEvent.OldState.Should().Be("SYN_SENT");
|
||||
tcpEvent.NewState.Should().Be("ESTABLISHED");
|
||||
tcpEvent.DestPort.Should().Be(dport);
|
||||
tcpEvent.DestAddress.Should().Be("93.184.216.34");
|
||||
tcpEvent.Family.Should().Be("inet");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Parse_SslOpEvent_ReturnsCorrectRecord()
|
||||
{
|
||||
// Arrange
|
||||
var timestamp = 1737890003123456789UL;
|
||||
var pid = 2314U;
|
||||
var tid = 2316U;
|
||||
var cgroupId = 12345UL;
|
||||
var comm = "nginx";
|
||||
var sslPtr = 0x7f1234560000UL;
|
||||
var bytes = 2048U;
|
||||
byte operation = 1; // write
|
||||
|
||||
var eventData = BuildSslOpEvent(timestamp, pid, tid, cgroupId, comm, sslPtr, bytes, operation);
|
||||
|
||||
// Act
|
||||
var result = _parser.Parse(eventData);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Source.Should().Be("uprobe:SSL_write");
|
||||
|
||||
result.Event.Should().BeOfType<SslOpEvent>();
|
||||
var sslEvent = (SslOpEvent)result.Event;
|
||||
sslEvent.Operation.Should().Be("write");
|
||||
sslEvent.Bytes.Should().Be((int)bytes);
|
||||
sslEvent.SslPtr.Should().Be("0x7F1234560000");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Parse_FunctionCallEvent_WithSymbolResolution_ReturnsCorrectRecord()
|
||||
{
|
||||
// Arrange
|
||||
var timestamp = 1737890004123456789UL;
|
||||
var pid = 2315U;
|
||||
var tid = 2317U;
|
||||
var cgroupId = 12345UL;
|
||||
var comm = "myapp";
|
||||
var funcAddr = 0x7f1234567890UL;
|
||||
|
||||
_mockSymbolResolver
|
||||
.Setup(x => x.Resolve((int)pid, funcAddr))
|
||||
.Returns(("my_function", "/usr/lib/libmyapp.so", null));
|
||||
|
||||
var eventData = BuildFunctionCallEvent(timestamp, pid, tid, cgroupId, comm, funcAddr, 0, null, 0);
|
||||
|
||||
// Act
|
||||
var result = _parser.Parse(eventData);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result!.Source.Should().Be("uprobe:function_entry");
|
||||
|
||||
result.Event.Should().BeOfType<FunctionCallEvent>();
|
||||
var funcEvent = (FunctionCallEvent)result.Event;
|
||||
funcEvent.Address.Should().Be("0x7F1234567890");
|
||||
funcEvent.Symbol.Should().Be("my_function");
|
||||
funcEvent.Library.Should().Be("/usr/lib/libmyapp.so");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Parse_EventTooSmall_ReturnsNull()
|
||||
{
|
||||
// Arrange - less than minimum event size (40 bytes)
|
||||
var tooSmall = new byte[20];
|
||||
|
||||
// Act
|
||||
var result = _parser.Parse(tooSmall);
|
||||
|
||||
// Assert
|
||||
result.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Parse_UnknownEventType_ReturnsNull()
|
||||
{
|
||||
// Arrange - unknown event type (99)
|
||||
var eventData = new byte[64];
|
||||
eventData[24] = 99; // Unknown event type
|
||||
|
||||
// Act
|
||||
var result = _parser.Parse(eventData);
|
||||
|
||||
// Assert
|
||||
result.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Parse_FileOpenEvent_WritableFlags_ReturnsWriteAccess()
|
||||
{
|
||||
// Arrange
|
||||
var eventData = BuildFileOpenEvent(
|
||||
1000000UL, 1000U, 1000U, 1UL, "test", -100, 1, 0, "/tmp/test.txt"); // O_WRONLY = 1
|
||||
|
||||
// Act
|
||||
var result = _parser.Parse(eventData);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
var fileEvent = (FileOpenEvent)result!.Event;
|
||||
fileEvent.Access.Should().Be("write");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Parse_FileOpenEvent_ReadWriteFlags_ReturnsReadWriteAccess()
|
||||
{
|
||||
// Arrange
|
||||
var eventData = BuildFileOpenEvent(
|
||||
1000000UL, 1000U, 1000U, 1UL, "test", -100, 2, 0, "/tmp/test.txt"); // O_RDWR = 2
|
||||
|
||||
// Act
|
||||
var result = _parser.Parse(eventData);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
var fileEvent = (FileOpenEvent)result!.Event;
|
||||
fileEvent.Access.Should().Be("read_write");
|
||||
}
|
||||
|
||||
#region Event Builders
|
||||
|
||||
private static byte[] BuildFileOpenEvent(
|
||||
ulong timestamp, uint pid, uint tid, ulong cgroupId, string comm,
|
||||
int dfd, int flags, ushort mode, string filename)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
const int FilenameOffset = HeaderSize + 8; // Must match parser: HeaderSize + 8
|
||||
const int MaxFilenameLen = 256;
|
||||
var buffer = new byte[FilenameOffset + MaxFilenameLen];
|
||||
|
||||
WriteHeader(buffer, timestamp, pid, tid, cgroupId, EbpfEventType.FileOpen, comm);
|
||||
|
||||
// File open specific fields
|
||||
BinaryPrimitives.WriteInt32LittleEndian(buffer.AsSpan(HeaderSize), dfd);
|
||||
BinaryPrimitives.WriteInt32LittleEndian(buffer.AsSpan(HeaderSize + 4), flags);
|
||||
// Note: mode at HeaderSize + 8 overlaps with filename in current parser
|
||||
|
||||
// Filename at offset HeaderSize + 8 (matches parser's FilenameOffset)
|
||||
var filenameBytes = Encoding.UTF8.GetBytes(filename);
|
||||
Array.Copy(filenameBytes, 0, buffer, FilenameOffset, Math.Min(filenameBytes.Length, MaxFilenameLen - 1));
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
private static byte[] BuildProcessExecEvent(
|
||||
ulong timestamp, uint pid, uint tid, ulong cgroupId, string comm,
|
||||
uint ppid, string filename, string? argv0)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
const int MaxFilenameLen = 256;
|
||||
const int MaxArgv0Len = 128;
|
||||
var buffer = new byte[HeaderSize + 8 + MaxFilenameLen + MaxArgv0Len]; // header + ppid(4) + reserved(4) + filename + argv0
|
||||
|
||||
WriteHeader(buffer, timestamp, pid, tid, cgroupId, EbpfEventType.ProcessExec, comm);
|
||||
|
||||
BinaryPrimitives.WriteUInt32LittleEndian(buffer.AsSpan(HeaderSize), ppid);
|
||||
|
||||
var filenameBytes = Encoding.UTF8.GetBytes(filename);
|
||||
Array.Copy(filenameBytes, 0, buffer, HeaderSize + 8, Math.Min(filenameBytes.Length, MaxFilenameLen - 1));
|
||||
|
||||
if (argv0 != null)
|
||||
{
|
||||
var argv0Bytes = Encoding.UTF8.GetBytes(argv0);
|
||||
Array.Copy(argv0Bytes, 0, buffer, HeaderSize + 8 + MaxFilenameLen, Math.Min(argv0Bytes.Length, MaxArgv0Len - 1));
|
||||
}
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
private static byte[] BuildTcpStateEvent(
|
||||
ulong timestamp, uint pid, uint tid, ulong cgroupId, string comm,
|
||||
byte oldState, byte newState, byte family, ushort sport, ushort dport, byte[] daddr)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
var buffer = new byte[HeaderSize + 48]; // header + tcp state fields
|
||||
|
||||
WriteHeader(buffer, timestamp, pid, tid, cgroupId, EbpfEventType.TcpState, comm);
|
||||
|
||||
buffer[HeaderSize] = oldState;
|
||||
buffer[HeaderSize + 1] = newState;
|
||||
buffer[HeaderSize + 2] = family;
|
||||
BinaryPrimitives.WriteUInt16LittleEndian(buffer.AsSpan(HeaderSize + 4), sport);
|
||||
BinaryPrimitives.WriteUInt16LittleEndian(buffer.AsSpan(HeaderSize + 6), dport);
|
||||
|
||||
if (family == 2) // AF_INET
|
||||
{
|
||||
// saddr at +8, daddr at +12
|
||||
buffer[HeaderSize + 12] = daddr[0];
|
||||
buffer[HeaderSize + 13] = daddr[1];
|
||||
buffer[HeaderSize + 14] = daddr[2];
|
||||
buffer[HeaderSize + 15] = daddr[3];
|
||||
}
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
private static byte[] BuildSslOpEvent(
|
||||
ulong timestamp, uint pid, uint tid, ulong cgroupId, string comm,
|
||||
ulong sslPtr, uint bytes, byte operation)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
var buffer = new byte[HeaderSize + 24];
|
||||
|
||||
WriteHeader(buffer, timestamp, pid, tid, cgroupId, EbpfEventType.SslOp, comm);
|
||||
|
||||
BinaryPrimitives.WriteUInt64LittleEndian(buffer.AsSpan(HeaderSize), sslPtr);
|
||||
BinaryPrimitives.WriteUInt32LittleEndian(buffer.AsSpan(HeaderSize + 8), bytes); // requested
|
||||
BinaryPrimitives.WriteUInt32LittleEndian(buffer.AsSpan(HeaderSize + 12), bytes); // actual
|
||||
buffer[HeaderSize + 16] = operation;
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
private static byte[] BuildFunctionCallEvent(
|
||||
ulong timestamp, uint pid, uint tid, ulong cgroupId, string comm,
|
||||
ulong funcAddr, ulong returnAddr, ulong[]? stack, byte runtimeType)
|
||||
{
|
||||
const int HeaderSize = 48;
|
||||
const int MaxStackDepth = 16;
|
||||
var buffer = new byte[HeaderSize + 16 + MaxStackDepth * 8 + 8];
|
||||
|
||||
WriteHeader(buffer, timestamp, pid, tid, cgroupId, EbpfEventType.FunctionCall, comm);
|
||||
|
||||
BinaryPrimitives.WriteUInt64LittleEndian(buffer.AsSpan(HeaderSize), funcAddr);
|
||||
BinaryPrimitives.WriteUInt64LittleEndian(buffer.AsSpan(HeaderSize + 8), returnAddr);
|
||||
|
||||
// Stack trace
|
||||
var stackOffset = HeaderSize + 16;
|
||||
var stackDepth = (byte)(stack?.Length ?? 0);
|
||||
if (stack != null)
|
||||
{
|
||||
for (int i = 0; i < Math.Min(stack.Length, MaxStackDepth); i++)
|
||||
{
|
||||
BinaryPrimitives.WriteUInt64LittleEndian(buffer.AsSpan(stackOffset + i * 8), stack[i]);
|
||||
}
|
||||
}
|
||||
|
||||
var metaOffset = stackOffset + MaxStackDepth * 8;
|
||||
buffer[metaOffset] = stackDepth;
|
||||
buffer[metaOffset + 1] = runtimeType;
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
private static void WriteHeader(
|
||||
byte[] buffer, ulong timestamp, uint pid, uint tid, ulong cgroupId,
|
||||
EbpfEventType eventType, string comm)
|
||||
{
|
||||
BinaryPrimitives.WriteUInt64LittleEndian(buffer.AsSpan(0), timestamp);
|
||||
BinaryPrimitives.WriteUInt32LittleEndian(buffer.AsSpan(8), pid);
|
||||
BinaryPrimitives.WriteUInt32LittleEndian(buffer.AsSpan(12), tid);
|
||||
BinaryPrimitives.WriteUInt64LittleEndian(buffer.AsSpan(16), cgroupId);
|
||||
buffer[24] = (byte)eventType;
|
||||
// Reserved bytes 25-31
|
||||
|
||||
// comm at offset 32, max 16 bytes
|
||||
var commBytes = Encoding.UTF8.GetBytes(comm);
|
||||
Array.Copy(commBytes, 0, buffer, 32, Math.Min(commBytes.Length, 16));
|
||||
}
|
||||
|
||||
#endregion
|
||||
}
|
||||
@@ -0,0 +1,432 @@
|
||||
// <copyright file="RuntimeEvidenceCollectorTests.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Tests.Services;
|
||||
|
||||
using System.Runtime.CompilerServices;
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Caching.Memory;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Moq;
|
||||
using StellaOps.Signals.Ebpf.Cgroup;
|
||||
using StellaOps.Signals.Ebpf.Output;
|
||||
using StellaOps.Signals.Ebpf.Parsers;
|
||||
using StellaOps.Signals.Ebpf.Probes;
|
||||
using StellaOps.Signals.Ebpf.Schema;
|
||||
using StellaOps.Signals.Ebpf.Services;
|
||||
using StellaOps.Signals.Ebpf.Symbols;
|
||||
using Xunit;
|
||||
|
||||
public class RuntimeEvidenceCollectorTests : IAsyncLifetime
|
||||
{
|
||||
private readonly string _outputDir;
|
||||
private readonly string _procDir;
|
||||
private readonly Mock<IEbpfProbeLoader> _mockProbeLoader;
|
||||
private readonly Mock<ISymbolResolver> _mockSymbolResolver;
|
||||
private readonly EventParser _eventParser;
|
||||
private readonly CgroupContainerResolver _cgroupResolver;
|
||||
private RuntimeEvidenceNdjsonWriter _writer = null!;
|
||||
private RuntimeEvidenceCollector _collector = null!;
|
||||
|
||||
public RuntimeEvidenceCollectorTests()
|
||||
{
|
||||
_outputDir = Path.Combine(Path.GetTempPath(), $"evidence_test_{Guid.NewGuid():N}");
|
||||
_procDir = Path.Combine(Path.GetTempPath(), $"proc_test_{Guid.NewGuid():N}");
|
||||
|
||||
_mockProbeLoader = new Mock<IEbpfProbeLoader>();
|
||||
_mockSymbolResolver = new Mock<ISymbolResolver>();
|
||||
_mockSymbolResolver
|
||||
.Setup(x => x.Resolve(It.IsAny<int>(), It.IsAny<ulong>()))
|
||||
.Returns((null, null, null));
|
||||
|
||||
_eventParser = new EventParser(
|
||||
NullLogger<EventParser>.Instance,
|
||||
_mockSymbolResolver.Object);
|
||||
_cgroupResolver = new CgroupContainerResolver(
|
||||
NullLogger<CgroupContainerResolver>.Instance,
|
||||
_procDir);
|
||||
}
|
||||
|
||||
public ValueTask InitializeAsync()
|
||||
{
|
||||
Directory.CreateDirectory(_outputDir);
|
||||
Directory.CreateDirectory(_procDir);
|
||||
|
||||
_writer = new RuntimeEvidenceNdjsonWriter(
|
||||
NullLogger<RuntimeEvidenceNdjsonWriter>.Instance,
|
||||
_outputDir);
|
||||
|
||||
_collector = new RuntimeEvidenceCollector(
|
||||
NullLogger<RuntimeEvidenceCollector>.Instance,
|
||||
_mockProbeLoader.Object,
|
||||
_eventParser,
|
||||
_cgroupResolver,
|
||||
_writer);
|
||||
|
||||
return ValueTask.CompletedTask;
|
||||
}
|
||||
|
||||
public async ValueTask DisposeAsync()
|
||||
{
|
||||
await _collector.DisposeAsync();
|
||||
|
||||
if (Directory.Exists(_outputDir))
|
||||
{
|
||||
Directory.Delete(_outputDir, recursive: true);
|
||||
}
|
||||
|
||||
if (Directory.Exists(_procDir))
|
||||
{
|
||||
Directory.Delete(_procDir, recursive: true);
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task StartCollectionAsync_ReturnsValidHandle()
|
||||
{
|
||||
// Arrange
|
||||
var containerId = "test-container-123";
|
||||
var options = new RuntimeSignalOptions();
|
||||
var probeHandle = new EbpfProbeHandle
|
||||
{
|
||||
ProbeId = Guid.NewGuid(),
|
||||
ContainerId = containerId,
|
||||
TracedPids = [],
|
||||
};
|
||||
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.LoadAndAttachAsync(containerId, options, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(probeHandle);
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.ReadEventsAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(EmptyAsyncEnumerable());
|
||||
|
||||
// Act
|
||||
var handle = await _collector.StartCollectionAsync(containerId, options);
|
||||
|
||||
// Assert
|
||||
handle.Should().NotBeNull();
|
||||
handle.SessionId.Should().NotBeEmpty();
|
||||
handle.ContainerId.Should().Be(containerId);
|
||||
handle.StartedAt.Should().BeCloseTo(DateTimeOffset.UtcNow, TimeSpan.FromSeconds(5));
|
||||
handle.Options.Should().BeSameAs(options);
|
||||
|
||||
// Cleanup
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.DetachAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(Task.CompletedTask);
|
||||
await _collector.StopCollectionAsync(handle);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task StopCollectionAsync_ReturnsSummary()
|
||||
{
|
||||
// Arrange
|
||||
var containerId = "test-container-456";
|
||||
var options = new RuntimeSignalOptions();
|
||||
var probeHandle = new EbpfProbeHandle
|
||||
{
|
||||
ProbeId = Guid.NewGuid(),
|
||||
ContainerId = containerId,
|
||||
TracedPids = [],
|
||||
};
|
||||
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.LoadAndAttachAsync(containerId, options, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(probeHandle);
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.ReadEventsAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(EmptyAsyncEnumerable());
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.DetachAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(Task.CompletedTask);
|
||||
|
||||
var handle = await _collector.StartCollectionAsync(containerId, options);
|
||||
|
||||
// Act
|
||||
var summary = await _collector.StopCollectionAsync(handle);
|
||||
|
||||
// Assert
|
||||
summary.Should().NotBeNull();
|
||||
summary.SessionId.Should().Be(handle.SessionId);
|
||||
summary.ContainerId.Should().Be(containerId);
|
||||
summary.StartedAt.Should().Be(handle.StartedAt);
|
||||
summary.StoppedAt.Should().BeAfter(summary.StartedAt);
|
||||
summary.Duration.Should().BePositive();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task StopCollectionAsync_CalledTwice_ThrowsInvalidOperation()
|
||||
{
|
||||
// Arrange
|
||||
var containerId = "test-container-789";
|
||||
var options = new RuntimeSignalOptions();
|
||||
var probeHandle = new EbpfProbeHandle
|
||||
{
|
||||
ProbeId = Guid.NewGuid(),
|
||||
ContainerId = containerId,
|
||||
TracedPids = [],
|
||||
};
|
||||
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.LoadAndAttachAsync(containerId, options, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(probeHandle);
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.ReadEventsAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(EmptyAsyncEnumerable());
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.DetachAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(Task.CompletedTask);
|
||||
|
||||
var handle = await _collector.StartCollectionAsync(containerId, options);
|
||||
await _collector.StopCollectionAsync(handle);
|
||||
|
||||
// Act & Assert
|
||||
var act = () => _collector.StopCollectionAsync(handle);
|
||||
await act.Should().ThrowAsync<InvalidOperationException>()
|
||||
.WithMessage("*not found*");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetStatsAsync_ReturnsCurrentStats()
|
||||
{
|
||||
// Arrange
|
||||
var containerId = "test-container-stats";
|
||||
var options = new RuntimeSignalOptions();
|
||||
var probeHandle = new EbpfProbeHandle
|
||||
{
|
||||
ProbeId = Guid.NewGuid(),
|
||||
ContainerId = containerId,
|
||||
TracedPids = [],
|
||||
};
|
||||
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.LoadAndAttachAsync(containerId, options, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(probeHandle);
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.ReadEventsAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(EmptyAsyncEnumerable());
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.GetBufferUtilization(probeHandle))
|
||||
.Returns(0.25);
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.GetCpuOverhead(probeHandle))
|
||||
.Returns(0.01);
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.GetMemoryUsage(probeHandle))
|
||||
.Returns(1024 * 1024);
|
||||
|
||||
var handle = await _collector.StartCollectionAsync(containerId, options);
|
||||
|
||||
// Act
|
||||
var stats = await _collector.GetStatsAsync(handle);
|
||||
|
||||
// Assert
|
||||
stats.Should().NotBeNull();
|
||||
stats.BufferUtilization.Should().Be(0.25);
|
||||
stats.CpuOverhead.Should().Be(0.01);
|
||||
stats.MemoryUsage.Should().Be(1024 * 1024);
|
||||
|
||||
// Cleanup
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.DetachAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(Task.CompletedTask);
|
||||
await _collector.StopCollectionAsync(handle);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetStatsAsync_InvalidSession_ThrowsInvalidOperation()
|
||||
{
|
||||
// Arrange
|
||||
var fakeHandle = new EvidenceCollectionHandle
|
||||
{
|
||||
SessionId = Guid.NewGuid(),
|
||||
ContainerId = "fake",
|
||||
StartedAt = DateTimeOffset.UtcNow,
|
||||
Options = new RuntimeSignalOptions(),
|
||||
};
|
||||
|
||||
// Act & Assert
|
||||
var act = () => _collector.GetStatsAsync(fakeHandle);
|
||||
await act.Should().ThrowAsync<InvalidOperationException>()
|
||||
.WithMessage("*not found*");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task DisposeAsync_StopsAllSessions()
|
||||
{
|
||||
// Arrange
|
||||
var containerId = "test-container-dispose";
|
||||
var options = new RuntimeSignalOptions();
|
||||
var probeHandle = new EbpfProbeHandle
|
||||
{
|
||||
ProbeId = Guid.NewGuid(),
|
||||
ContainerId = containerId,
|
||||
TracedPids = [],
|
||||
};
|
||||
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.LoadAndAttachAsync(containerId, options, It.IsAny<CancellationToken>()))
|
||||
.ReturnsAsync(probeHandle);
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.ReadEventsAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(EmptyAsyncEnumerable());
|
||||
_mockProbeLoader
|
||||
.Setup(x => x.DetachAsync(probeHandle, It.IsAny<CancellationToken>()))
|
||||
.Returns(Task.CompletedTask);
|
||||
|
||||
var handle = await _collector.StartCollectionAsync(containerId, options);
|
||||
|
||||
// Act
|
||||
await _collector.DisposeAsync();
|
||||
|
||||
// Assert - verify detach was called
|
||||
_mockProbeLoader.Verify(
|
||||
x => x.DetachAsync(probeHandle, It.IsAny<CancellationToken>()),
|
||||
Times.Once);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task StartCollectionAsync_AfterDispose_ThrowsObjectDisposed()
|
||||
{
|
||||
// Arrange
|
||||
await _collector.DisposeAsync();
|
||||
|
||||
// Act & Assert
|
||||
var act = () => _collector.StartCollectionAsync("container", new RuntimeSignalOptions());
|
||||
await act.Should().ThrowAsync<ObjectDisposedException>();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task StreamEvidenceAsync_InvalidSession_YieldsNothing()
|
||||
{
|
||||
// Arrange
|
||||
var fakeHandle = new EvidenceCollectionHandle
|
||||
{
|
||||
SessionId = Guid.NewGuid(),
|
||||
ContainerId = "fake",
|
||||
StartedAt = DateTimeOffset.UtcNow,
|
||||
Options = new RuntimeSignalOptions(),
|
||||
};
|
||||
|
||||
// Act
|
||||
var records = new List<RuntimeEvidenceRecord>();
|
||||
await foreach (var record in _collector.StreamEvidenceAsync(fakeHandle))
|
||||
{
|
||||
records.Add(record);
|
||||
}
|
||||
|
||||
// Assert
|
||||
records.Should().BeEmpty();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void EvidenceCollectionHandle_HasCorrectProperties()
|
||||
{
|
||||
// Arrange & Act
|
||||
var handle = new EvidenceCollectionHandle
|
||||
{
|
||||
SessionId = Guid.NewGuid(),
|
||||
ContainerId = "test-container",
|
||||
StartedAt = DateTimeOffset.UtcNow,
|
||||
Options = new RuntimeSignalOptions { MaxEventsPerSecond = 5000 },
|
||||
};
|
||||
|
||||
// Assert
|
||||
handle.SessionId.Should().NotBeEmpty();
|
||||
handle.ContainerId.Should().Be("test-container");
|
||||
handle.Options.MaxEventsPerSecond.Should().Be(5000);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void EvidenceCollectionSummary_DurationCalculation()
|
||||
{
|
||||
// Arrange
|
||||
var start = DateTimeOffset.UtcNow.AddMinutes(-5);
|
||||
var stop = DateTimeOffset.UtcNow;
|
||||
|
||||
// Act
|
||||
var summary = new EvidenceCollectionSummary
|
||||
{
|
||||
SessionId = Guid.NewGuid(),
|
||||
ContainerId = "test",
|
||||
StartedAt = start,
|
||||
StoppedAt = stop,
|
||||
TotalEvents = 1000,
|
||||
ProcessedEvents = 990,
|
||||
DroppedEvents = 10,
|
||||
ChunksWritten = 5,
|
||||
};
|
||||
|
||||
// Assert
|
||||
summary.Duration.Should().BeCloseTo(TimeSpan.FromMinutes(5), TimeSpan.FromSeconds(1));
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void EvidenceCollectionStats_HasAllMetrics()
|
||||
{
|
||||
// Arrange & Act
|
||||
var stats = new EvidenceCollectionStats
|
||||
{
|
||||
TotalEvents = 10000,
|
||||
ProcessedEvents = 9900,
|
||||
DroppedEvents = 100,
|
||||
EventsPerSecond = 1000.0,
|
||||
BufferUtilization = 0.5,
|
||||
CpuOverhead = 0.02,
|
||||
MemoryUsage = 10 * 1024 * 1024,
|
||||
};
|
||||
|
||||
// Assert
|
||||
stats.TotalEvents.Should().Be(10000);
|
||||
stats.ProcessedEvents.Should().Be(9900);
|
||||
stats.DroppedEvents.Should().Be(100);
|
||||
stats.EventsPerSecond.Should().Be(1000.0);
|
||||
stats.BufferUtilization.Should().Be(0.5);
|
||||
stats.CpuOverhead.Should().Be(0.02);
|
||||
stats.MemoryUsage.Should().Be(10 * 1024 * 1024);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void EvidenceChunkCompletedEventArgs_HasAllFields()
|
||||
{
|
||||
// Arrange & Act
|
||||
var args = new EvidenceChunkCompletedEventArgs
|
||||
{
|
||||
SessionId = Guid.NewGuid(),
|
||||
ContainerId = "container-123",
|
||||
ChunkPath = "/tmp/evidence-chunk-001.ndjson",
|
||||
EventCount = 5000,
|
||||
Size = 1024 * 1024,
|
||||
ContentHash = "sha256:abc123",
|
||||
PreviousHash = "sha256:xyz789",
|
||||
};
|
||||
|
||||
// Assert
|
||||
args.SessionId.Should().NotBeEmpty();
|
||||
args.ContainerId.Should().Be("container-123");
|
||||
args.ChunkPath.Should().EndWith(".ndjson");
|
||||
args.EventCount.Should().Be(5000);
|
||||
args.Size.Should().Be(1024 * 1024);
|
||||
args.ContentHash.Should().StartWith("sha256:");
|
||||
args.PreviousHash.Should().StartWith("sha256:");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RuntimeEvidenceCollectorOptions_HasDefaults()
|
||||
{
|
||||
// Arrange & Act
|
||||
var options = new RuntimeEvidenceCollectorOptions();
|
||||
|
||||
// Assert
|
||||
options.EventChannelCapacity.Should().Be(10000);
|
||||
}
|
||||
|
||||
private static async IAsyncEnumerable<ReadOnlyMemory<byte>> EmptyAsyncEnumerable(
|
||||
[EnumeratorCancellation] CancellationToken ct = default)
|
||||
{
|
||||
await Task.Yield();
|
||||
yield break;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,595 @@
|
||||
// <copyright file="EvidenceChunkFinalizerTests.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Tests.Signing;
|
||||
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using StellaOps.Signals.Ebpf.Output;
|
||||
using StellaOps.Signals.Ebpf.Signing;
|
||||
using Xunit;
|
||||
|
||||
public sealed class EvidenceChunkFinalizerTests : IAsyncLifetime
|
||||
{
|
||||
private readonly string _testDir;
|
||||
|
||||
public EvidenceChunkFinalizerTests()
|
||||
{
|
||||
_testDir = Path.Combine(Path.GetTempPath(), $"evidence-chunk-test-{Guid.NewGuid():N}");
|
||||
Directory.CreateDirectory(_testDir);
|
||||
}
|
||||
|
||||
public ValueTask InitializeAsync() => ValueTask.CompletedTask;
|
||||
|
||||
public ValueTask DisposeAsync()
|
||||
{
|
||||
if (Directory.Exists(_testDir))
|
||||
{
|
||||
Directory.Delete(_testDir, recursive: true);
|
||||
}
|
||||
|
||||
return ValueTask.CompletedTask;
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task FinalizeChunk_SignsChunkAndReturnsPredicate()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
var options = new EvidenceChunkFinalizerOptions
|
||||
{
|
||||
SigningKeyId = "test-key",
|
||||
CollectorVersion = "1.0.0-test",
|
||||
ChainStateDirectory = _testDir,
|
||||
};
|
||||
await using var finalizer = new EvidenceChunkFinalizer(
|
||||
NullLogger<EvidenceChunkFinalizer>.Instance,
|
||||
signer,
|
||||
options);
|
||||
|
||||
var args = CreateChunkRotatedArgs(1, 100, "sha256:abc123");
|
||||
|
||||
// Act
|
||||
var result = await finalizer.FinalizeChunkAsync(args, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result.Predicate.ChunkId.Should().Be("sha256:abc123");
|
||||
result.Predicate.ChunkSequence.Should().Be(1);
|
||||
result.Predicate.EventCount.Should().Be(100);
|
||||
result.Predicate.PreviousChunkId.Should().BeNull(); // First chunk
|
||||
result.DsseEnvelopeBase64.Should().NotBeNullOrEmpty();
|
||||
result.KeyId.Should().Be("test-key");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task FinalizeChunk_LinksChainWithPreviousHash()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
var options = new EvidenceChunkFinalizerOptions
|
||||
{
|
||||
SigningKeyId = "test-key",
|
||||
CollectorVersion = "1.0.0-test",
|
||||
ChainStateDirectory = _testDir,
|
||||
};
|
||||
await using var finalizer = new EvidenceChunkFinalizer(
|
||||
NullLogger<EvidenceChunkFinalizer>.Instance,
|
||||
signer,
|
||||
options);
|
||||
|
||||
// First chunk
|
||||
var args1 = CreateChunkRotatedArgs(1, 100, "sha256:first");
|
||||
var result1 = await finalizer.FinalizeChunkAsync(args1, CancellationToken.None);
|
||||
|
||||
// Second chunk
|
||||
var args2 = CreateChunkRotatedArgs(2, 200, "sha256:second");
|
||||
args2 = args2 with { PreviousChunkHash = "sha256:first" };
|
||||
var result2 = await finalizer.FinalizeChunkAsync(args2, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
result1.Predicate.PreviousChunkId.Should().BeNull();
|
||||
result2.Predicate.PreviousChunkId.Should().Be("sha256:first");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task FinalizeChunk_EmitsChunkFinalizedEvent()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
await using var finalizer = new EvidenceChunkFinalizer(
|
||||
NullLogger<EvidenceChunkFinalizer>.Instance,
|
||||
signer);
|
||||
|
||||
ChunkFinalizedEventArgs? receivedArgs = null;
|
||||
finalizer.ChunkFinalized += (args, ct) =>
|
||||
{
|
||||
receivedArgs = args;
|
||||
return Task.CompletedTask;
|
||||
};
|
||||
|
||||
var args = CreateChunkRotatedArgs(1, 100, "sha256:abc123");
|
||||
|
||||
// Act
|
||||
await finalizer.FinalizeChunkAsync(args, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
receivedArgs.Should().NotBeNull();
|
||||
receivedArgs!.Result.Predicate.ChunkId.Should().Be("sha256:abc123");
|
||||
receivedArgs.ChainTotalChunks.Should().Be(1);
|
||||
receivedArgs.ChainTotalEvents.Should().Be(100);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task FinalizeChunk_SavesAndLoadsChainState()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
var options = new EvidenceChunkFinalizerOptions
|
||||
{
|
||||
SigningKeyId = "test-key",
|
||||
CollectorVersion = "1.0.0-test",
|
||||
ChainStateDirectory = _testDir,
|
||||
};
|
||||
|
||||
// First finalizer - create and finalize chunks
|
||||
await using (var finalizer1 = new EvidenceChunkFinalizer(
|
||||
NullLogger<EvidenceChunkFinalizer>.Instance,
|
||||
signer,
|
||||
options))
|
||||
{
|
||||
var args1 = CreateChunkRotatedArgs(1, 100, "sha256:first");
|
||||
await finalizer1.FinalizeChunkAsync(args1, CancellationToken.None);
|
||||
|
||||
var args2 = CreateChunkRotatedArgs(2, 200, "sha256:second");
|
||||
await finalizer1.FinalizeChunkAsync(args2, CancellationToken.None);
|
||||
}
|
||||
|
||||
// Second finalizer - load state
|
||||
await using var finalizer2 = new EvidenceChunkFinalizer(
|
||||
NullLogger<EvidenceChunkFinalizer>.Instance,
|
||||
signer,
|
||||
options);
|
||||
|
||||
var chainKey = Path.GetDirectoryName(Path.Combine(_testDir, "evidence"));
|
||||
await finalizer2.LoadChainStateAsync(chainKey!, CancellationToken.None);
|
||||
|
||||
// Third chunk should link to second
|
||||
var args3 = CreateChunkRotatedArgs(3, 300, "sha256:third");
|
||||
var result3 = await finalizer2.FinalizeChunkAsync(args3, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
result3.Predicate.PreviousChunkId.Should().Be("sha256:second");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task VerifyChain_ValidChain_ReturnsTrue()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
await using var finalizer = new EvidenceChunkFinalizer(
|
||||
NullLogger<EvidenceChunkFinalizer>.Instance,
|
||||
signer);
|
||||
|
||||
var results = new List<EvidenceChunkSignResult>();
|
||||
|
||||
// Create chain of chunks
|
||||
for (int i = 1; i <= 3; i++)
|
||||
{
|
||||
var args = CreateChunkRotatedArgs(i, 100 * i, $"sha256:chunk{i}");
|
||||
var result = await finalizer.FinalizeChunkAsync(args, CancellationToken.None);
|
||||
results.Add(result);
|
||||
}
|
||||
|
||||
// Act
|
||||
var verification = await finalizer.VerifyChainAsync(results, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
verification.IsValid.Should().BeTrue();
|
||||
verification.VerifiedChunks.Should().Be(3);
|
||||
verification.Errors.Should().BeEmpty();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task VerifyChain_BrokenChain_ReturnsErrors()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
await using var finalizer = new EvidenceChunkFinalizer(
|
||||
NullLogger<EvidenceChunkFinalizer>.Instance,
|
||||
signer);
|
||||
|
||||
// Create first chunk
|
||||
var args1 = CreateChunkRotatedArgs(1, 100, "sha256:chunk1");
|
||||
var result1 = await finalizer.FinalizeChunkAsync(args1, CancellationToken.None);
|
||||
|
||||
// Create second chunk with wrong previous hash
|
||||
var args2 = CreateChunkRotatedArgs(2, 200, "sha256:chunk2");
|
||||
var result2 = await finalizer.FinalizeChunkAsync(args2, CancellationToken.None);
|
||||
|
||||
// Tamper with chain
|
||||
var tamperedResult2 = result2 with
|
||||
{
|
||||
Predicate = result2.Predicate with { PreviousChunkId = "sha256:wrong" },
|
||||
};
|
||||
|
||||
// Act
|
||||
var verification = await finalizer.VerifyChainAsync(
|
||||
new[] { result1, tamperedResult2 },
|
||||
CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
verification.IsValid.Should().BeFalse();
|
||||
verification.Errors.Should().ContainSingle(e => e.ErrorType == "chain_broken");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task VerifyChain_EmptyChain_ReturnsValid()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
await using var finalizer = new EvidenceChunkFinalizer(
|
||||
NullLogger<EvidenceChunkFinalizer>.Instance,
|
||||
signer);
|
||||
|
||||
// Act
|
||||
var verification = await finalizer.VerifyChainAsync(
|
||||
Array.Empty<EvidenceChunkSignResult>(),
|
||||
CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
verification.IsValid.Should().BeTrue();
|
||||
verification.VerifiedChunks.Should().Be(0);
|
||||
}
|
||||
|
||||
private ChunkRotatedEventArgs CreateChunkRotatedArgs(
|
||||
int sequence,
|
||||
long eventCount,
|
||||
string contentHash)
|
||||
{
|
||||
// Create timestamps in ascending order: chunk 1 starts at base, chunk 2 at base+1hr, etc.
|
||||
var baseTime = DateTimeOffset.UtcNow.AddHours(-10);
|
||||
var startTime = baseTime.AddMinutes((sequence - 1) * 10);
|
||||
return new ChunkRotatedEventArgs
|
||||
{
|
||||
Statistics = new ChunkStatistics
|
||||
{
|
||||
FilePath = Path.Combine(_testDir, $"evidence-{sequence:D6}.ndjson"),
|
||||
Size = eventCount * 100,
|
||||
EventCount = eventCount,
|
||||
StartTime = startTime,
|
||||
Duration = TimeSpan.FromMinutes(5),
|
||||
ContentHash = contentHash,
|
||||
ChunkSequence = sequence,
|
||||
},
|
||||
PreviousChunkHash = sequence > 1 ? $"sha256:chunk{sequence - 1}" : null,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
public sealed class LocalEvidenceChunkSignerTests
|
||||
{
|
||||
[Fact]
|
||||
public async Task SignAsync_CreatesDsseEnvelope()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
|
||||
var request = new EvidenceChunkSignRequest
|
||||
{
|
||||
Statistics = new ChunkStatistics
|
||||
{
|
||||
FilePath = "/tmp/evidence.ndjson",
|
||||
Size = 10000,
|
||||
EventCount = 100,
|
||||
StartTime = DateTimeOffset.Parse("2026-01-27T10:00:00Z"),
|
||||
Duration = TimeSpan.FromMinutes(5),
|
||||
ContentHash = "sha256:abc123def456",
|
||||
ChunkSequence = 1,
|
||||
},
|
||||
KeyId = "test-key",
|
||||
CollectorVersion = "1.0.0",
|
||||
KernelVersion = "5.15.0",
|
||||
};
|
||||
|
||||
// Act
|
||||
var result = await signer.SignAsync(request, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result.Predicate.ChunkId.Should().Be("sha256:abc123def456");
|
||||
result.Predicate.CollectorVersion.Should().Be("1.0.0");
|
||||
result.Predicate.KernelVersion.Should().Be("5.15.0");
|
||||
result.DsseEnvelopeBase64.Should().NotBeNullOrEmpty();
|
||||
|
||||
// Decode and verify envelope structure
|
||||
var envelopeJson = Encoding.UTF8.GetString(Convert.FromBase64String(result.DsseEnvelopeBase64));
|
||||
var envelope = JsonDocument.Parse(envelopeJson);
|
||||
envelope.RootElement.GetProperty("payloadType").GetString()
|
||||
.Should().Be("application/vnd.in-toto+json");
|
||||
envelope.RootElement.GetProperty("signatures").GetArrayLength()
|
||||
.Should().Be(1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task VerifyAsync_ValidSignature_ReturnsTrue()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
|
||||
var request = new EvidenceChunkSignRequest
|
||||
{
|
||||
Statistics = new ChunkStatistics
|
||||
{
|
||||
FilePath = "/tmp/evidence.ndjson",
|
||||
Size = 10000,
|
||||
EventCount = 100,
|
||||
StartTime = DateTimeOffset.UtcNow,
|
||||
Duration = TimeSpan.FromMinutes(5),
|
||||
ContentHash = "sha256:abc123",
|
||||
ChunkSequence = 1,
|
||||
},
|
||||
KeyId = "test-key",
|
||||
CollectorVersion = "1.0.0",
|
||||
};
|
||||
|
||||
var result = await signer.SignAsync(request, CancellationToken.None);
|
||||
|
||||
// Act
|
||||
var isValid = await signer.VerifyAsync(result, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
isValid.Should().BeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task VerifyAsync_TamperedSignature_ReturnsFalse()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
|
||||
var request = new EvidenceChunkSignRequest
|
||||
{
|
||||
Statistics = new ChunkStatistics
|
||||
{
|
||||
FilePath = "/tmp/evidence.ndjson",
|
||||
Size = 10000,
|
||||
EventCount = 100,
|
||||
StartTime = DateTimeOffset.UtcNow,
|
||||
Duration = TimeSpan.FromMinutes(5),
|
||||
ContentHash = "sha256:abc123",
|
||||
ChunkSequence = 1,
|
||||
},
|
||||
KeyId = "test-key",
|
||||
CollectorVersion = "1.0.0",
|
||||
};
|
||||
|
||||
var result = await signer.SignAsync(request, CancellationToken.None);
|
||||
|
||||
// Tamper with envelope
|
||||
var tamperedResult = result with
|
||||
{
|
||||
DsseEnvelopeBase64 = Convert.ToBase64String(
|
||||
Encoding.UTF8.GetBytes("{\"payloadType\":\"tampered\",\"payload\":\"\",\"signatures\":[]}")),
|
||||
};
|
||||
|
||||
// Act
|
||||
var isValid = await signer.VerifyAsync(tamperedResult, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
isValid.Should().BeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task SignAsync_WithCompression_SetsCompressionField()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
|
||||
var request = new EvidenceChunkSignRequest
|
||||
{
|
||||
Statistics = new ChunkStatistics
|
||||
{
|
||||
FilePath = "/tmp/evidence.ndjson.gz",
|
||||
Size = 5000,
|
||||
EventCount = 100,
|
||||
StartTime = DateTimeOffset.UtcNow,
|
||||
Duration = TimeSpan.FromMinutes(5),
|
||||
ContentHash = "sha256:abc123",
|
||||
ChunkSequence = 1,
|
||||
},
|
||||
KeyId = "test-key",
|
||||
CollectorVersion = "1.0.0",
|
||||
};
|
||||
|
||||
// Act
|
||||
var result = await signer.SignAsync(request, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
result.Predicate.Compression.Should().Be("gzip");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task SignAsync_WithPreviousChunkHash_SetsChainLink()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
|
||||
var request = new EvidenceChunkSignRequest
|
||||
{
|
||||
Statistics = new ChunkStatistics
|
||||
{
|
||||
FilePath = "/tmp/evidence.ndjson",
|
||||
Size = 10000,
|
||||
EventCount = 100,
|
||||
StartTime = DateTimeOffset.UtcNow,
|
||||
Duration = TimeSpan.FromMinutes(5),
|
||||
ContentHash = "sha256:current",
|
||||
ChunkSequence = 2,
|
||||
},
|
||||
PreviousChunkHash = "sha256:previous",
|
||||
KeyId = "test-key",
|
||||
CollectorVersion = "1.0.0",
|
||||
};
|
||||
|
||||
// Act
|
||||
var result = await signer.SignAsync(request, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
result.Predicate.PreviousChunkId.Should().Be("sha256:previous");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task SignAsync_WithContainerIds_IncludesInPredicate()
|
||||
{
|
||||
// Arrange
|
||||
var signer = new LocalEvidenceChunkSigner(
|
||||
NullLogger<LocalEvidenceChunkSigner>.Instance);
|
||||
|
||||
var containerIds = new[] { "container-1", "container-2" };
|
||||
var request = new EvidenceChunkSignRequest
|
||||
{
|
||||
Statistics = new ChunkStatistics
|
||||
{
|
||||
FilePath = "/tmp/evidence.ndjson",
|
||||
Size = 10000,
|
||||
EventCount = 100,
|
||||
StartTime = DateTimeOffset.UtcNow,
|
||||
Duration = TimeSpan.FromMinutes(5),
|
||||
ContentHash = "sha256:abc123",
|
||||
ChunkSequence = 1,
|
||||
},
|
||||
KeyId = "test-key",
|
||||
CollectorVersion = "1.0.0",
|
||||
ContainerIds = containerIds,
|
||||
};
|
||||
|
||||
// Act
|
||||
var result = await signer.SignAsync(request, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
result.Predicate.ContainerIds.Should().BeEquivalentTo(containerIds);
|
||||
}
|
||||
}
|
||||
|
||||
public sealed class NullEvidenceChunkSignerTests
|
||||
{
|
||||
[Fact]
|
||||
public async Task SignAsync_ReturnsUnsignedResult()
|
||||
{
|
||||
// Arrange
|
||||
var signer = NullEvidenceChunkSigner.Instance;
|
||||
var request = new EvidenceChunkSignRequest
|
||||
{
|
||||
Statistics = new ChunkStatistics
|
||||
{
|
||||
FilePath = "/tmp/evidence.ndjson",
|
||||
Size = 10000,
|
||||
EventCount = 100,
|
||||
StartTime = DateTimeOffset.UtcNow,
|
||||
Duration = TimeSpan.FromMinutes(5),
|
||||
ContentHash = "sha256:abc123",
|
||||
ChunkSequence = 1,
|
||||
},
|
||||
KeyId = "null-key",
|
||||
CollectorVersion = "1.0.0",
|
||||
};
|
||||
|
||||
// Act
|
||||
var result = await signer.SignAsync(request, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
result.Should().NotBeNull();
|
||||
result.DsseEnvelopeBase64.Should().BeEmpty();
|
||||
result.RekorUuid.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task VerifyAsync_AlwaysReturnsTrue()
|
||||
{
|
||||
// Arrange
|
||||
var signer = NullEvidenceChunkSigner.Instance;
|
||||
var result = new EvidenceChunkSignResult
|
||||
{
|
||||
Statistics = new ChunkStatistics
|
||||
{
|
||||
FilePath = "/tmp/evidence.ndjson",
|
||||
Size = 10000,
|
||||
EventCount = 100,
|
||||
StartTime = DateTimeOffset.UtcNow,
|
||||
Duration = TimeSpan.FromMinutes(5),
|
||||
ContentHash = "sha256:abc123",
|
||||
ChunkSequence = 1,
|
||||
},
|
||||
Predicate = new RuntimeEvidencePredicate
|
||||
{
|
||||
ChunkId = "sha256:abc123",
|
||||
ChunkSequence = 1,
|
||||
EventCount = 100,
|
||||
TimeRange = new EvidenceTimeRange
|
||||
{
|
||||
Start = DateTimeOffset.UtcNow,
|
||||
End = DateTimeOffset.UtcNow.AddMinutes(5),
|
||||
},
|
||||
CollectorVersion = "1.0.0",
|
||||
},
|
||||
DsseEnvelopeBase64 = string.Empty,
|
||||
SignedAt = DateTimeOffset.UtcNow,
|
||||
KeyId = "null-key",
|
||||
};
|
||||
|
||||
// Act
|
||||
var isValid = await signer.VerifyAsync(result, CancellationToken.None);
|
||||
|
||||
// Assert
|
||||
isValid.Should().BeTrue();
|
||||
}
|
||||
}
|
||||
|
||||
public sealed class PredicateTypeTests
|
||||
{
|
||||
[Fact]
|
||||
public void RuntimeEvidenceType_IsRecognized()
|
||||
{
|
||||
// Assert
|
||||
StellaOps.Signer.Core.PredicateTypes.IsRuntimeEvidenceType("stella.ops/runtime-evidence@v1")
|
||||
.Should().BeTrue();
|
||||
|
||||
StellaOps.Signer.Core.PredicateTypes.IsRuntimeEvidenceType("https://stella.ops/predicates/runtime-evidence/v1")
|
||||
.Should().BeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RuntimeEvidenceType_IsInAllowedList()
|
||||
{
|
||||
// Assert
|
||||
StellaOps.Signer.Core.PredicateTypes.IsAllowedPredicateType("stella.ops/runtime-evidence@v1")
|
||||
.Should().BeTrue();
|
||||
|
||||
StellaOps.Signer.Core.PredicateTypes.IsAllowedPredicateType("https://stella.ops/predicates/runtime-evidence/v1")
|
||||
.Should().BeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RuntimeEvidenceType_IsReachabilityRelated()
|
||||
{
|
||||
// Assert
|
||||
StellaOps.Signer.Core.PredicateTypes.IsReachabilityRelatedType("stella.ops/runtime-evidence@v1")
|
||||
.Should().BeTrue();
|
||||
}
|
||||
}
|
||||
@@ -14,11 +14,20 @@
|
||||
<PrivateAssets>all</PrivateAssets>
|
||||
</PackageReference>
|
||||
<PackageReference Include="FluentAssertions" />
|
||||
<PackageReference Include="Microsoft.Extensions.Caching.Memory" />
|
||||
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" />
|
||||
<PackageReference Include="Microsoft.NET.Test.Sdk" />
|
||||
<PackageReference Include="Moq" />
|
||||
<PackageReference Include="xunit.v3" />
|
||||
<PackageReference Include="xunit.runner.visualstudio">
|
||||
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
|
||||
<PrivateAssets>all</PrivateAssets>
|
||||
</PackageReference>
|
||||
</ItemGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<ProjectReference Include="..\..\__Libraries\StellaOps.Signals.Ebpf\StellaOps.Signals.Ebpf.csproj" />
|
||||
<ProjectReference Include="..\..\..\Scanner\__Libraries\StellaOps.Scanner.Reachability\StellaOps.Scanner.Reachability.csproj" />
|
||||
<ProjectReference Include="..\..\..\Signer\StellaOps.Signer\StellaOps.Signer.Core\StellaOps.Signer.Core.csproj" />
|
||||
</ItemGroup>
|
||||
</Project>
|
||||
@@ -0,0 +1,612 @@
|
||||
// <copyright file="EnhancedSymbolResolverTests.cs" company="StellaOps">
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
// </copyright>
|
||||
|
||||
namespace StellaOps.Signals.Ebpf.Tests.Symbols;
|
||||
|
||||
using System.Buffers.Binary;
|
||||
using System.Text;
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Caching.Memory;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using StellaOps.Signals.Ebpf.Symbols;
|
||||
using Xunit;
|
||||
|
||||
public class EnhancedSymbolResolverTests : IDisposable
|
||||
{
|
||||
private readonly string _testProcRoot;
|
||||
private readonly string _testLibPath;
|
||||
private readonly IMemoryCache _memoryCache;
|
||||
private readonly EnhancedSymbolResolver _resolver;
|
||||
|
||||
public EnhancedSymbolResolverTests()
|
||||
{
|
||||
_testProcRoot = Path.Combine(Path.GetTempPath(), $"proc_test_{Guid.NewGuid():N}");
|
||||
_testLibPath = Path.Combine(Path.GetTempPath(), $"lib_test_{Guid.NewGuid():N}");
|
||||
Directory.CreateDirectory(_testProcRoot);
|
||||
Directory.CreateDirectory(_testLibPath);
|
||||
|
||||
_memoryCache = new MemoryCache(new MemoryCacheOptions { SizeLimit = 10000 });
|
||||
_resolver = new EnhancedSymbolResolver(
|
||||
NullLogger<EnhancedSymbolResolver>.Instance,
|
||||
_memoryCache,
|
||||
_testProcRoot);
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
_resolver.Dispose();
|
||||
_memoryCache.Dispose();
|
||||
|
||||
if (Directory.Exists(_testProcRoot))
|
||||
{
|
||||
Directory.Delete(_testProcRoot, recursive: true);
|
||||
}
|
||||
|
||||
if (Directory.Exists(_testLibPath))
|
||||
{
|
||||
Directory.Delete(_testLibPath, recursive: true);
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_ProcessNotFound_ReturnsNull()
|
||||
{
|
||||
// Arrange - no maps file created
|
||||
var pid = 99999;
|
||||
var address = 0x7f1234567890UL;
|
||||
|
||||
// Act
|
||||
var (symbol, library, purl) = _resolver.Resolve(pid, address);
|
||||
|
||||
// Assert
|
||||
symbol.Should().BeNull();
|
||||
library.Should().BeNull();
|
||||
purl.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_AddressInMappedRegion_ReturnsLibraryPath()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12345;
|
||||
var libPath = Path.Combine(_testLibPath, "libtest.so");
|
||||
|
||||
// Create a simple non-ELF file (symbol resolution will fail but library should be found)
|
||||
File.WriteAllBytes(libPath, new byte[100]);
|
||||
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {libPath}
|
||||
7f1234570000-7f1234580000 rw-p 00010000 08:01 12345 {libPath}
|
||||
");
|
||||
|
||||
// Act
|
||||
var (symbol, library, _) = _resolver.Resolve(pid, 0x7f1234565000UL);
|
||||
|
||||
// Assert
|
||||
library.Should().Be(libPath);
|
||||
symbol.Should().StartWith("addr:0x"); // Symbol resolution fails, falls back to address
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_AddressInAnonymousMapping_ReturnsSpecialRegion()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12346;
|
||||
SetupMapsFile(pid, @"
|
||||
7ffc12340000-7ffc12360000 rw-p 00000000 00:00 0 [stack]
|
||||
7ffc12360000-7ffc12380000 rw-p 00000000 00:00 0 [heap]
|
||||
");
|
||||
|
||||
// Act
|
||||
var (symbol, library, _) = _resolver.Resolve(pid, 0x7ffc12350000UL);
|
||||
|
||||
// Assert
|
||||
library.Should().Be("[stack]");
|
||||
symbol.Should().StartWith("addr:0x");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_AddressNotInAnyMapping_ReturnsAddressOnly()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12347;
|
||||
SetupMapsFile(pid, @"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 /lib/libc.so.6
|
||||
");
|
||||
|
||||
// Address outside all mappings
|
||||
var address = 0x7f9999999999UL;
|
||||
|
||||
// Act
|
||||
var (symbol, library, _) = _resolver.Resolve(pid, address);
|
||||
|
||||
// Assert
|
||||
symbol.Should().StartWith("addr:0x");
|
||||
library.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_WithElfSymbols_ReturnsSymbolName()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12348;
|
||||
var libPath = Path.Combine(_testLibPath, "libsymbols.so");
|
||||
|
||||
// Create a minimal ELF64 file with symbols
|
||||
CreateMinimalElf64WithSymbols(libPath, new[]
|
||||
{
|
||||
("my_function", 0x1000UL, 0x100UL),
|
||||
("another_func", 0x1100UL, 0x80UL),
|
||||
("global_var", 0x2000UL, 0x8UL),
|
||||
});
|
||||
|
||||
// Map starts at 0x7f1234560000, file offset 0
|
||||
// So address 0x7f1234561000 maps to file offset 0x1000 (my_function)
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {libPath}
|
||||
");
|
||||
|
||||
// Act
|
||||
var (symbol, library, _) = _resolver.Resolve(pid, 0x7f1234561050UL);
|
||||
|
||||
// Assert
|
||||
symbol.Should().Be("my_function");
|
||||
library.Should().Be(libPath);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_SymbolWithOffset_ReturnsSymbolPlusOffset()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12349;
|
||||
var libPath = Path.Combine(_testLibPath, "liboffset.so");
|
||||
|
||||
CreateMinimalElf64WithSymbols(libPath, new[]
|
||||
{
|
||||
("base_function", 0x1000UL, 0x100UL),
|
||||
});
|
||||
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {libPath}
|
||||
");
|
||||
|
||||
// Address past the symbol but within 64KB
|
||||
var (symbol, _, _) = _resolver.Resolve(pid, 0x7f1234561200UL);
|
||||
|
||||
// Assert - should return symbol+offset
|
||||
symbol.Should().Contain("base_function+0x");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_CachesResult()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12350;
|
||||
var libPath = Path.Combine(_testLibPath, "libcache.so");
|
||||
File.WriteAllBytes(libPath, new byte[100]);
|
||||
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {libPath}
|
||||
");
|
||||
|
||||
// Act - resolve same address twice
|
||||
var result1 = _resolver.Resolve(pid, 0x7f1234565000UL);
|
||||
var result2 = _resolver.Resolve(pid, 0x7f1234565000UL);
|
||||
|
||||
// Assert - both should return same values (from cache on second call)
|
||||
result1.Symbol.Should().Be(result2.Symbol);
|
||||
result1.Library.Should().Be(result2.Library);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void InvalidateProcess_RemovesCachedMaps()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12351;
|
||||
var libPath1 = Path.Combine(_testLibPath, "libfirst.so");
|
||||
var libPath2 = Path.Combine(_testLibPath, "libsecond.so");
|
||||
File.WriteAllBytes(libPath1, new byte[100]);
|
||||
File.WriteAllBytes(libPath2, new byte[100]);
|
||||
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {libPath1}
|
||||
");
|
||||
var (_, library1, _) = _resolver.Resolve(pid, 0x7f1234565000UL);
|
||||
|
||||
// Update maps file to point to different library
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {libPath2}
|
||||
");
|
||||
|
||||
// Act - InvalidateProcess clears the maps cache, so a NEW address
|
||||
// will trigger re-reading the maps file. Existing symbol cache entries
|
||||
// remain valid until they expire.
|
||||
_resolver.InvalidateProcess(pid);
|
||||
|
||||
// Use a DIFFERENT address to force re-reading the maps file
|
||||
var (_, library2, _) = _resolver.Resolve(pid, 0x7f1234566000UL);
|
||||
|
||||
// Assert
|
||||
library1.Should().Be(libPath1);
|
||||
library2.Should().Be(libPath2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_AfterDispose_ThrowsObjectDisposedException()
|
||||
{
|
||||
// Arrange
|
||||
var localCache = new MemoryCache(new MemoryCacheOptions { SizeLimit = 100 });
|
||||
var localResolver = new EnhancedSymbolResolver(
|
||||
NullLogger<EnhancedSymbolResolver>.Instance,
|
||||
localCache,
|
||||
_testProcRoot);
|
||||
localResolver.Dispose();
|
||||
|
||||
// Act & Assert
|
||||
var act = () => localResolver.Resolve(123, 0x1000UL);
|
||||
act.Should().Throw<ObjectDisposedException>();
|
||||
localCache.Dispose();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_MapsWithFileOffset_CalculatesCorrectSymbolAddress()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12352;
|
||||
var libPath = Path.Combine(_testLibPath, "liboffsetmap.so");
|
||||
|
||||
// Symbol at file offset 0x1000
|
||||
CreateMinimalElf64WithSymbols(libPath, new[]
|
||||
{
|
||||
("offset_function", 0x1000UL, 0x100UL),
|
||||
});
|
||||
|
||||
// Map with file offset 0x1000 - so file offset 0x1000 maps to address 0x7f1234560000
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00001000 08:01 12345 {libPath}
|
||||
");
|
||||
|
||||
// Act
|
||||
var (symbol, _, _) = _resolver.Resolve(pid, 0x7f1234560000UL);
|
||||
|
||||
// Assert
|
||||
symbol.Should().Be("offset_function");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_MultipleMappings_FindsCorrectOne()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12353;
|
||||
var lib1 = Path.Combine(_testLibPath, "libfirst.so");
|
||||
var lib2 = Path.Combine(_testLibPath, "libsecond.so");
|
||||
File.WriteAllBytes(lib1, new byte[100]);
|
||||
File.WriteAllBytes(lib2, new byte[100]);
|
||||
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {lib1}
|
||||
7f1234580000-7f1234590000 r-xp 00000000 08:01 12346 {lib2}
|
||||
7f12345a0000-7f12345b0000 rw-p 00000000 00:00 0 [heap]
|
||||
");
|
||||
|
||||
// Act
|
||||
var (_, library1, _) = _resolver.Resolve(pid, 0x7f1234565000UL);
|
||||
var (_, library2, _) = _resolver.Resolve(pid, 0x7f1234585000UL);
|
||||
var (_, library3, _) = _resolver.Resolve(pid, 0x7f12345a5000UL);
|
||||
|
||||
// Assert
|
||||
library1.Should().Be(lib1);
|
||||
library2.Should().Be(lib2);
|
||||
library3.Should().Be("[heap]");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_InvalidMapsFormat_ReturnsNull()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12354;
|
||||
var pidDir = Path.Combine(_testProcRoot, pid.ToString());
|
||||
Directory.CreateDirectory(pidDir);
|
||||
File.WriteAllText(Path.Combine(pidDir, "maps"), "invalid format garbage data");
|
||||
|
||||
// Act
|
||||
var (symbol, library, _) = _resolver.Resolve(pid, 0x7f1234565000UL);
|
||||
|
||||
// Assert
|
||||
symbol.Should().BeNull();
|
||||
library.Should().BeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_NonElfFile_ReturnsAddressFallback()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12355;
|
||||
var libPath = Path.Combine(_testLibPath, "notelf.so");
|
||||
File.WriteAllText(libPath, "This is not an ELF file");
|
||||
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {libPath}
|
||||
");
|
||||
|
||||
// Act
|
||||
var (symbol, library, _) = _resolver.Resolve(pid, 0x7f1234565000UL);
|
||||
|
||||
// Assert
|
||||
library.Should().Be(libPath);
|
||||
symbol.Should().StartWith("addr:0x");
|
||||
}
|
||||
|
||||
#region Performance Tests
|
||||
|
||||
[Fact]
|
||||
public void Resolve_CachedLookup_CompletesUnder1Ms()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12360;
|
||||
var libPath = Path.Combine(_testLibPath, "libperf_cached.so");
|
||||
|
||||
CreateMinimalElf64WithSymbols(libPath, new[]
|
||||
{
|
||||
("perf_function", 0x1000UL, 0x100UL),
|
||||
});
|
||||
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {libPath}
|
||||
");
|
||||
|
||||
// Warm up the cache with first call
|
||||
_ = _resolver.Resolve(pid, 0x7f1234561000UL);
|
||||
|
||||
// Act - measure cached lookups
|
||||
const int iterations = 1000;
|
||||
var timings = new long[iterations];
|
||||
var sw = new System.Diagnostics.Stopwatch();
|
||||
|
||||
for (int i = 0; i < iterations; i++)
|
||||
{
|
||||
sw.Restart();
|
||||
_ = _resolver.Resolve(pid, 0x7f1234561000UL);
|
||||
sw.Stop();
|
||||
timings[i] = sw.ElapsedTicks;
|
||||
}
|
||||
|
||||
// Calculate p99
|
||||
Array.Sort(timings);
|
||||
var p99Index = (int)(iterations * 0.99);
|
||||
var p99Ticks = timings[p99Index];
|
||||
var p99Ms = (double)p99Ticks / System.Diagnostics.Stopwatch.Frequency * 1000;
|
||||
|
||||
// Assert - p99 should be under 1ms for cached lookups
|
||||
p99Ms.Should().BeLessThan(1.0, $"p99 latency for cached lookups should be <1ms, but was {p99Ms:F3}ms");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_UncachedLookup_CompletesUnder10Ms()
|
||||
{
|
||||
// Arrange - create multiple processes to test uncached lookups
|
||||
const int numProcesses = 50;
|
||||
var libPath = Path.Combine(_testLibPath, "libperf_uncached.so");
|
||||
|
||||
CreateMinimalElf64WithSymbols(libPath, new[]
|
||||
{
|
||||
("uncached_function", 0x1000UL, 0x100UL),
|
||||
("another_func", 0x2000UL, 0x100UL),
|
||||
("third_func", 0x3000UL, 0x100UL),
|
||||
});
|
||||
|
||||
// Create maps for multiple processes
|
||||
for (int i = 0; i < numProcesses; i++)
|
||||
{
|
||||
var pid = 20000 + i;
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234570000 r-xp 00000000 08:01 12345 {libPath}
|
||||
7f1234570000-7f1234580000 rw-p 00010000 08:01 12345 {libPath}
|
||||
");
|
||||
}
|
||||
|
||||
// Act - measure uncached lookups (first access per PID)
|
||||
var timings = new List<long>();
|
||||
var sw = new System.Diagnostics.Stopwatch();
|
||||
|
||||
for (int i = 0; i < numProcesses; i++)
|
||||
{
|
||||
var pid = 20000 + i;
|
||||
sw.Restart();
|
||||
_ = _resolver.Resolve(pid, 0x7f1234561000UL);
|
||||
sw.Stop();
|
||||
timings.Add(sw.ElapsedTicks);
|
||||
}
|
||||
|
||||
// Calculate p99
|
||||
timings.Sort();
|
||||
var p99Index = (int)(timings.Count * 0.99);
|
||||
if (p99Index >= timings.Count) p99Index = timings.Count - 1;
|
||||
var p99Ticks = timings[p99Index];
|
||||
var p99Ms = (double)p99Ticks / System.Diagnostics.Stopwatch.Frequency * 1000;
|
||||
|
||||
// Assert - p99 should be under 10ms for uncached lookups
|
||||
p99Ms.Should().BeLessThan(10.0, $"p99 latency for uncached lookups should be <10ms, but was {p99Ms:F3}ms");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Resolve_HighVolumeCached_MaintainsPerformance()
|
||||
{
|
||||
// Arrange
|
||||
var pid = 12361;
|
||||
var libPath = Path.Combine(_testLibPath, "libperf_volume.so");
|
||||
|
||||
CreateMinimalElf64WithSymbols(libPath, new[]
|
||||
{
|
||||
("volume_func_1", 0x1000UL, 0x100UL),
|
||||
("volume_func_2", 0x2000UL, 0x100UL),
|
||||
("volume_func_3", 0x3000UL, 0x100UL),
|
||||
("volume_func_4", 0x4000UL, 0x100UL),
|
||||
("volume_func_5", 0x5000UL, 0x100UL),
|
||||
});
|
||||
|
||||
SetupMapsFile(pid, $@"
|
||||
7f1234560000-7f1234580000 r-xp 00000000 08:01 12345 {libPath}
|
||||
");
|
||||
|
||||
// Warm up cache with various addresses
|
||||
var addresses = new ulong[]
|
||||
{
|
||||
0x7f1234561000UL, 0x7f1234562000UL, 0x7f1234563000UL,
|
||||
0x7f1234564000UL, 0x7f1234565000UL,
|
||||
};
|
||||
|
||||
foreach (var addr in addresses)
|
||||
{
|
||||
_ = _resolver.Resolve(pid, addr);
|
||||
}
|
||||
|
||||
// Act - high volume cached lookups
|
||||
const int iterations = 5000;
|
||||
var sw = System.Diagnostics.Stopwatch.StartNew();
|
||||
|
||||
for (int i = 0; i < iterations; i++)
|
||||
{
|
||||
var addr = addresses[i % addresses.Length];
|
||||
_ = _resolver.Resolve(pid, addr);
|
||||
}
|
||||
|
||||
sw.Stop();
|
||||
var avgMicroseconds = sw.Elapsed.TotalMicroseconds / iterations;
|
||||
|
||||
// Assert - average should be well under 100 microseconds for cached
|
||||
avgMicroseconds.Should().BeLessThan(100, $"Average cached lookup should be <100µs, but was {avgMicroseconds:F1}µs");
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
#region Helpers
|
||||
|
||||
private void SetupMapsFile(int pid, string content)
|
||||
{
|
||||
var pidDir = Path.Combine(_testProcRoot, pid.ToString());
|
||||
Directory.CreateDirectory(pidDir);
|
||||
File.WriteAllText(Path.Combine(pidDir, "maps"), content.Trim());
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a minimal valid ELF64 file with the specified symbols.
|
||||
/// </summary>
|
||||
private static void CreateMinimalElf64WithSymbols(string path, (string name, ulong value, ulong size)[] symbols)
|
||||
{
|
||||
using var stream = File.Create(path);
|
||||
using var writer = new BinaryWriter(stream);
|
||||
|
||||
// ELF Header (64 bytes)
|
||||
// e_ident[16]
|
||||
writer.Write((uint)0x464C457F); // Magic: "\x7FELF"
|
||||
writer.Write((byte)2); // EI_CLASS: 64-bit
|
||||
writer.Write((byte)1); // EI_DATA: little endian
|
||||
writer.Write((byte)1); // EI_VERSION: current
|
||||
writer.Write((byte)0); // EI_OSABI: SYSV
|
||||
writer.Write(new byte[8]); // EI_PAD
|
||||
|
||||
writer.Write((ushort)3); // e_type: ET_DYN (shared object)
|
||||
writer.Write((ushort)62); // e_machine: x86-64
|
||||
writer.Write((uint)1); // e_version
|
||||
writer.Write((ulong)0); // e_entry
|
||||
writer.Write((ulong)0); // e_phoff (no program headers for this test)
|
||||
writer.Write((ulong)64); // e_shoff (section headers at offset 64)
|
||||
writer.Write((uint)0); // e_flags
|
||||
writer.Write((ushort)64); // e_ehsize
|
||||
writer.Write((ushort)0); // e_phentsize
|
||||
writer.Write((ushort)0); // e_phnum
|
||||
writer.Write((ushort)64); // e_shentsize
|
||||
writer.Write((ushort)3); // e_shnum (null + strtab + symtab)
|
||||
writer.Write((ushort)1); // e_shstrndx (section string table at index 1)
|
||||
|
||||
// Section 0: NULL section (64 bytes at offset 64)
|
||||
WriteSectionHeader(writer, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
||||
|
||||
// Build string table (for symbols)
|
||||
var strtabContent = BuildStringTable(symbols.Select(s => s.name).ToArray());
|
||||
var strtabOffset = 64 + 64 * 3; // After ELF header and 3 section headers
|
||||
|
||||
// Section 1: STRTAB (string table) at offset 128
|
||||
WriteSectionHeader(writer, 0, 3 /*SHT_STRTAB*/, 0, 0, (ulong)strtabOffset, (ulong)strtabContent.Length, 0, 0, 1, 0);
|
||||
|
||||
// Build symbol table
|
||||
var symtabOffset = strtabOffset + strtabContent.Length;
|
||||
var symtabContent = BuildSymbolTable(symbols, strtabContent);
|
||||
|
||||
// Section 2: SYMTAB at offset after strtab
|
||||
// Link points to strtab (section 1)
|
||||
WriteSectionHeader(writer, 0, 2 /*SHT_SYMTAB*/, 0, 0, (ulong)symtabOffset, (ulong)symtabContent.Length, 1, 0, 8, 24);
|
||||
|
||||
// Write string table content
|
||||
writer.Write(strtabContent);
|
||||
|
||||
// Write symbol table content
|
||||
writer.Write(symtabContent);
|
||||
}
|
||||
|
||||
private static void WriteSectionHeader(
|
||||
BinaryWriter writer,
|
||||
uint name, uint type, ulong flags, ulong addr,
|
||||
ulong offset, ulong size, uint link, uint info,
|
||||
ulong addralign, ulong entsize)
|
||||
{
|
||||
writer.Write(name);
|
||||
writer.Write(type);
|
||||
writer.Write(flags);
|
||||
writer.Write(addr);
|
||||
writer.Write(offset);
|
||||
writer.Write(size);
|
||||
writer.Write(link);
|
||||
writer.Write(info);
|
||||
writer.Write(addralign);
|
||||
writer.Write(entsize);
|
||||
}
|
||||
|
||||
private static byte[] BuildStringTable(string[] names)
|
||||
{
|
||||
var ms = new MemoryStream();
|
||||
ms.WriteByte(0); // First byte is always null
|
||||
|
||||
foreach (var name in names)
|
||||
{
|
||||
var bytes = Encoding.UTF8.GetBytes(name);
|
||||
ms.Write(bytes, 0, bytes.Length);
|
||||
ms.WriteByte(0);
|
||||
}
|
||||
|
||||
return ms.ToArray();
|
||||
}
|
||||
|
||||
private static byte[] BuildSymbolTable((string name, ulong value, ulong size)[] symbols, byte[] strtab)
|
||||
{
|
||||
var ms = new MemoryStream();
|
||||
var writer = new BinaryWriter(ms);
|
||||
|
||||
// First symbol is always null
|
||||
writer.Write((uint)0); // st_name
|
||||
writer.Write((byte)0); // st_info
|
||||
writer.Write((byte)0); // st_other
|
||||
writer.Write((ushort)0); // st_shndx
|
||||
writer.Write((ulong)0); // st_value
|
||||
writer.Write((ulong)0); // st_size
|
||||
|
||||
int strOffset = 1; // Skip initial null byte
|
||||
foreach (var (name, value, size) in symbols)
|
||||
{
|
||||
writer.Write((uint)strOffset); // st_name
|
||||
writer.Write((byte)0x12); // st_info: STB_GLOBAL | STT_FUNC
|
||||
writer.Write((byte)0); // st_other
|
||||
writer.Write((ushort)1); // st_shndx: some section
|
||||
writer.Write(value); // st_value
|
||||
writer.Write(size); // st_size
|
||||
|
||||
strOffset += Encoding.UTF8.GetByteCount(name) + 1;
|
||||
}
|
||||
|
||||
return ms.ToArray();
|
||||
}
|
||||
|
||||
#endregion
|
||||
}
|
||||
Reference in New Issue
Block a user