feat: Implement Filesystem and MongoDB provenance writers for PackRun execution context
Some checks failed
Airgap Sealed CI Smoke / sealed-smoke (push) Has been cancelled
Docs CI / lint-and-preview (push) Has been cancelled
Export Center CI / export-ci (push) Has been cancelled

- Added `FilesystemPackRunProvenanceWriter` to write provenance manifests to the filesystem.
- Introduced `MongoPackRunArtifactReader` to read artifacts from MongoDB.
- Created `MongoPackRunProvenanceWriter` to store provenance manifests in MongoDB.
- Developed unit tests for filesystem and MongoDB provenance writers.
- Established `ITimelineEventStore` and `ITimelineIngestionService` interfaces for timeline event handling.
- Implemented `TimelineIngestionService` to validate and persist timeline events with hashing.
- Created PostgreSQL schema and migration scripts for timeline indexing.
- Added dependency injection support for timeline indexer services.
- Developed tests for timeline ingestion and schema validation.
This commit is contained in:
StellaOps Bot
2025-11-30 15:38:14 +02:00
parent 8f54ffa203
commit 17d45a6d30
276 changed files with 8618 additions and 688 deletions

View File

@@ -1,6 +0,0 @@
namespace StellaOps.TimelineIndexer.Infrastructure;
public class Class1
{
}

View File

@@ -0,0 +1,111 @@
-- 001_initial_schema.sql
-- Establishes Timeline Indexer schema, RLS scaffolding, and evidence linkage tables.
CREATE EXTENSION IF NOT EXISTS pgcrypto;

CREATE SCHEMA IF NOT EXISTS timeline;
CREATE SCHEMA IF NOT EXISTS timeline_app;

-- Enforce tenant context for all RLS policies: raises unless the session has
-- SET app.current_tenant, so a policy can never silently match rows while the
-- tenant context is missing.
CREATE OR REPLACE FUNCTION timeline_app.require_current_tenant()
RETURNS text
LANGUAGE plpgsql
AS $$
DECLARE
    tenant_text text;
BEGIN
    tenant_text := current_setting('app.current_tenant', true);
    IF tenant_text IS NULL OR length(tenant_text) = 0 THEN
        RAISE EXCEPTION 'app.current_tenant is not set for the current session';
    END IF;
    RETURN tenant_text;
END;
$$;

-- Severity enum keeps ordering deterministic and compact.
DO $$
BEGIN
    CREATE TYPE timeline.event_severity AS ENUM ('info', 'notice', 'warn', 'error', 'critical');
EXCEPTION
    WHEN duplicate_object THEN NULL;
END
$$;

-- Core event header table (dedupe + ordering).
-- FIX: defaults previously used (NOW() AT TIME ZONE 'UTC'), which produces a
-- timestamp WITHOUT time zone that is re-interpreted in the session time zone
-- when assigned to a timestamptz column, silently shifting stored instants.
-- now() is already an absolute instant and is correct for timestamptz.
CREATE TABLE IF NOT EXISTS timeline.timeline_events
(
    event_seq      bigserial PRIMARY KEY,
    event_id       text NOT NULL,
    tenant_id      text NOT NULL,
    source         text NOT NULL,
    event_type     text NOT NULL,
    occurred_at    timestamptz NOT NULL,
    received_at    timestamptz NOT NULL DEFAULT now(),
    correlation_id text,
    trace_id       text,
    actor          text,
    severity       timeline.event_severity NOT NULL DEFAULT 'info',
    -- Lowercase hex SHA-256 of the raw payload (when provided).
    payload_hash   text CHECK (payload_hash IS NULL OR payload_hash ~ '^[0-9a-f]{64}$'),
    attributes     jsonb NOT NULL DEFAULT '{}'::jsonb,
    UNIQUE (tenant_id, event_id)
);

CREATE INDEX IF NOT EXISTS ix_timeline_events_tenant_occurred
    ON timeline.timeline_events (tenant_id, occurred_at DESC, event_seq DESC);
CREATE INDEX IF NOT EXISTS ix_timeline_events_type
    ON timeline.timeline_events (tenant_id, event_type, occurred_at DESC);

ALTER TABLE timeline.timeline_events ENABLE ROW LEVEL SECURITY;

-- FIX: PostgreSQL's CREATE POLICY has no IF NOT EXISTS clause; drop-then-create
-- keeps the migration idempotent while remaining valid syntax.
DROP POLICY IF EXISTS timeline_events_isolation ON timeline.timeline_events;
CREATE POLICY timeline_events_isolation
    ON timeline.timeline_events
    USING (tenant_id = timeline_app.require_current_tenant())
    WITH CHECK (tenant_id = timeline_app.require_current_tenant());

-- Raw and normalized payloads per event.
CREATE TABLE IF NOT EXISTS timeline.timeline_event_details
(
    event_id           text NOT NULL,
    tenant_id          text NOT NULL,
    envelope_version   text NOT NULL,
    raw_payload        jsonb NOT NULL,
    normalized_payload jsonb,
    created_at         timestamptz NOT NULL DEFAULT now(),
    CONSTRAINT fk_event_details FOREIGN KEY (event_id, tenant_id)
        REFERENCES timeline.timeline_events (event_id, tenant_id) ON DELETE CASCADE,
    PRIMARY KEY (event_id, tenant_id)
);

ALTER TABLE timeline.timeline_event_details ENABLE ROW LEVEL SECURITY;

DROP POLICY IF EXISTS timeline_event_details_isolation ON timeline.timeline_event_details;
CREATE POLICY timeline_event_details_isolation
    ON timeline.timeline_event_details
    USING (tenant_id = timeline_app.require_current_tenant())
    WITH CHECK (tenant_id = timeline_app.require_current_tenant());

-- Evidence linkage (bundle/attestation manifests).
CREATE TABLE IF NOT EXISTS timeline.timeline_event_digests
(
    digest_id           uuid PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id           text NOT NULL,
    event_id            text NOT NULL,
    bundle_id           uuid,
    bundle_digest       text,
    attestation_subject text,
    attestation_digest  text,
    manifest_uri        text,
    created_at          timestamptz NOT NULL DEFAULT now(),
    CONSTRAINT fk_event_digest_event FOREIGN KEY (event_id, tenant_id)
        REFERENCES timeline.timeline_events (event_id, tenant_id) ON DELETE CASCADE,
    CONSTRAINT ck_bundle_digest_sha CHECK (bundle_digest IS NULL OR bundle_digest ~ '^sha256:[0-9a-f]{64}$'),
    CONSTRAINT ck_attestation_digest_sha CHECK (attestation_digest IS NULL OR attestation_digest ~ '^sha256:[0-9a-f]{64}$')
);

CREATE INDEX IF NOT EXISTS ix_timeline_digests_event
    ON timeline.timeline_event_digests (tenant_id, event_id);
CREATE INDEX IF NOT EXISTS ix_timeline_digests_bundle
    ON timeline.timeline_event_digests (tenant_id, bundle_digest);

ALTER TABLE timeline.timeline_event_digests ENABLE ROW LEVEL SECURITY;

DROP POLICY IF EXISTS timeline_event_digests_isolation ON timeline.timeline_event_digests;
CREATE POLICY timeline_event_digests_isolation
    ON timeline.timeline_event_digests
    USING (tenant_id = timeline_app.require_current_tenant())
    WITH CHECK (tenant_id = timeline_app.require_current_tenant());

View File

@@ -0,0 +1,112 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using System.Text.Json;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.TimelineIndexer.Core.Abstractions;
using StellaOps.TimelineIndexer.Core.Models;
namespace StellaOps.TimelineIndexer.Infrastructure.Db;
/// <summary>
/// Postgres-backed implementation of <see cref="ITimelineEventStore"/>.
/// Persists the event header, payload detail row, and optional evidence-digest
/// row for a single envelope inside one transaction. A duplicate
/// (tenant_id, event_id) pair short-circuits with no writes.
/// </summary>
public sealed class TimelineEventStore(TimelineIndexerDataSource dataSource, ILogger<TimelineEventStore> logger)
    : RepositoryBase<TimelineIndexerDataSource>(dataSource, logger), ITimelineEventStore
{
    // FIX: severity is a Postgres enum (timeline.event_severity); a plain text
    // parameter fails with "column is of type ... but expression is of type text"
    // unless an Npgsql enum mapping is registered. The explicit cast makes the
    // statement valid either way (it is a no-op when the type already matches).
    private const string InsertEventSql = """
        INSERT INTO timeline.timeline_events
            (event_id, tenant_id, source, event_type, occurred_at, correlation_id, trace_id, actor, severity, payload_hash, attributes)
        VALUES
            (@event_id, @tenant_id, @source, @event_type, @occurred_at, @correlation_id, @trace_id, @actor, @severity::timeline.event_severity, @payload_hash, @attributes::jsonb)
        ON CONFLICT (tenant_id, event_id) DO NOTHING
        RETURNING event_seq;
        """;

    private const string InsertDetailSql = """
        INSERT INTO timeline.timeline_event_details
            (event_id, tenant_id, envelope_version, raw_payload, normalized_payload)
        VALUES
            (@event_id, @tenant_id, @envelope_version, @raw_payload::jsonb, @normalized_payload::jsonb)
        ON CONFLICT (event_id, tenant_id) DO NOTHING;
        """;

    // FIX: the previous ON CONFLICT (event_id, tenant_id) clause is a runtime
    // error here — timeline_event_digests has no unique constraint on those
    // columns (its PK is digest_id), and Postgres rejects a conflict target
    // without a matching unique index. The clause is unnecessary anyway: this
    // insert only runs when the header row was newly inserted in the same
    // transaction, so a duplicate digest row cannot occur.
    private const string InsertDigestSql = """
        INSERT INTO timeline.timeline_event_digests
            (tenant_id, event_id, bundle_id, bundle_digest, attestation_subject, attestation_digest, manifest_uri)
        VALUES
            (@tenant_id, @event_id, @bundle_id, @bundle_digest, @attestation_subject, @attestation_digest, @manifest_uri);
        """;

    /// <summary>
    /// Inserts the envelope atomically.
    /// </summary>
    /// <param name="envelope">Event envelope to persist; must not be null.</param>
    /// <param name="cancellationToken">Token used to cancel the database work.</param>
    /// <returns>
    /// True when the event was new and all rows were written; false when
    /// (tenant_id, event_id) already existed (nothing is written).
    /// </returns>
    public async Task<bool> InsertAsync(TimelineEventEnvelope envelope, CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(envelope);

        await using var connection = await DataSource.OpenConnectionAsync(envelope.TenantId, "writer", cancellationToken)
            .ConfigureAwait(false);
        // Npgsql automatically enlists commands on this connection in the open
        // transaction; command.Transaction does not need to be set explicitly.
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

        var inserted = await InsertEventAsync(connection, envelope, cancellationToken).ConfigureAwait(false);
        if (!inserted)
        {
            // Dedupe hit: header already exists for this tenant/event pair.
            await transaction.RollbackAsync(cancellationToken).ConfigureAwait(false);
            return false;
        }

        await InsertDetailAsync(connection, envelope, cancellationToken).ConfigureAwait(false);
        await InsertDigestAsync(connection, envelope, cancellationToken).ConfigureAwait(false);
        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
        return true;
    }

    // Inserts the header row; returns true only when a new row was created
    // (RETURNING yields the new event_seq, which ON CONFLICT suppresses).
    private async Task<bool> InsertEventAsync(NpgsqlConnection connection, TimelineEventEnvelope envelope, CancellationToken cancellationToken)
    {
        await using var command = CreateCommand(InsertEventSql, connection);
        AddParameter(command, "event_id", envelope.EventId);
        AddParameter(command, "tenant_id", envelope.TenantId);
        AddParameter(command, "source", envelope.Source);
        AddParameter(command, "event_type", envelope.EventType);
        AddParameter(command, "occurred_at", envelope.OccurredAt);
        AddParameter(command, "correlation_id", envelope.CorrelationId);
        AddParameter(command, "trace_id", envelope.TraceId);
        AddParameter(command, "actor", envelope.Actor);
        AddParameter(command, "severity", envelope.Severity);
        AddParameter(command, "payload_hash", envelope.PayloadHash);
        // attributes column is NOT NULL DEFAULT '{}', so serialize null to "{}".
        AddJsonbParameter(command, "attributes", envelope.Attributes is null
            ? "{}"
            : JsonSerializer.Serialize(envelope.Attributes));
        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return result is not null;
    }

    // Persists raw + normalized payloads keyed by (event_id, tenant_id).
    private async Task InsertDetailAsync(NpgsqlConnection connection, TimelineEventEnvelope envelope, CancellationToken cancellationToken)
    {
        await using var command = CreateCommand(InsertDetailSql, connection);
        AddParameter(command, "event_id", envelope.EventId);
        AddParameter(command, "tenant_id", envelope.TenantId);
        AddParameter(command, "envelope_version", "orch.event.v1");
        AddJsonbParameter(command, "raw_payload", envelope.RawPayloadJson);
        AddJsonbParameter(command, "normalized_payload", envelope.NormalizedPayloadJson);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    // Persists evidence linkage; skipped entirely when the envelope carries no
    // bundle or attestation information.
    private async Task InsertDigestAsync(NpgsqlConnection connection, TimelineEventEnvelope envelope, CancellationToken cancellationToken)
    {
        if (envelope.BundleDigest is null && envelope.AttestationDigest is null && envelope.ManifestUri is null && envelope.BundleId is null)
        {
            return;
        }

        await using var command = CreateCommand(InsertDigestSql, connection);
        AddParameter(command, "tenant_id", envelope.TenantId);
        AddParameter(command, "event_id", envelope.EventId);
        AddParameter(command, "bundle_id", envelope.BundleId);
        AddParameter(command, "bundle_digest", envelope.BundleDigest);
        AddParameter(command, "attestation_subject", envelope.AttestationSubject);
        AddParameter(command, "attestation_digest", envelope.AttestationDigest);
        AddParameter(command, "manifest_uri", envelope.ManifestUri);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
}

View File

@@ -0,0 +1,47 @@
using System.Reflection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Infrastructure.Postgres.Migrations;
using StellaOps.Infrastructure.Postgres.Options;
namespace StellaOps.TimelineIndexer.Infrastructure.Db;
/// <summary>
/// Runs embedded SQL migrations for the Timeline Indexer schema.
/// Resolves the target schema from <see cref="PostgresOptions"/> (falling back
/// to the module default) and delegates execution to <see cref="MigrationRunner"/>.
/// </summary>
public sealed class TimelineIndexerMigrationRunner
{
    // Embedded-resource namespace under which the *.sql migrations live.
    private const string ResourcePrefix = "StellaOps.TimelineIndexer.Infrastructure.Db.Migrations";

    private readonly PostgresOptions _options;
    private readonly ILogger<TimelineIndexerMigrationRunner> _logger;

    public TimelineIndexerMigrationRunner(
        IOptions<PostgresOptions> options,
        ILogger<TimelineIndexerMigrationRunner> logger)
    {
        ArgumentNullException.ThrowIfNull(logger);
        _options = options.Value ?? throw new ArgumentNullException(nameof(options));
        _logger = logger;
    }

    /// <summary>
    /// Apply all pending migrations from embedded resources.
    /// </summary>
    /// <param name="cancellationToken">Token used to abort the migration run.</param>
    /// <returns>The number of migrations applied, as reported by the runner.</returns>
    public Task<int> RunAsync(CancellationToken cancellationToken = default)
    {
        // Fall back to the module's default schema when none is configured.
        var schema = _options.SchemaName;
        if (string.IsNullOrWhiteSpace(schema))
        {
            schema = TimelineIndexerDataSource.DefaultSchemaName;
        }

        var runner = new MigrationRunner(
            _options.ConnectionString,
            schema,
            moduleName: "TimelineIndexer",
            _logger);

        return runner.RunFromAssemblyAsync(
            Assembly.GetExecutingAssembly(),
            ResourcePrefix,
            cancellationToken);
    }
}

View File

@@ -0,0 +1,85 @@
using System.Text;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.TimelineIndexer.Core.Abstractions;
using StellaOps.TimelineIndexer.Core.Models;
namespace StellaOps.TimelineIndexer.Infrastructure.Db;
/// <summary>
/// Postgres-backed read path for timeline events: filtered, keyset-paginated
/// listing plus point lookup by event id. All queries are tenant-scoped, with
/// RLS providing defense in depth.
/// </summary>
public sealed class TimelineQueryStore(TimelineIndexerDataSource dataSource, ILogger<TimelineQueryStore> logger)
    : RepositoryBase<TimelineIndexerDataSource>(dataSource, logger), ITimelineQueryStore
{
    private const string BaseSelect = """
        SELECT event_seq, event_id, tenant_id, event_type, source, occurred_at, received_at, correlation_id, trace_id, actor, severity, payload_hash
        FROM timeline.timeline_events
        WHERE tenant_id = @tenant_id
        """;

    /// <summary>
    /// Queries events for a tenant, newest first, applying only the filters set
    /// on <paramref name="options"/>. Limit is clamped to [1, 500].
    /// </summary>
    public async Task<IReadOnlyList<TimelineEventView>> QueryAsync(string tenantId, TimelineQueryOptions options, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(options);

        var sql = new StringBuilder(BaseSelect);
        if (!string.IsNullOrWhiteSpace(options.EventType)) sql.Append(" AND event_type = @event_type");
        if (!string.IsNullOrWhiteSpace(options.CorrelationId)) sql.Append(" AND correlation_id = @correlation_id");
        if (!string.IsNullOrWhiteSpace(options.TraceId)) sql.Append(" AND trace_id = @trace_id");
        // FIX: severity is a Postgres enum column; cast the text parameter so the
        // comparison is valid without a registered Npgsql enum mapping.
        if (!string.IsNullOrWhiteSpace(options.Severity)) sql.Append(" AND severity = @severity::timeline.event_severity");
        if (options.Since is not null) sql.Append(" AND occurred_at >= @since");
        // Keyset pagination: callers pass the last event_seq they saw.
        if (options.AfterEventSeq is not null) sql.Append(" AND event_seq < @after_seq");
        sql.Append(" ORDER BY occurred_at DESC, event_seq DESC LIMIT @limit");

        return await QueryAsync(
            tenantId,
            sql.ToString(),
            cmd =>
            {
                AddParameter(cmd, "tenant_id", tenantId);
                // FIX: bind a parameter only when its placeholder was appended
                // above; previously all parameters were added unconditionally,
                // which relies on the provider silently ignoring unused ones.
                if (!string.IsNullOrWhiteSpace(options.EventType)) AddParameter(cmd, "event_type", options.EventType);
                if (!string.IsNullOrWhiteSpace(options.CorrelationId)) AddParameter(cmd, "correlation_id", options.CorrelationId);
                if (!string.IsNullOrWhiteSpace(options.TraceId)) AddParameter(cmd, "trace_id", options.TraceId);
                if (!string.IsNullOrWhiteSpace(options.Severity)) AddParameter(cmd, "severity", options.Severity);
                if (options.Since is not null) AddParameter(cmd, "since", options.Since);
                if (options.AfterEventSeq is not null) AddParameter(cmd, "after_seq", options.AfterEventSeq);
                AddParameter(cmd, "limit", Math.Clamp(options.Limit, 1, 500));
            },
            MapEvent,
            cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Looks up a single event by id within a tenant; null when not found.
    /// </summary>
    public async Task<TimelineEventView?> GetAsync(string tenantId, string eventId, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT event_seq, event_id, tenant_id, event_type, source, occurred_at, received_at, correlation_id, trace_id, actor, severity, payload_hash
            FROM timeline.timeline_events
            WHERE tenant_id = @tenant_id AND event_id = @event_id
            """;

        return await QuerySingleOrDefaultAsync(
            tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "tenant_id", tenantId);
                AddParameter(cmd, "event_id", eventId);
            },
            MapEvent,
            cancellationToken).ConfigureAwait(false);
    }

    // Maps one reader row (column order must match the SELECT lists above).
    private static TimelineEventView MapEvent(NpgsqlDataReader reader) => new()
    {
        EventSeq = reader.GetInt64(0),
        EventId = reader.GetString(1),
        TenantId = reader.GetString(2),
        EventType = reader.GetString(3),
        Source = reader.GetString(4),
        OccurredAt = reader.GetFieldValue<DateTimeOffset>(5),
        ReceivedAt = reader.GetFieldValue<DateTimeOffset>(6),
        CorrelationId = GetNullableString(reader, 7),
        TraceId = GetNullableString(reader, 8),
        Actor = GetNullableString(reader, 9),
        Severity = reader.GetString(10),
        PayloadHash = GetNullableString(reader, 11)
    };
}

View File

@@ -0,0 +1,30 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Infrastructure.Postgres.Options;
using StellaOps.TimelineIndexer.Infrastructure.Db;
using StellaOps.TimelineIndexer.Core.Abstractions;
using StellaOps.TimelineIndexer.Core.Services;
namespace StellaOps.TimelineIndexer.Infrastructure.DependencyInjection;
/// <summary>
/// Timeline Indexer PostgreSQL service registration helpers.
/// </summary>
public static class ServiceCollectionExtensions
{
private const string DefaultSection = "Postgres:Timeline";
/// <summary>
/// Registers Postgres options, data source, and migration runner for the Timeline Indexer.
/// </summary>
public static IServiceCollection AddTimelineIndexerPostgres(
this IServiceCollection services,
IConfiguration configuration,
string sectionName = DefaultSection)
{
services.Configure<PostgresOptions>(configuration.GetSection(sectionName));
services.AddSingleton<TimelineIndexerDataSource>();
services.AddSingleton<TimelineIndexerMigrationRunner>();
services.AddScoped<ITimelineEventStore, TimelineEventStore>();
services.AddScoped<ITimelineIngestionService, TimelineIngestionService>();
services.AddScoped<ITimel

View File

@@ -3,26 +3,38 @@
<ItemGroup>
<ProjectReference Include="..\StellaOps.TimelineIndexer.Core\StellaOps.TimelineIndexer.Core.csproj"/>
</ItemGroup>
<PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.TimelineIndexer.Core\StellaOps.TimelineIndexer.Core.csproj"/>
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj"/>
</ItemGroup>
<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<LangVersion>preview</LangVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
</Project>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<ItemGroup>
<EmbeddedResource Include="Db/Migrations/*.sql" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0-rc.2.25502.107" />
<PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0-rc.2.25502.107" />
<PackageReference Include="Npgsql" Version="9.0.2" />
</ItemGroup>
</Project>

View File

@@ -0,0 +1,16 @@
using StellaOps.TimelineIndexer.Core.Abstractions;
using StellaOps.TimelineIndexer.Core.Models;
namespace StellaOps.TimelineIndexer.Infrastructure.Subscriptions;
/// <summary>
/// Default no-op subscriber used until transport bindings are configured.
/// Keeps the ingestion worker running without requiring live brokers.
/// </summary>
public sealed class NullTimelineEventSubscriber : ITimelineEventSubscriber
{
    /// <summary>Returns an empty, already-completed event stream.</summary>
    public IAsyncEnumerable<TimelineEventEnvelope> SubscribeAsync(CancellationToken cancellationToken = default)
        => AsyncEnumerable.Empty<TimelineEventEnvelope>();
}

View File

@@ -0,0 +1,32 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Infrastructure.Postgres.Connections;
using StellaOps.Infrastructure.Postgres.Options;
namespace StellaOps.TimelineIndexer.Infrastructure;
/// <summary>
/// PostgreSQL data source for the Timeline Indexer module.
/// Sets the default schema and carries tenant context via app.current_tenant.
/// </summary>
public sealed class TimelineIndexerDataSource : DataSourceBase
{
    // Postgres schema used when configuration does not provide SchemaName.
    public const string DefaultSchemaName = "timeline";

    public TimelineIndexerDataSource(IOptions<PostgresOptions> options, ILogger<TimelineIndexerDataSource> logger)
        : base(EnsureSchema(options.Value), logger)
    {
    }

    // Module name surfaced to the base class (presumably for logging/metrics
    // tagging — defined in DataSourceBase, outside this file).
    protected override string ModuleName => "TimelineIndexer";

    // Fills in the default schema when none is configured.
    // NOTE(review): this mutates the shared PostgresOptions instance resolved
    // from IOptions<T> rather than copying it; any other consumer of the same
    // options object will observe the rewritten SchemaName. Confirm this is
    // intended before reusing these options elsewhere.
    private static PostgresOptions EnsureSchema(PostgresOptions baseOptions)
    {
        if (string.IsNullOrWhiteSpace(baseOptions.SchemaName))
        {
            baseOptions.SchemaName = DefaultSchemaName;
        }

        return baseOptions;
    }
}