feat: Implement Filesystem and MongoDB provenance writers for PackRun execution context
Some checks failed
Airgap Sealed CI Smoke / sealed-smoke (push) Has been cancelled
Docs CI / lint-and-preview (push) Has been cancelled
Export Center CI / export-ci (push) Has been cancelled

- Added `FilesystemPackRunProvenanceWriter` to write provenance manifests to the filesystem.
- Introduced `MongoPackRunArtifactReader` to read artifacts from MongoDB.
- Created `MongoPackRunProvenanceWriter` to store provenance manifests in MongoDB.
- Developed unit tests for filesystem and MongoDB provenance writers.
- Established `ITimelineEventStore` and `ITimelineIngestionService` interfaces for timeline event handling.
- Implemented `TimelineIngestionService` to validate and persist timeline events with hashing.
- Created PostgreSQL schema and migration scripts for timeline indexing.
- Added dependency injection support for timeline indexer services.
- Developed tests for timeline ingestion and schema validation.
This commit is contained in:
StellaOps Bot
2025-11-30 15:38:14 +02:00
parent 8f54ffa203
commit 17d45a6d30
276 changed files with 8618 additions and 688 deletions

View File

@@ -0,0 +1,218 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Orchestrator.Core.Domain;
using PackLogLevel = StellaOps.Orchestrator.Core.Domain.LogLevel;
using StellaOps.Orchestrator.Infrastructure.Repositories;
namespace StellaOps.Orchestrator.Infrastructure.Postgres;
/// <summary>
/// PostgreSQL implementation for pack run logs. Logs are append-only and
/// keyed by (tenant_id, pack_run_id, sequence); all read paths page forward
/// by sequence number.
/// </summary>
public sealed class PostgresPackRunLogRepository : IPackRunLogRepository
{
    // Column order must match the ordinals read in ReadBatchAsync:
    // 0=log_id, 1=pack_run_id, 2=tenant_id, 3=sequence, 4=log_level,
    // 5=source, 6=message, 7=data, 8=created_at.
    private const string Columns = "log_id, pack_run_id, tenant_id, sequence, log_level, source, message, data, created_at";

    private const string InsertSql = """
        INSERT INTO pack_run_logs (log_id, tenant_id, pack_run_id, sequence, log_level, source, message, data, created_at)
        VALUES (@log_id, @tenant_id, @pack_run_id, @sequence, @log_level, @source, @message, @data, @created_at)
        """;

    private const string SelectLogsSql = $"""
        SELECT {Columns}
        FROM pack_run_logs
        WHERE tenant_id = @tenant_id AND pack_run_id = @pack_run_id AND sequence > @after
        ORDER BY sequence
        LIMIT @limit
        """;

    private const string SelectLogsByLevelSql = $"""
        SELECT {Columns}
        FROM pack_run_logs
        WHERE tenant_id = @tenant_id AND pack_run_id = @pack_run_id AND sequence > @after AND log_level >= @min_level
        ORDER BY sequence
        LIMIT @limit
        """;

    private const string SearchLogsSql = $"""
        SELECT {Columns}
        FROM pack_run_logs
        WHERE tenant_id = @tenant_id AND pack_run_id = @pack_run_id AND sequence > @after AND message ILIKE @pattern
        ORDER BY sequence
        LIMIT @limit
        """;

    private const string StatsSql = """
        SELECT COUNT(*)::BIGINT, COALESCE(MAX(sequence), -1)
        FROM pack_run_logs
        WHERE tenant_id = @tenant_id AND pack_run_id = @pack_run_id
        """;

    private const string DeleteSql = """
        DELETE FROM pack_run_logs
        WHERE tenant_id = @tenant_id AND pack_run_id = @pack_run_id
        """;

    private readonly OrchestratorDataSource _dataSource;
    private readonly ILogger<PostgresPackRunLogRepository> _logger;

    public PostgresPackRunLogRepository(OrchestratorDataSource dataSource, ILogger<PostgresPackRunLogRepository> logger)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>Appends a single log entry for the entry's own tenant.</summary>
    public async Task AppendAsync(PackRunLog log, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(log);

        await using var connection = await _dataSource.OpenConnectionAsync(log.TenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(InsertSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        AddParameters(command.Parameters, log);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Appends a batch of log entries in a single round-trip.
    /// All entries are expected to belong to the same tenant; the connection
    /// is routed using the first entry's tenant id.
    /// </summary>
    public async Task AppendBatchAsync(IReadOnlyList<PackRunLog> logs, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(logs);
        if (logs.Count == 0)
        {
            return;
        }

        var tenantId = logs[0].TenantId;
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        // Apply the configured timeout to the whole batch, consistent with the
        // single-command paths (the original omitted this on the batch path).
        await using var batch = new NpgsqlBatch(connection)
        {
            Timeout = _dataSource.CommandTimeoutSeconds
        };
        foreach (var log in logs)
        {
            var cmd = new NpgsqlBatchCommand(InsertSql);
            AddParameters(cmd.Parameters, log);
            batch.BatchCommands.Add(cmd);
        }

        await batch.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>Returns up to <paramref name="limit"/> logs with sequence greater than <paramref name="afterSequence"/>.</summary>
    public async Task<PackRunLogBatch> GetLogsAsync(string tenantId, Guid packRunId, long afterSequence, int limit, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(SelectLogsSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("pack_run_id", packRunId);
        command.Parameters.AddWithValue("after", afterSequence);
        command.Parameters.AddWithValue("limit", limit);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await ReadBatchAsync(reader, tenantId, packRunId, cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Returns the total log count and the highest sequence number for a run.
    /// Latest sequence is -1 when the run has no logs.
    /// </summary>
    public async Task<(long Count, long LatestSequence)> GetLogStatsAsync(string tenantId, Guid packRunId, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(StatsSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("pack_run_id", packRunId);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            // Aggregate query always yields one row; this is a defensive fallback.
            return (0, -1);
        }

        return (reader.GetInt64(0), reader.GetInt64(1));
    }

    /// <summary>Returns logs at or above <paramref name="minLevel"/> (levels compare by their integer value).</summary>
    public async Task<PackRunLogBatch> GetLogsByLevelAsync(string tenantId, Guid packRunId, PackLogLevel minLevel, long afterSequence, int limit, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(SelectLogsByLevelSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("pack_run_id", packRunId);
        command.Parameters.AddWithValue("after", afterSequence);
        command.Parameters.AddWithValue("limit", limit);
        command.Parameters.AddWithValue("min_level", (int)minLevel);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await ReadBatchAsync(reader, tenantId, packRunId, cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Returns logs whose message contains <paramref name="pattern"/>
    /// (case-insensitive ILIKE substring match; the pattern is passed as a
    /// parameter, so it is not subject to SQL injection, though ILIKE
    /// wildcards inside it are interpreted).
    /// </summary>
    public async Task<PackRunLogBatch> SearchLogsAsync(string tenantId, Guid packRunId, string pattern, long afterSequence, int limit, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(SearchLogsSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("pack_run_id", packRunId);
        command.Parameters.AddWithValue("after", afterSequence);
        command.Parameters.AddWithValue("limit", limit);
        command.Parameters.AddWithValue("pattern", $"%{pattern}%");
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await ReadBatchAsync(reader, tenantId, packRunId, cancellationToken).ConfigureAwait(false);
    }

    /// <summary>Deletes all logs for a run and returns the number of rows removed.</summary>
    public async Task<long> DeleteLogsAsync(string tenantId, Guid packRunId, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(DeleteSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("pack_run_id", packRunId);
        var rows = await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return rows;
    }

    // Binds the insert parameters for one log entry. The nullable columns
    // (source, data) are sent as DBNull; data is typed jsonb explicitly.
    private static void AddParameters(NpgsqlParameterCollection parameters, PackRunLog log)
    {
        parameters.AddWithValue("log_id", log.LogId);
        parameters.AddWithValue("tenant_id", log.TenantId);
        parameters.AddWithValue("pack_run_id", log.PackRunId);
        parameters.AddWithValue("sequence", log.Sequence);
        parameters.AddWithValue("log_level", (int)log.Level);
        parameters.AddWithValue("source", (object?)log.Source ?? DBNull.Value);
        parameters.AddWithValue("message", log.Message);
        parameters.Add(new NpgsqlParameter("data", NpgsqlDbType.Jsonb) { Value = (object?)log.Data ?? DBNull.Value });
        parameters.AddWithValue("created_at", log.Timestamp);
    }

    // Drains the reader into a PackRunLogBatch. StartSequence is the first
    // row's sequence, or 0 when the result set is empty.
    private static async Task<PackRunLogBatch> ReadBatchAsync(NpgsqlDataReader reader, string tenantId, Guid packRunId, CancellationToken cancellationToken)
    {
        var logs = new List<PackRunLog>();
        long startSequence = -1;
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            var log = new PackRunLog(
                LogId: reader.GetGuid(0),
                TenantId: reader.GetString(2),
                PackRunId: reader.GetGuid(1),
                Sequence: reader.GetInt64(3),
                Level: (PackLogLevel)reader.GetInt32(4),
                Source: reader.IsDBNull(5) ? "unknown" : reader.GetString(5),
                Message: reader.GetString(6),
                Timestamp: reader.GetFieldValue<DateTimeOffset>(8),
                Data: reader.IsDBNull(7) ? null : reader.GetString(7));
            if (startSequence < 0)
            {
                startSequence = log.Sequence;
            }

            logs.Add(log);
        }

        if (startSequence < 0)
        {
            startSequence = 0;
        }

        return new PackRunLogBatch(packRunId, tenantId, startSequence, logs);
    }
}

View File

@@ -0,0 +1,525 @@
using System.Globalization;
using Microsoft.Extensions.Logging;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Orchestrator.Core.Domain;
using StellaOps.Orchestrator.Infrastructure.Repositories;
namespace StellaOps.Orchestrator.Infrastructure.Postgres;
/// <summary>
/// PostgreSQL implementation for pack run persistence. Handles creation,
/// status transitions, lease management (acquire / extend / release with
/// SKIP LOCKED), and filtered listing of pack runs.
/// </summary>
public sealed class PostgresPackRunRepository : IPackRunRepository
{
    // Column order must match the ordinals read in Map().
    private const string Columns = """
        pack_run_id, tenant_id, project_id, pack_id, pack_version, status, priority, attempt, max_attempts,
        parameters, parameters_digest, idempotency_key, correlation_id, lease_id, task_runner_id, lease_until,
        created_at, scheduled_at, leased_at, started_at, completed_at, not_before, reason, exit_code, duration_ms,
        created_by, metadata
        """;

    private const string SelectByIdSql = $"SELECT {Columns} FROM pack_runs WHERE tenant_id = @tenant_id AND pack_run_id = @pack_run_id";

    private const string SelectByIdempotencySql = $"SELECT {Columns} FROM pack_runs WHERE tenant_id = @tenant_id AND idempotency_key = @idempotency_key";

    private const string InsertSql = """
        INSERT INTO pack_runs (
            pack_run_id, tenant_id, project_id, pack_id, pack_version, status, priority, attempt, max_attempts,
            parameters, parameters_digest, idempotency_key, correlation_id, lease_id, task_runner_id, lease_until,
            created_at, scheduled_at, leased_at, started_at, completed_at, not_before, reason, exit_code, duration_ms,
            created_by, metadata)
        VALUES (
            @pack_run_id, @tenant_id, @project_id, @pack_id, @pack_version, @status::pack_run_status, @priority,
            @attempt, @max_attempts, @parameters, @parameters_digest, @idempotency_key, @correlation_id, @lease_id,
            @task_runner_id, @lease_until, @created_at, @scheduled_at, @leased_at, @started_at, @completed_at,
            @not_before, @reason, @exit_code, @duration_ms, @created_by, @metadata)
        """;

    private const string UpdateStatusSql = """
        UPDATE pack_runs
        SET status = @status::pack_run_status,
            attempt = @attempt,
            lease_id = @lease_id,
            task_runner_id = @task_runner_id,
            lease_until = @lease_until,
            scheduled_at = @scheduled_at,
            leased_at = @leased_at,
            started_at = @started_at,
            completed_at = @completed_at,
            not_before = @not_before,
            reason = @reason,
            exit_code = @exit_code,
            duration_ms = @duration_ms
        WHERE tenant_id = @tenant_id AND pack_run_id = @pack_run_id
        """;

    // {0} = optional pack filter, {1} = Columns. SKIP LOCKED lets concurrent
    // runners lease different rows without blocking each other.
    private const string LeaseNextSqlTemplate = """
        UPDATE pack_runs
        SET status = 'leased'::pack_run_status,
            lease_id = @lease_id,
            task_runner_id = @task_runner_id,
            lease_until = @lease_until,
            leased_at = @leased_at
        WHERE tenant_id = @tenant_id
          AND pack_run_id = (
            SELECT pack_run_id
            FROM pack_runs
            WHERE tenant_id = @tenant_id
              AND status = 'scheduled'::pack_run_status
              AND (not_before IS NULL OR not_before <= @now)
              {0}
            ORDER BY priority DESC, created_at
            LIMIT 1
            FOR UPDATE SKIP LOCKED)
        RETURNING {1};
        """;

    private const string ExtendLeaseSql = """
        UPDATE pack_runs
        SET lease_until = @new_lease_until
        WHERE tenant_id = @tenant_id
          AND pack_run_id = @pack_run_id
          AND lease_id = @lease_id
          AND status = 'leased'::pack_run_status
          AND lease_until > @now
        """;

    // completed_at is preserved when the parameter is NULL so that releasing
    // a lease back to a non-terminal status does not stamp a completion time.
    private const string ReleaseLeaseSql = """
        UPDATE pack_runs
        SET status = @status::pack_run_status,
            lease_id = NULL,
            task_runner_id = NULL,
            lease_until = NULL,
            completed_at = CASE WHEN @completed_at IS NULL THEN completed_at ELSE @completed_at END,
            reason = @reason
        WHERE tenant_id = @tenant_id AND pack_run_id = @pack_run_id AND lease_id = @lease_id
        """;

    private const string ListSqlTemplate = "SELECT " + Columns + @"
        FROM pack_runs
        WHERE tenant_id = @tenant_id
        {0}
        ORDER BY created_at DESC
        LIMIT @limit OFFSET @offset";

    private const string CountSqlTemplate = @"SELECT COUNT(*)
        FROM pack_runs
        WHERE tenant_id = @tenant_id
        {0}";

    private const string ExpiredLeaseSql = $"""
        SELECT {Columns}
        FROM pack_runs
        WHERE status = 'leased'::pack_run_status
          AND lease_until < @cutoff
        ORDER BY lease_until
        LIMIT @limit
        """;

    private const string CancelPendingSql = """
        UPDATE pack_runs
        SET status = 'canceled'::pack_run_status,
            reason = @reason,
            completed_at = NOW()
        WHERE tenant_id = @tenant_id
          AND status = 'pending'::pack_run_status
          {0}
        """;

    private readonly OrchestratorDataSource _dataSource;
    private readonly ILogger<PostgresPackRunRepository> _logger;

    public PostgresPackRunRepository(OrchestratorDataSource dataSource, ILogger<PostgresPackRunRepository> logger)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>Fetches a pack run by id, or null when not found.</summary>
    public async Task<PackRun?> GetByIdAsync(string tenantId, Guid packRunId, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(SelectByIdSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("pack_run_id", packRunId);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            return null;
        }

        return Map(reader);
    }

    /// <summary>Fetches a pack run by its idempotency key, or null when not found.</summary>
    public async Task<PackRun?> GetByIdempotencyKeyAsync(string tenantId, string idempotencyKey, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(SelectByIdempotencySql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("idempotency_key", idempotencyKey);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            return null;
        }

        return Map(reader);
    }

    /// <summary>
    /// Inserts a new pack run. A unique-violation on the idempotency key is
    /// logged and rethrown so callers can deduplicate.
    /// </summary>
    public async Task CreateAsync(PackRun packRun, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(packRun.TenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(InsertSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        AddParameters(command, packRun);
        try
        {
            await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
            OrchestratorMetrics.PackRunCreated(packRun.TenantId, packRun.PackId);
        }
        catch (PostgresException ex) when (string.Equals(ex.SqlState, PostgresErrorCodes.UniqueViolation, StringComparison.Ordinal))
        {
            _logger.LogWarning(ex, "Duplicate pack run idempotency key {Key} for tenant {Tenant}", packRun.IdempotencyKey, packRun.TenantId);
            throw;
        }
    }

    /// <summary>
    /// Overwrites the full mutable status snapshot of a run. All nullable
    /// columns are set to exactly the values supplied (null clears them).
    /// </summary>
    public async Task UpdateStatusAsync(
        string tenantId,
        Guid packRunId,
        PackRunStatus status,
        int attempt,
        Guid? leaseId,
        string? taskRunnerId,
        DateTimeOffset? leaseUntil,
        DateTimeOffset? scheduledAt,
        DateTimeOffset? leasedAt,
        DateTimeOffset? startedAt,
        DateTimeOffset? completedAt,
        DateTimeOffset? notBefore,
        string? reason,
        int? exitCode,
        long? durationMs,
        CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(UpdateStatusSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("pack_run_id", packRunId);
        command.Parameters.AddWithValue("status", StatusToString(status));
        command.Parameters.AddWithValue("attempt", attempt);
        command.Parameters.AddWithValue("lease_id", (object?)leaseId ?? DBNull.Value);
        command.Parameters.AddWithValue("task_runner_id", (object?)taskRunnerId ?? DBNull.Value);
        command.Parameters.AddWithValue("lease_until", (object?)leaseUntil ?? DBNull.Value);
        command.Parameters.AddWithValue("scheduled_at", (object?)scheduledAt ?? DBNull.Value);
        command.Parameters.AddWithValue("leased_at", (object?)leasedAt ?? DBNull.Value);
        command.Parameters.AddWithValue("started_at", (object?)startedAt ?? DBNull.Value);
        command.Parameters.AddWithValue("completed_at", (object?)completedAt ?? DBNull.Value);
        command.Parameters.AddWithValue("not_before", (object?)notBefore ?? DBNull.Value);
        command.Parameters.AddWithValue("reason", (object?)reason ?? DBNull.Value);
        command.Parameters.AddWithValue("exit_code", (object?)exitCode ?? DBNull.Value);
        command.Parameters.AddWithValue("duration_ms", (object?)durationMs ?? DBNull.Value);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Atomically leases the highest-priority eligible scheduled run
    /// (optionally filtered by pack id), or returns null when none is
    /// available. Uses FOR UPDATE SKIP LOCKED so concurrent callers do not
    /// contend for the same row.
    /// </summary>
    public async Task<PackRun?> LeaseNextAsync(
        string tenantId,
        string? packId,
        Guid leaseId,
        string taskRunnerId,
        DateTimeOffset leaseUntil,
        CancellationToken cancellationToken)
    {
        var packFilter = string.IsNullOrWhiteSpace(packId) ? string.Empty : "AND pack_id = @pack_id";
        var sql = string.Format(LeaseNextSqlTemplate, packFilter, Columns);
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(sql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        var now = DateTimeOffset.UtcNow;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("lease_id", leaseId);
        command.Parameters.AddWithValue("task_runner_id", taskRunnerId);
        command.Parameters.AddWithValue("lease_until", leaseUntil);
        command.Parameters.AddWithValue("leased_at", now);
        command.Parameters.AddWithValue("now", now);
        if (!string.IsNullOrWhiteSpace(packId))
        {
            command.Parameters.AddWithValue("pack_id", packId!);
        }

        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            return null;
        }

        return Map(reader);
    }

    /// <summary>
    /// Extends an active (non-expired) lease. Returns false when the lease no
    /// longer matches or has already expired.
    /// </summary>
    public async Task<bool> ExtendLeaseAsync(string tenantId, Guid packRunId, Guid leaseId, DateTimeOffset newLeaseUntil, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(ExtendLeaseSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("pack_run_id", packRunId);
        command.Parameters.AddWithValue("lease_id", leaseId);
        command.Parameters.AddWithValue("new_lease_until", newLeaseUntil);
        command.Parameters.AddWithValue("now", DateTimeOffset.UtcNow);
        var rows = await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return rows > 0;
    }

    /// <summary>
    /// Releases a lease and transitions the run to <paramref name="newStatus"/>.
    /// completed_at is stamped only for terminal statuses; releasing back to a
    /// non-terminal status (e.g. rescheduling for retry) leaves it untouched.
    /// </summary>
    public async Task ReleaseLeaseAsync(string tenantId, Guid packRunId, Guid leaseId, PackRunStatus newStatus, string? reason, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(ReleaseLeaseSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("pack_run_id", packRunId);
        command.Parameters.AddWithValue("lease_id", leaseId);
        command.Parameters.AddWithValue("status", StatusToString(newStatus));
        command.Parameters.AddWithValue("reason", (object?)reason ?? DBNull.Value);
        // Send NULL for non-terminal statuses so the SQL CASE preserves the
        // existing completed_at value. The parameter is typed explicitly so
        // Postgres can resolve "@completed_at IS NULL" even when null is sent.
        command.Parameters.Add(new NpgsqlParameter("completed_at", NpgsqlDbType.TimestampTz)
        {
            Value = IsTerminal(newStatus) ? DateTimeOffset.UtcNow : DBNull.Value
        });
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>Lists runs for a tenant with optional filters, newest first, paged by limit/offset.</summary>
    public async Task<IReadOnlyList<PackRun>> ListAsync(
        string tenantId,
        string? packId,
        PackRunStatus? status,
        string? projectId,
        DateTimeOffset? createdAfter,
        DateTimeOffset? createdBefore,
        int limit,
        int offset,
        CancellationToken cancellationToken)
    {
        var filters = BuildFilters(packId, status, projectId, createdAfter, createdBefore, out var parameters);
        var sql = string.Format(ListSqlTemplate, filters);
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(sql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("limit", limit);
        command.Parameters.AddWithValue("offset", offset);
        foreach (var param in parameters)
        {
            command.Parameters.Add(param);
        }

        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        var results = new List<PackRun>();
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            results.Add(Map(reader));
        }

        return results;
    }

    /// <summary>Counts runs for a tenant matching the same filters as <see cref="ListAsync"/> (without the date range).</summary>
    public async Task<int> CountAsync(string tenantId, string? packId, PackRunStatus? status, string? projectId, CancellationToken cancellationToken)
    {
        var filters = BuildFilters(packId, status, projectId, null, null, out var parameters);
        var sql = string.Format(CountSqlTemplate, filters);
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(sql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        foreach (var param in parameters)
        {
            command.Parameters.Add(param);
        }

        var countObj = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return Convert.ToInt32(countObj, CultureInfo.InvariantCulture);
    }

    /// <summary>
    /// Returns leased runs whose lease expired before <paramref name="cutoff"/>,
    /// across all tenants (the sweep is system-wide).
    /// NOTE(review): the empty tenant id routes the connection; assumes
    /// OrchestratorDataSource supports a cross-tenant reader connection — confirm.
    /// </summary>
    public async Task<IReadOnlyList<PackRun>> GetExpiredLeasesAsync(DateTimeOffset cutoff, int limit, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync("", "reader", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(ExpiredLeaseSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("cutoff", cutoff);
        command.Parameters.AddWithValue("limit", limit);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        var results = new List<PackRun>();
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            results.Add(Map(reader));
        }

        return results;
    }

    /// <summary>Cancels all pending runs for a tenant (optionally one pack) and returns the number canceled.</summary>
    public async Task<int> CancelPendingAsync(string tenantId, string? packId, string reason, CancellationToken cancellationToken)
    {
        var filter = string.IsNullOrWhiteSpace(packId) ? string.Empty : "AND pack_id = @pack_id";
        var sql = string.Format(CancelPendingSql, filter);
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(sql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("reason", reason);
        if (!string.IsNullOrWhiteSpace(packId))
        {
            command.Parameters.AddWithValue("pack_id", packId!);
        }

        var rows = await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return rows;
    }

    // True for statuses that end a run's lifecycle (per the schema's enum:
    // succeeded, failed, canceled, timed_out).
    private static bool IsTerminal(PackRunStatus status) =>
        status is PackRunStatus.Succeeded
            or PackRunStatus.Failed
            or PackRunStatus.Canceled
            or PackRunStatus.TimedOut;

    // Builds the dynamic "AND ..." filter clause for list/count queries. Only
    // fixed column predicates are concatenated; all values go via parameters.
    private static string BuildFilters(
        string? packId,
        PackRunStatus? status,
        string? projectId,
        DateTimeOffset? createdAfter,
        DateTimeOffset? createdBefore,
        out List<NpgsqlParameter> parameters)
    {
        var filters = new List<string>();
        parameters = new List<NpgsqlParameter>();
        if (!string.IsNullOrWhiteSpace(packId))
        {
            filters.Add("pack_id = @pack_id");
            parameters.Add(new NpgsqlParameter("pack_id", packId!));
        }

        if (status.HasValue)
        {
            filters.Add("status = @status::pack_run_status");
            parameters.Add(new NpgsqlParameter("status", StatusToString(status.Value)));
        }

        if (!string.IsNullOrWhiteSpace(projectId))
        {
            filters.Add("project_id = @project_id");
            parameters.Add(new NpgsqlParameter("project_id", projectId!));
        }

        if (createdAfter.HasValue)
        {
            filters.Add("created_at >= @created_after");
            parameters.Add(new NpgsqlParameter("created_after", createdAfter.Value));
        }

        if (createdBefore.HasValue)
        {
            filters.Add("created_at <= @created_before");
            parameters.Add(new NpgsqlParameter("created_before", createdBefore.Value));
        }

        return filters.Count == 0 ? string.Empty : " AND " + string.Join(" AND ", filters);
    }

    // Binds all insert parameters from a PackRun; metadata is typed jsonb.
    private static void AddParameters(NpgsqlCommand command, PackRun packRun)
    {
        command.Parameters.AddWithValue("pack_run_id", packRun.PackRunId);
        command.Parameters.AddWithValue("tenant_id", packRun.TenantId);
        command.Parameters.AddWithValue("project_id", (object?)packRun.ProjectId ?? DBNull.Value);
        command.Parameters.AddWithValue("pack_id", packRun.PackId);
        command.Parameters.AddWithValue("pack_version", packRun.PackVersion);
        command.Parameters.AddWithValue("status", StatusToString(packRun.Status));
        command.Parameters.AddWithValue("priority", packRun.Priority);
        command.Parameters.AddWithValue("attempt", packRun.Attempt);
        command.Parameters.AddWithValue("max_attempts", packRun.MaxAttempts);
        command.Parameters.AddWithValue("parameters", packRun.Parameters);
        command.Parameters.AddWithValue("parameters_digest", packRun.ParametersDigest);
        command.Parameters.AddWithValue("idempotency_key", packRun.IdempotencyKey);
        command.Parameters.AddWithValue("correlation_id", (object?)packRun.CorrelationId ?? DBNull.Value);
        command.Parameters.AddWithValue("lease_id", (object?)packRun.LeaseId ?? DBNull.Value);
        command.Parameters.AddWithValue("task_runner_id", (object?)packRun.TaskRunnerId ?? DBNull.Value);
        command.Parameters.AddWithValue("lease_until", (object?)packRun.LeaseUntil ?? DBNull.Value);
        command.Parameters.AddWithValue("created_at", packRun.CreatedAt);
        command.Parameters.AddWithValue("scheduled_at", (object?)packRun.ScheduledAt ?? DBNull.Value);
        command.Parameters.AddWithValue("leased_at", (object?)packRun.LeasedAt ?? DBNull.Value);
        command.Parameters.AddWithValue("started_at", (object?)packRun.StartedAt ?? DBNull.Value);
        command.Parameters.AddWithValue("completed_at", (object?)packRun.CompletedAt ?? DBNull.Value);
        command.Parameters.AddWithValue("not_before", (object?)packRun.NotBefore ?? DBNull.Value);
        command.Parameters.AddWithValue("reason", (object?)packRun.Reason ?? DBNull.Value);
        command.Parameters.AddWithValue("exit_code", (object?)packRun.ExitCode ?? DBNull.Value);
        command.Parameters.AddWithValue("duration_ms", (object?)packRun.DurationMs ?? DBNull.Value);
        command.Parameters.AddWithValue("created_by", packRun.CreatedBy);
        command.Parameters.Add(new NpgsqlParameter("metadata", NpgsqlDbType.Jsonb)
        {
            Value = (object?)packRun.Metadata ?? DBNull.Value
        });
    }

    // Maps the enum to the lowercase labels of the pack_run_status Postgres enum.
    private static string StatusToString(PackRunStatus status) => status switch
    {
        PackRunStatus.Pending => "pending",
        PackRunStatus.Scheduled => "scheduled",
        PackRunStatus.Leased => "leased",
        PackRunStatus.Running => "running",
        PackRunStatus.Succeeded => "succeeded",
        PackRunStatus.Failed => "failed",
        PackRunStatus.Canceled => "canceled",
        PackRunStatus.TimedOut => "timed_out",
        _ => throw new ArgumentOutOfRangeException(nameof(status), status, null)
    };

    // Materializes a PackRun from a row; ordinals follow the Columns constant.
    private static PackRun Map(NpgsqlDataReader reader)
    {
        return new PackRun(
            PackRunId: reader.GetGuid(0),
            TenantId: reader.GetString(1),
            ProjectId: reader.IsDBNull(2) ? null : reader.GetString(2),
            PackId: reader.GetString(3),
            PackVersion: reader.GetString(4),
            Status: ParseStatus(reader.GetString(5)),
            Priority: reader.GetInt32(6),
            Attempt: reader.GetInt32(7),
            MaxAttempts: reader.GetInt32(8),
            Parameters: reader.GetString(9),
            ParametersDigest: reader.GetString(10),
            IdempotencyKey: reader.GetString(11),
            CorrelationId: reader.IsDBNull(12) ? null : reader.GetString(12),
            LeaseId: reader.IsDBNull(13) ? null : reader.GetGuid(13),
            TaskRunnerId: reader.IsDBNull(14) ? null : reader.GetString(14),
            LeaseUntil: reader.IsDBNull(15) ? null : reader.GetFieldValue<DateTimeOffset>(15),
            CreatedAt: reader.GetFieldValue<DateTimeOffset>(16),
            ScheduledAt: reader.IsDBNull(17) ? null : reader.GetFieldValue<DateTimeOffset>(17),
            LeasedAt: reader.IsDBNull(18) ? null : reader.GetFieldValue<DateTimeOffset>(18),
            StartedAt: reader.IsDBNull(19) ? null : reader.GetFieldValue<DateTimeOffset>(19),
            CompletedAt: reader.IsDBNull(20) ? null : reader.GetFieldValue<DateTimeOffset>(20),
            NotBefore: reader.IsDBNull(21) ? null : reader.GetFieldValue<DateTimeOffset>(21),
            Reason: reader.IsDBNull(22) ? null : reader.GetString(22),
            ExitCode: reader.IsDBNull(23) ? null : reader.GetInt32(23),
            DurationMs: reader.IsDBNull(24) ? null : reader.GetInt64(24),
            CreatedBy: reader.GetString(25),
            Metadata: reader.IsDBNull(26) ? null : reader.GetString(26));
    }

    // Inverse of StatusToString; throws on unknown labels so schema drift fails loudly.
    private static PackRunStatus ParseStatus(string value) => value switch
    {
        "pending" => PackRunStatus.Pending,
        "scheduled" => PackRunStatus.Scheduled,
        "leased" => PackRunStatus.Leased,
        "running" => PackRunStatus.Running,
        "succeeded" => PackRunStatus.Succeeded,
        "failed" => PackRunStatus.Failed,
        "canceled" => PackRunStatus.Canceled,
        "timed_out" => PackRunStatus.TimedOut,
        _ => throw new ArgumentOutOfRangeException(nameof(value), value, "Unknown pack_run_status")
    };
}

View File

@@ -39,6 +39,8 @@ public static class ServiceCollectionExtensions
services.AddScoped<IThrottleRepository, PostgresThrottleRepository>();
services.AddScoped<IWatermarkRepository, PostgresWatermarkRepository>();
services.AddScoped<Infrastructure.Repositories.IBackfillRepository, PostgresBackfillRepository>();
services.AddScoped<IPackRunRepository, PostgresPackRunRepository>();
services.AddScoped<IPackRunLogRepository, PostgresPackRunLogRepository>();
// Register audit and ledger repositories
services.AddScoped<IAuditRepository, PostgresAuditRepository>();

View File

@@ -0,0 +1,81 @@
-- 006_pack_runs.sql
-- Pack run persistence and log streaming schema (ORCH-SVC-41/42-101)
BEGIN;
-- Enum for pack run lifecycle
CREATE TYPE pack_run_status AS ENUM (
'pending',
'scheduled',
'leased',
'running',
'succeeded',
'failed',
'canceled',
'timed_out'
);
-- Pack runs
-- List-partitioned by tenant_id: heavy tenants can be given dedicated partitions,
-- everything else lands in pack_runs_default below.
CREATE TABLE pack_runs (
pack_run_id UUID NOT NULL,
tenant_id TEXT NOT NULL,
project_id TEXT,
pack_id TEXT NOT NULL,
pack_version TEXT NOT NULL,
status pack_run_status NOT NULL DEFAULT 'pending',
priority INTEGER NOT NULL DEFAULT 0,
attempt INTEGER NOT NULL DEFAULT 1,
max_attempts INTEGER NOT NULL DEFAULT 3,
parameters TEXT NOT NULL,
-- SHA-256 hex digest of parameters; lowercase-hex enforced by check constraint below.
parameters_digest CHAR(64) NOT NULL,
idempotency_key TEXT NOT NULL,
correlation_id TEXT,
lease_id UUID,
task_runner_id TEXT,
lease_until TIMESTAMPTZ,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
scheduled_at TIMESTAMPTZ,
leased_at TIMESTAMPTZ,
started_at TIMESTAMPTZ,
completed_at TIMESTAMPTZ,
not_before TIMESTAMPTZ,
reason TEXT,
exit_code INTEGER,
duration_ms BIGINT,
created_by TEXT NOT NULL,
metadata JSONB,
-- Unique constraints on partitioned tables must include the partition key (tenant_id).
CONSTRAINT pk_pack_runs PRIMARY KEY (tenant_id, pack_run_id),
CONSTRAINT uq_pack_runs_idempotency UNIQUE (tenant_id, idempotency_key),
CONSTRAINT ck_pack_runs_attempt_positive CHECK (attempt >= 1),
CONSTRAINT ck_pack_runs_max_attempts_positive CHECK (max_attempts >= 1),
CONSTRAINT ck_pack_runs_parameters_digest_hex CHECK (parameters_digest ~ '^[0-9a-f]{64}$')
) PARTITION BY LIST (tenant_id);
CREATE TABLE pack_runs_default PARTITION OF pack_runs DEFAULT;
-- Dispatcher scan: ready work ordered by priority, then age.
CREATE INDEX ix_pack_runs_status ON pack_runs (tenant_id, status, priority DESC, created_at);
CREATE INDEX ix_pack_runs_pack ON pack_runs (tenant_id, pack_id, status, created_at DESC);
-- Partial indexes keep deferred-work and lease-expiry scans small.
CREATE INDEX ix_pack_runs_not_before ON pack_runs (tenant_id, not_before) WHERE not_before IS NOT NULL;
CREATE INDEX ix_pack_runs_lease_until ON pack_runs (tenant_id, lease_until) WHERE status = 'leased' AND lease_until IS NOT NULL;
-- Pack run logs
-- Append-only log stream; (tenant_id, pack_run_id, sequence) gives deterministic replay order.
CREATE TABLE pack_run_logs (
log_id UUID NOT NULL,
tenant_id TEXT NOT NULL,
pack_run_id UUID NOT NULL,
sequence BIGINT NOT NULL,
log_level SMALLINT NOT NULL,
source TEXT,
message TEXT NOT NULL,
data JSONB,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT pk_pack_run_logs PRIMARY KEY (tenant_id, pack_run_id, sequence),
-- FIX: a unique constraint on a partitioned table must include every partition key
-- column; UNIQUE (log_id) alone is rejected by PostgreSQL. log_id is a UUID, so
-- per-tenant uniqueness is equivalent in practice.
CONSTRAINT uq_pack_run_logs_log_id UNIQUE (tenant_id, log_id),
-- Composite FK mirrors the parent's partition-key-qualified primary key (valid on
-- partitioned referenced tables since PostgreSQL 12).
CONSTRAINT fk_pack_run_logs_run FOREIGN KEY (tenant_id, pack_run_id) REFERENCES pack_runs (tenant_id, pack_run_id)
) PARTITION BY LIST (tenant_id);
CREATE TABLE pack_run_logs_default PARTITION OF pack_run_logs DEFAULT;
CREATE INDEX ix_pack_run_logs_level ON pack_run_logs (tenant_id, pack_run_id, log_level, sequence);
CREATE INDEX ix_pack_run_logs_created ON pack_run_logs (tenant_id, pack_run_id, created_at);
COMMIT;

View File

@@ -0,0 +1,76 @@
using System.Text.Json;
using Microsoft.AspNetCore.Http;
using StellaOps.Orchestrator.WebService.Contracts;
using StellaOps.Orchestrator.WebService.Services;
namespace StellaOps.Orchestrator.Tests.ControlPlane;
/// <summary>
/// Unit coverage for OpenAPI discovery documents and deprecation headers (ORCH-OAS-61/63).
/// </summary>
public sealed class OpenApiDocumentsTests
{
    // Serializes the generated specification exactly the way the endpoint does,
    // so string-containment assertions match the wire payload.
    private static string SerializeSpecification(string version)
        => JsonSerializer.Serialize(OpenApiDocuments.CreateSpecification(version), OpenApiDocuments.SerializerOptions);

    [Fact]
    public void DiscoveryDocument_ContainsServiceMetadata()
    {
        var document = OpenApiDocuments.CreateDiscoveryDocument("1.2.3");

        Assert.Equal("orchestrator", document.Service);
        Assert.Equal("3.1.0", document.SpecVersion);
        Assert.Equal("1.2.3", document.Version);
        Assert.Equal("/openapi/orchestrator.json", document.Url);
        Assert.Equal("application/json", document.Format);
        Assert.Equal("#/components/schemas/Error", document.ErrorEnvelopeSchema);
        Assert.True(document.Notifications.ContainsKey("topic"));
    }

    [Fact]
    public void Specification_IncludesKeyPathsAndIdempotencyHeaders()
    {
        var serialized = SerializeSpecification("1.2.3");

        Assert.Contains("/api/v1/orchestrator/jobs", serialized);
        // The well-known discovery route is global; the per-service spec must not embed it.
        Assert.DoesNotContain("/.well-known/openapi", serialized);
        Assert.Contains("Idempotency-Key", serialized);
        Assert.Contains("deprecated", serialized);
        Assert.Contains("error", serialized);
    }

    [Fact]
    public void Specification_ExposesPaginationForJobs()
    {
        var serialized = SerializeSpecification("1.2.3");

        Assert.Contains("/api/v1/orchestrator/jobs", serialized);
        Assert.Contains("nextCursor", serialized);
        // RFC 8288 Link header example consumed by SDK paginators.
        Assert.Contains("cursor=", serialized);
    }

    [Fact]
    public void Specification_IncludesPackRunScheduleAndRetry()
    {
        var serialized = SerializeSpecification("1.2.3");

        Assert.Contains("/api/v1/orchestrator/pack-runs", serialized);
        Assert.Contains("SchedulePackRunRequest", serialized);
        Assert.Contains("/api/v1/orchestrator/pack-runs/{packRunId}/retry", serialized);
        Assert.Contains("RetryPackRunResponse", serialized);
    }

    [Fact]
    public void DeprecationHeaders_AddsStandardMetadata()
    {
        var context = new DefaultHttpContext();

        DeprecationHeaders.Apply(context.Response, "/api/v1/orchestrator/jobs");

        var headers = context.Response.Headers;
        Assert.Equal("true", headers["Deprecation"].ToString());
        Assert.Contains("alternate", headers["Link"].ToString());
        Assert.False(string.IsNullOrWhiteSpace(headers["Sunset"]));
        Assert.Equal("orchestrator:legacy-endpoint", headers["X-StellaOps-Deprecated"].ToString());
    }
}

View File

@@ -571,7 +571,9 @@ public sealed class ExportAlertTests
var after = DateTimeOffset.UtcNow;
Assert.NotNull(resolved.ResolvedAt);
Assert.InRange(resolved.ResolvedAt.Value, before, after);
var windowStart = before <= after ? before : after;
var windowEnd = before >= after ? before : after;
Assert.InRange(resolved.ResolvedAt.Value, windowStart, windowEnd);
Assert.Equal("Fixed database connection issue", resolved.ResolutionNotes);
Assert.False(resolved.IsActive);
}

View File

@@ -0,0 +1,120 @@
using System.Text;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Orchestrator.Core.Domain;
using StellaOps.Orchestrator.Infrastructure.Repositories;
using StellaOps.Orchestrator.WebService.Streaming;
using PackRunDomain = StellaOps.Orchestrator.Core.Domain.PackRun;
namespace StellaOps.Orchestrator.Tests.PackRuns;
/// <summary>
/// Exercises PackRunStreamCoordinator's SSE output for a run that is already terminal:
/// the written response body must contain an initial snapshot, a heartbeat, and a
/// completed event.
/// </summary>
public sealed class PackRunStreamCoordinatorTests
{
[Fact]
public async Task StreamAsync_TerminalRun_WritesInitialHeartbeatAndCompleted()
{
// Arrange: a fully terminal (Succeeded) pack run with self-consistent timestamps.
var now = DateTimeOffset.UtcNow;
var packRun = new PackRunDomain(
PackRunId: Guid.NewGuid(),
TenantId: "tenantA",
ProjectId: null,
PackId: "pack.demo",
PackVersion: "1.0.0",
Status: PackRunStatus.Succeeded,
Priority: 0,
Attempt: 1,
MaxAttempts: 3,
Parameters: "{}",
ParametersDigest: new string('a', 64),
IdempotencyKey: "idem-1",
CorrelationId: null,
LeaseId: null,
TaskRunnerId: "runner-1",
LeaseUntil: null,
CreatedAt: now.AddMinutes(-2),
ScheduledAt: now.AddMinutes(-2),
LeasedAt: now.AddMinutes(-1),
StartedAt: now.AddMinutes(-1),
CompletedAt: now,
NotBefore: null,
Reason: null,
ExitCode: 0,
DurationMs: 120_000,
CreatedBy: "tester",
Metadata: null);
// Log repository reports 2 entries with latest sequence 5 (see GetLogStatsAsync stub).
var logRepo = new StubPackRunLogRepository((2, 5));
// Short poll/heartbeat intervals keep the test fast; MaxStreamDuration bounds the stream.
var streamOptions = Options.Create(new StreamOptions
{
PollInterval = TimeSpan.FromMilliseconds(150),
HeartbeatInterval = TimeSpan.FromMilliseconds(150),
MaxStreamDuration = TimeSpan.FromMinutes(1)
});
var coordinator = new PackRunStreamCoordinator(
new StubPackRunRepository(packRun),
logRepo,
streamOptions,
TimeProvider.System,
NullLogger<PackRunStreamCoordinator>.Instance);
// Capture the SSE payload through an in-memory response body.
var context = new DefaultHttpContext();
await using var body = new MemoryStream();
context.Response.Body = body;
// Act
await coordinator.StreamAsync(context, packRun.TenantId, packRun, CancellationToken.None);
// Assert: the raw SSE text contains the three expected event types.
body.Position = 0;
var payload = Encoding.UTF8.GetString(body.ToArray());
Assert.Contains("event: initial", payload);
Assert.Contains("event: heartbeat", payload);
Assert.Contains("event: completed", payload);
}
// Minimal IPackRunRepository stub that always serves the single seeded run and
// answers lease/update calls with no-op successes.
private sealed class StubPackRunRepository : IPackRunRepository
{
private readonly PackRunDomain _packRun;
public StubPackRunRepository(PackRunDomain packRun)
{
_packRun = packRun;
}
public Task<PackRunDomain?> GetByIdAsync(string tenantId, Guid packRunId, CancellationToken cancellationToken)
=> Task.FromResult<PackRunDomain?>(_packRun);
public Task<PackRunDomain?> GetByIdempotencyKeyAsync(string tenantId, string idempotencyKey, CancellationToken cancellationToken) => Task.FromResult<PackRunDomain?>(_packRun);
public Task CreateAsync(PackRunDomain packRun, CancellationToken cancellationToken) => Task.CompletedTask;
public Task UpdateStatusAsync(string tenantId, Guid packRunId, PackRunStatus status, int attempt, Guid? leaseId, string? taskRunnerId, DateTimeOffset? leaseUntil, DateTimeOffset? scheduledAt, DateTimeOffset? leasedAt, DateTimeOffset? startedAt, DateTimeOffset? completedAt, DateTimeOffset? notBefore, string? reason, int? exitCode, long? durationMs, CancellationToken cancellationToken) => Task.CompletedTask;
public Task<PackRunDomain?> LeaseNextAsync(string tenantId, string? packId, Guid leaseId, string taskRunnerId, DateTimeOffset leaseUntil, CancellationToken cancellationToken) => Task.FromResult<PackRunDomain?>(_packRun);
public Task<bool> ExtendLeaseAsync(string tenantId, Guid packRunId, Guid leaseId, DateTimeOffset newLeaseUntil, CancellationToken cancellationToken) => Task.FromResult(true);
public Task ReleaseLeaseAsync(string tenantId, Guid packRunId, Guid leaseId, PackRunStatus newStatus, string? reason, CancellationToken cancellationToken) => Task.CompletedTask;
public Task<IReadOnlyList<PackRunDomain>> ListAsync(string tenantId, string? packId, PackRunStatus? status, string? projectId, DateTimeOffset? createdAfter, DateTimeOffset? createdBefore, int limit, int offset, CancellationToken cancellationToken) => Task.FromResult<IReadOnlyList<PackRunDomain>>(new[] { _packRun });
public Task<int> CountAsync(string tenantId, string? packId, PackRunStatus? status, string? projectId, CancellationToken cancellationToken) => Task.FromResult(1);
public Task<IReadOnlyList<PackRunDomain>> GetExpiredLeasesAsync(DateTimeOffset cutoff, int limit, CancellationToken cancellationToken) => Task.FromResult<IReadOnlyList<PackRunDomain>>(Array.Empty<PackRunDomain>());
public Task<int> CancelPendingAsync(string tenantId, string? packId, string reason, CancellationToken cancellationToken) => Task.FromResult(0);
}
// IPackRunLogRepository stub that returns the configured (count, latest-sequence)
// statistics and otherwise serves empty log batches.
private sealed class StubPackRunLogRepository : IPackRunLogRepository
{
private readonly (long Count, long Latest) _stats;
public StubPackRunLogRepository((long Count, long Latest) stats)
{
_stats = stats;
}
public Task AppendAsync(PackRunLog log, CancellationToken cancellationToken) => Task.CompletedTask;
public Task AppendBatchAsync(IReadOnlyList<PackRunLog> logs, CancellationToken cancellationToken) => Task.CompletedTask;
public Task<PackRunLogBatch> GetLogsAsync(string tenantId, Guid packRunId, long afterSequence, int limit, CancellationToken cancellationToken)
=> Task.FromResult(new PackRunLogBatch(packRunId, tenantId, afterSequence, new List<PackRunLog>()));
public Task<(long Count, long LatestSequence)> GetLogStatsAsync(string tenantId, Guid packRunId, CancellationToken cancellationToken)
=> Task.FromResult(_stats);
public Task<PackRunLogBatch> GetLogsByLevelAsync(string tenantId, Guid packRunId, LogLevel minLevel, long afterSequence, int limit, CancellationToken cancellationToken)
=> Task.FromResult(new PackRunLogBatch(packRunId, tenantId, afterSequence, new List<PackRunLog>()));
public Task<PackRunLogBatch> SearchLogsAsync(string tenantId, Guid packRunId, string pattern, long afterSequence, int limit, CancellationToken cancellationToken)
=> Task.FromResult(new PackRunLogBatch(packRunId, tenantId, afterSequence, new List<PackRunLog>()));
public Task<long> DeleteLogsAsync(string tenantId, Guid packRunId, CancellationToken cancellationToken) => Task.FromResult(0L);
}
}

View File

@@ -6,6 +6,8 @@ namespace StellaOps.Orchestrator.WebService.Contracts;
/// Response representing a job.
/// </summary>
public sealed record JobResponse(
string TenantId,
string? ProjectId,
Guid JobId,
Guid? RunId,
string JobType,
@@ -26,6 +28,8 @@ public sealed record JobResponse(
string CreatedBy)
{
public static JobResponse FromDomain(Job job) => new(
job.TenantId,
job.ProjectId,
job.JobId,
job.RunId,
job.JobType,
@@ -50,6 +54,8 @@ public sealed record JobResponse(
/// Response representing a job with its full payload.
/// </summary>
public sealed record JobDetailResponse(
string TenantId,
string? ProjectId,
Guid JobId,
Guid? RunId,
string JobType,
@@ -75,6 +81,8 @@ public sealed record JobDetailResponse(
string CreatedBy)
{
public static JobDetailResponse FromDomain(Job job) => new(
job.TenantId,
job.ProjectId,
job.JobId,
job.RunId,
job.JobType,

View File

@@ -0,0 +1,760 @@
using System.Reflection;
using System.Text.Json;
using System.Text.Json.Serialization;
namespace StellaOps.Orchestrator.WebService.Contracts;
/// <summary>
/// Factory for per-service OpenAPI discovery and specification documents.
/// </summary>
public static class OpenApiDocuments
{
public static readonly JsonSerializerOptions SerializerOptions = new(JsonSerializerDefaults.Web)
{
WriteIndented = true
};
/// <summary>
/// Resolve the service build/version string from the executing assembly,
/// falling back to "0.0.0" when no assembly version is stamped.
/// </summary>
public static string GetServiceVersion()
{
    var assemblyVersion = Assembly.GetExecutingAssembly().GetName().Version;
    return assemblyVersion is null ? "0.0.0" : assemblyVersion.ToString();
}
/// <summary>
/// Build the discovery document that points clients at this service's
/// OpenAPI specification and its contract-change notification channel.
/// </summary>
/// <param name="version">Service build version to advertise.</param>
public static OpenApiDiscoveryDocument CreateDiscoveryDocument(string version)
{
    var notifications = new Dictionary<string, string>
    {
        ["topic"] = "orchestrator.contracts",
        ["event"] = "orchestrator.openapi.updated"
    };

    return new OpenApiDiscoveryDocument(
        Service: "orchestrator",
        SpecVersion: "3.1.0",
        Version: version,
        Format: "application/json",
        Url: "/openapi/orchestrator.json",
        ErrorEnvelopeSchema: "#/components/schemas/Error",
        Notifications: notifications);
}
public static OpenApiSpecDocument CreateSpecification(string version)
{
var exampleJob = ExampleJob();
var exampleJobDetail = ExampleJobDetail();
var exampleClaimRequest = new
{
workerId = "worker-7f9",
jobType = "sbom.build",
idempotencyKey = "claim-12af",
leaseSeconds = 300,
taskRunnerId = "runner-01"
};
var exampleClaimResponse = new
{
jobId = Guid.Parse("11111111-2222-3333-4444-555555555555"),
leaseId = Guid.Parse("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
leaseUntil = "2025-11-30T12:05:00Z",
job = exampleJobDetail
};
var examplePackRunRequest = new
{
packId = "pack.advisory.sbom",
packVersion = "1.2.3",
parameters = @"{""image"":""registry.example/app:1.0.0""}",
projectId = "proj-17",
idempotencyKey = "packrun-123",
priority = 5,
maxAttempts = 3
};
var examplePackRunResponse = new
{
packRunId = Guid.Parse("99999999-0000-1111-2222-333333333333"),
packId = "pack.advisory.sbom",
packVersion = "1.2.3",
status = "scheduled",
idempotencyKey = "packrun-123",
createdAt = "2025-11-30T12:00:00Z",
wasAlreadyScheduled = false
};
var exampleRetryRequest = new
{
parameters = @"{""image"":""registry.example/app:1.0.1""}",
idempotencyKey = "retry-123"
};
var exampleRetryResponse = new
{
originalPackRunId = Guid.Parse("99999999-0000-1111-2222-333333333333"),
newPackRunId = Guid.Parse("aaaaaaaa-0000-1111-2222-bbbbbbbbbbbb"),
status = "scheduled",
createdAt = "2025-11-30T12:10:00Z"
};
var paths = new Dictionary<string, object>
{
["/api/v1/orchestrator/jobs"] = new
{
get = new
{
summary = "List jobs",
description = "Paginated job listing with deterministic cursor ordering and idempotent retries.",
parameters = new object[]
{
QueryParameter("status", "query", "Job status filter (pending|scheduled|leased|succeeded|failed)", "string", "scheduled"),
QueryParameter("jobType", "query", "Filter by job type", "string", "sbom.build"),
QueryParameter("projectId", "query", "Filter by project identifier", "string", "proj-17"),
QueryParameter("createdAfter", "query", "RFC3339 timestamp for start of window", "string", "2025-11-01T00:00:00Z"),
QueryParameter("createdBefore", "query", "RFC3339 timestamp for end of window", "string", "2025-11-30T00:00:00Z"),
QueryParameter("limit", "query", "Results per page (max 200)", "integer", 50),
QueryParameter("cursor", "query", "Opaque pagination cursor", "string", "c3RhcnQ6NTA=")
},
responses = new Dictionary<string, object>
{
["200"] = new
{
description = "Jobs page",
headers = new Dictionary<string, object>
{
["Link"] = new
{
description = "RFC 8288 pagination cursor links",
schema = new { type = "string" },
example = "</api/v1/orchestrator/jobs?cursor=c3RhcnQ6NTA=>; rel=\"next\""
},
["X-StellaOps-Api-Version"] = new
{
description = "Service build version",
schema = new { type = "string" },
example = version
}
},
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/JobList" },
examples = new Dictionary<string, object>
{
["default"] = new
{
value = new
{
jobs = new[] { exampleJob },
nextCursor = "c3RhcnQ6NTA="
}
}
}
}
}
},
["400"] = ErrorResponse("Invalid filter")
}
}
},
["/api/v1/orchestrator/jobs/{jobId}"] = new
{
get = new
{
summary = "Get job",
description = "Fetch job metadata by identifier.",
parameters = new object[]
{
RouteParameter("jobId", "Job identifier", "string")
},
responses = new Dictionary<string, object>
{
["200"] = new
{
description = "Job metadata",
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/Job" },
examples = new Dictionary<string, object>
{
["default"] = new { value = exampleJob }
}
}
}
},
["404"] = ErrorResponse("Not found")
}
}
},
["/api/v1/orchestrator/jobs/{jobId}/detail"] = new
{
get = new
{
summary = "Legacy job detail (deprecated)",
description = "Legacy payload-inclusive job detail; prefer GET /api/v1/orchestrator/jobs/{jobId} plus artifact lookup.",
deprecated = true,
parameters = new object[]
{
RouteParameter("jobId", "Job identifier", "string")
},
responses = new Dictionary<string, object>
{
["200"] = new
{
description = "Job detail including payload (deprecated)",
headers = StandardDeprecationHeaders("/api/v1/orchestrator/jobs/{jobId}"),
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/JobDetail" },
examples = new Dictionary<string, object>
{
["legacy"] = new { value = exampleJobDetail }
}
}
}
},
["404"] = ErrorResponse("Not found")
}
}
},
["/api/v1/orchestrator/jobs/summary"] = new
{
get = new
{
summary = "Legacy job summary (deprecated)",
description = "Legacy summary endpoint; use pagination + counts or analytics feed.",
deprecated = true,
responses = new Dictionary<string, object>
{
["200"] = new
{
description = "Summary counts",
headers = StandardDeprecationHeaders("/api/v1/orchestrator/jobs"),
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/JobSummary" },
examples = new Dictionary<string, object>
{
["summary"] = new
{
value = new { totalJobs = 120, pendingJobs = 12, scheduledJobs = 30, leasedJobs = 20, succeededJobs = 45, failedJobs = 8, canceledJobs = 3, timedOutJobs = 2 }
}
}
}
}
}
}
}
},
["/api/v1/orchestrator/pack-runs"] = new
{
post = new
{
summary = "Schedule pack run",
description = "Schedule an orchestrated pack run with idempotency and quota enforcement.",
requestBody = new
{
required = true,
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/SchedulePackRunRequest" },
examples = new Dictionary<string, object> { ["default"] = new { value = examplePackRunRequest } }
}
}
},
responses = new Dictionary<string, object>
{
["201"] = new
{
description = "Pack run scheduled",
headers = new Dictionary<string, object>
{
["Location"] = new { description = "Pack run resource URL", schema = new { type = "string" }, example = "/api/v1/orchestrator/pack-runs/99999999-0000-1111-2222-333333333333" }
},
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/SchedulePackRunResponse" },
examples = new Dictionary<string, object> { ["default"] = new { value = examplePackRunResponse } }
}
}
},
["429"] = new
{
description = "Quota exceeded",
headers = new Dictionary<string, object> { ["Retry-After"] = new { description = "Seconds until retry", schema = new { type = "integer" }, example = 60 } },
content = new Dictionary<string, object> { ["application/json"] = new { schema = new { @ref = "#/components/schemas/PackRunError" } } }
}
}
}
},
["/api/v1/orchestrator/pack-runs/{packRunId}/retry"] = new
{
post = new
{
summary = "Retry failed pack run",
description = "Create a new pack run based on a failed one with optional parameter override.",
parameters = new object[] { RouteParameter("packRunId", "Pack run identifier", "string") },
requestBody = new
{
required = true,
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/RetryPackRunRequest" },
examples = new Dictionary<string, object> { ["default"] = new { value = exampleRetryRequest } }
}
}
},
responses = new Dictionary<string, object>
{
["201"] = new
{
description = "Retry scheduled",
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/RetryPackRunResponse" },
examples = new Dictionary<string, object> { ["default"] = new { value = exampleRetryResponse } }
}
}
},
["404"] = ErrorResponse("Pack run not found"),
["409"] = new
{
description = "Retry not allowed",
content = new Dictionary<string, object> { ["application/json"] = new { schema = new { @ref = "#/components/schemas/PackRunError" } } }
}
}
}
},
["/api/v1/orchestrator/worker/claim"] = new
{
post = new
{
summary = "Claim next job",
description = "Idempotent worker claim endpoint with optional idempotency key and task runner context.",
parameters = new object[]
{
HeaderParameter("Idempotency-Key", "Optional idempotency key for claim replay safety", "string", "claim-12af")
},
requestBody = new
{
required = true,
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/WorkerClaimRequest" },
examples = new Dictionary<string, object>
{
["default"] = new { value = exampleClaimRequest }
}
}
}
},
responses = new Dictionary<string, object>
{
["200"] = new
{
description = "Job claim response",
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/WorkerClaimResponse" },
examples = new Dictionary<string, object>
{
["default"] = new { value = exampleClaimResponse }
}
}
}
},
["204"] = new { description = "No jobs available" },
["400"] = ErrorResponse("Invalid claim request")
}
}
},
["/healthz"] = new
{
get = new
{
summary = "Health check",
description = "Basic service health probe.",
responses = new Dictionary<string, object>
{
["200"] = new
{
description = "Healthy",
content = new Dictionary<string, object>
{
["application/json"] = new
{
examples = new Dictionary<string, object>
{
["example"] = new
{
value = new { status = "ok", timestamp = "2025-11-30T00:00:00Z" }
}
}
}
}
}
}
}
}
};
var components = new OpenApiComponents(
Schemas: new Dictionary<string, object>
{
["Error"] = new
{
type = "object",
properties = new
{
error = new { type = "string" },
detail = new { type = "string" }
},
required = new[] { "error" }
},
["Job"] = new
{
type = "object",
properties = new
{
jobId = new { type = "string", format = "uuid" },
runId = new { type = "string", format = "uuid", nullable = true },
jobType = new { type = "string" },
status = new { type = "string" },
priority = new { type = "integer" },
attempt = new { type = "integer" },
maxAttempts = new { type = "integer" },
correlationId = new { type = "string", nullable = true },
workerId = new { type = "string", nullable = true },
taskRunnerId = new { type = "string", nullable = true },
createdAt = new { type = "string", format = "date-time" },
scheduledAt = new { type = "string", format = "date-time", nullable = true },
leasedAt = new { type = "string", format = "date-time", nullable = true },
completedAt = new { type = "string", format = "date-time", nullable = true },
notBefore = new { type = "string", format = "date-time", nullable = true },
reason = new { type = "string", nullable = true },
replayOf = new { type = "string", format = "uuid", nullable = true },
createdBy = new { type = "string" }
},
required = new[] { "jobId", "jobType", "status", "priority", "attempt", "maxAttempts", "createdAt", "createdBy" }
},
["JobDetail"] = new
{
allOf = new object[]
{
new { @ref = "#/components/schemas/Job" },
new
{
type = "object",
properties = new
{
payloadDigest = new { type = "string" },
payload = new { type = "string" },
idempotencyKey = new { type = "string" },
leaseId = new { type = "string", format = "uuid", nullable = true },
leaseUntil = new { type = "string", format = "date-time", nullable = true }
}
}
}
},
["JobList"] = new
{
type = "object",
properties = new
{
jobs = new
{
type = "array",
items = new { @ref = "#/components/schemas/Job" }
},
nextCursor = new { type = "string", nullable = true }
},
required = new[] { "jobs" }
},
["JobSummary"] = new
{
type = "object",
properties = new
{
totalJobs = new { type = "integer" },
pendingJobs = new { type = "integer" },
scheduledJobs = new { type = "integer" },
leasedJobs = new { type = "integer" },
succeededJobs = new { type = "integer" },
failedJobs = new { type = "integer" },
canceledJobs = new { type = "integer" },
timedOutJobs = new { type = "integer" }
}
},
["WorkerClaimRequest"] = new
{
type = "object",
properties = new
{
workerId = new { type = "string" },
jobType = new { type = "string" },
idempotencyKey = new { type = "string", nullable = true },
leaseSeconds = new { type = "integer", nullable = true },
taskRunnerId = new { type = "string", nullable = true }
},
required = new[] { "workerId" }
},
["WorkerClaimResponse"] = new
{
type = "object",
properties = new
{
jobId = new { type = "string", format = "uuid" },
leaseId = new { type = "string", format = "uuid" },
leaseUntil = new { type = "string", format = "date-time" },
job = new { @ref = "#/components/schemas/JobDetail" }
},
required = new[] { "jobId", "leaseId", "leaseUntil", "job" }
},
["SchedulePackRunRequest"] = new
{
type = "object",
properties = new
{
packId = new { type = "string" },
packVersion = new { type = "string" },
parameters = new { type = "string", nullable = true },
projectId = new { type = "string", nullable = true },
idempotencyKey = new { type = "string", nullable = true },
correlationId = new { type = "string", nullable = true },
priority = new { type = "integer", nullable = true },
maxAttempts = new { type = "integer", nullable = true },
metadata = new { type = "string", nullable = true }
},
required = new[] { "packId", "packVersion" }
},
["SchedulePackRunResponse"] = new
{
type = "object",
properties = new
{
packRunId = new { type = "string", format = "uuid" },
packId = new { type = "string" },
packVersion = new { type = "string" },
status = new { type = "string" },
idempotencyKey = new { type = "string" },
createdAt = new { type = "string", format = "date-time" },
wasAlreadyScheduled = new { type = "boolean" }
},
required = new[] { "packRunId", "packId", "packVersion", "status", "createdAt", "wasAlreadyScheduled" }
},
["RetryPackRunRequest"] = new
{
type = "object",
properties = new
{
parameters = new { type = "string", nullable = true },
idempotencyKey = new { type = "string", nullable = true }
}
},
["RetryPackRunResponse"] = new
{
type = "object",
properties = new
{
originalPackRunId = new { type = "string", format = "uuid" },
newPackRunId = new { type = "string", format = "uuid" },
status = new { type = "string" },
createdAt = new { type = "string", format = "date-time" }
},
required = new[] { "originalPackRunId", "newPackRunId", "status", "createdAt" }
},
["PackRunError"] = new
{
type = "object",
properties = new
{
code = new { type = "string" },
message = new { type = "string" },
packRunId = new { type = "string", format = "uuid", nullable = true },
retryAfterSeconds = new { type = "integer", nullable = true }
},
required = new[] { "code", "message" }
}
},
Headers: new Dictionary<string, object>
{
["Deprecation"] = new { description = "RFC 8594 deprecation marker", schema = new { type = "string" }, example = "true" },
["Sunset"] = new { description = "Target removal date", schema = new { type = "string" }, example = "Tue, 31 Mar 2026 00:00:00 GMT" },
["Link"] = new { description = "Alternate endpoint for deprecated operation", schema = new { type = "string" } }
});
return new OpenApiSpecDocument(
OpenApi: "3.1.0",
Info: new OpenApiInfo("StellaOps Orchestrator API", version, "Scheduling and automation control plane APIs with pagination, idempotency, and error envelopes."),
Paths: paths,
Components: components,
Servers: new List<object>
{
new { url = "https://api.stella-ops.local" },
new { url = "http://localhost:5201" }
});
// Local helper functions keep the anonymous object creation terse.
static object QueryParameter(string name, string @in, string description, string type, object? example = null)
{
return new Dictionary<string, object?>
{
["name"] = name,
["in"] = @in,
["description"] = description,
["required"] = false,
["schema"] = new { type },
["example"] = example
};
}
static object RouteParameter(string name, string description, string type)
{
return new Dictionary<string, object?>
{
["name"] = name,
["in"] = "path",
["description"] = description,
["required"] = true,
["schema"] = new { type }
};
}
static object HeaderParameter(string name, string description, string type, object? example = null)
{
return new Dictionary<string, object?>
{
["name"] = name,
["in"] = "header",
["description"] = description,
["required"] = false,
["schema"] = new { type },
["example"] = example
};
}
static object ErrorResponse(string description)
{
return new
{
description,
content = new Dictionary<string, object>
{
["application/json"] = new
{
schema = new { @ref = "#/components/schemas/Error" },
examples = new Dictionary<string, object>
{
["error"] = new { value = new { error = "invalid_request", detail = description } }
}
}
}
};
}
static Dictionary<string, object> StandardDeprecationHeaders(string alternate)
{
return new Dictionary<string, object>
{
["Deprecation"] = new { description = "This endpoint is deprecated", schema = new { type = "string" }, example = "true" },
["Link"] = new { description = "Alternate endpoint", schema = new { type = "string" }, example = $"<{alternate}>; rel=\"alternate\"" },
["Sunset"] = new { description = "Planned removal", schema = new { type = "string" }, example = "Tue, 31 Mar 2026 00:00:00 GMT" }
};
}
}
/// <summary>
/// Canonical Job example embedded in the generated specification's list/get responses.
/// Property order is preserved in the serialized example, so it is not reordered.
/// </summary>
private static object ExampleJob()
{
return new
{
jobId = Guid.Parse("aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"),
runId = Guid.Parse("cccccccc-1111-2222-3333-dddddddddddd"),
jobType = "scan.image",
status = "scheduled",
priority = 5,
attempt = 0,
maxAttempts = 3,
correlationId = "corr-abc",
// Explicit (string?) casts give the anonymous type nullable string slots
// so null members still serialize.
workerId = (string?)null,
taskRunnerId = "runner-01",
createdAt = "2025-11-30T12:00:00Z",
scheduledAt = "2025-11-30T12:05:00Z",
leasedAt = (string?)null,
completedAt = (string?)null,
notBefore = "2025-11-30T12:04:00Z",
reason = (string?)null,
replayOf = (string?)null,
createdBy = "scheduler"
};
}
/// <summary>
/// Canonical JobDetail example (payload-inclusive, in the "leased" state) used by the
/// deprecated detail endpoint's example and the worker claim response example.
/// </summary>
private static object ExampleJobDetail()
{
return new
{
jobId = Guid.Parse("aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"),
runId = Guid.Parse("cccccccc-1111-2222-3333-dddddddddddd"),
jobType = "scan.image",
status = "leased",
priority = 5,
attempt = 1,
maxAttempts = 3,
payloadDigest = "sha256:abc123",
payload = "{\"image\":\"alpine:3.18\"}",
idempotencyKey = "claim-12af",
correlationId = "corr-abc",
leaseId = Guid.Parse("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
leaseUntil = "2025-11-30T12:05:00Z",
workerId = "worker-7f9",
taskRunnerId = "runner-01",
createdAt = "2025-11-30T12:00:00Z",
scheduledAt = "2025-11-30T12:05:00Z",
leasedAt = "2025-11-30T12:00:15Z",
// Null members keep (string?) casts so the anonymous type serializes them.
completedAt = (string?)null,
notBefore = "2025-11-30T12:04:00Z",
reason = (string?)null,
replayOf = (string?)null,
createdBy = "scheduler"
};
}
}
/// <summary>
/// Discovery payload served from <c>/.well-known/openapi</c>: identifies the
/// service, its spec/format versions, where to fetch the full spec, and the
/// error-envelope schema plus notification channels.
/// </summary>
public sealed record OpenApiDiscoveryDocument(
    [property: JsonPropertyName("service")] string Service,
    [property: JsonPropertyName("specVersion")] string SpecVersion,
    [property: JsonPropertyName("version")] string Version,
    [property: JsonPropertyName("format")] string Format,
    [property: JsonPropertyName("url")] string Url,
    [property: JsonPropertyName("errorEnvelopeSchema")] string ErrorEnvelopeSchema,
    [property: JsonPropertyName("notifications")] IReadOnlyDictionary<string, string> Notifications);
/// <summary>
/// Root of the serialized OpenAPI specification document: version marker,
/// info block, path map, reusable components, and optional server list.
/// </summary>
public sealed record OpenApiSpecDocument(
    [property: JsonPropertyName("openapi")] string OpenApi,
    [property: JsonPropertyName("info")] OpenApiInfo Info,
    [property: JsonPropertyName("paths")] IReadOnlyDictionary<string, object> Paths,
    [property: JsonPropertyName("components")] OpenApiComponents Components,
    [property: JsonPropertyName("servers")] IReadOnlyList<object>? Servers = null);
/// <summary>
/// OpenAPI "info" section: human-readable title, API version, and description.
/// </summary>
public sealed record OpenApiInfo(
    [property: JsonPropertyName("title")] string Title,
    [property: JsonPropertyName("version")] string Version,
    [property: JsonPropertyName("description")] string Description);
/// <summary>
/// OpenAPI "components" section: named schemas plus optional reusable
/// header objects (e.g. the deprecation header set).
/// </summary>
public sealed record OpenApiComponents(
    [property: JsonPropertyName("schemas")] IReadOnlyDictionary<string, object> Schemas,
    [property: JsonPropertyName("headers")] IReadOnlyDictionary<string, object>? Headers = null);

View File

@@ -97,6 +97,24 @@ public sealed record PackRunListResponse(
int TotalCount,
string? NextCursor);
/// <summary>
/// Manifest response summarizing pack run state and log statistics.
/// </summary>
/// <param name="PackRunId">Identifier of the pack run.</param>
/// <param name="PackId">Identifier of the pack being executed.</param>
/// <param name="PackVersion">Version of the pack being executed.</param>
/// <param name="Status">Lower-cased run status (e.g. "scheduled", "succeeded").</param>
/// <param name="Attempt">Current attempt number.</param>
/// <param name="MaxAttempts">Maximum attempts allowed for the run.</param>
/// <param name="CreatedAt">When the run was created.</param>
/// <param name="ScheduledAt">When the run was scheduled, if it has been.</param>
/// <param name="StartedAt">When execution started, if it has.</param>
/// <param name="CompletedAt">When execution finished, if it has.</param>
/// <param name="Reason">Failure/cancellation reason, when present.</param>
/// <param name="LogCount">Total number of log entries recorded for the run.</param>
/// <param name="LatestSequence">Highest log sequence number recorded so far.</param>
public sealed record PackRunManifestResponse(
    Guid PackRunId,
    string PackId,
    string PackVersion,
    string Status,
    int Attempt,
    int MaxAttempts,
    DateTimeOffset CreatedAt,
    DateTimeOffset? ScheduledAt,
    DateTimeOffset? StartedAt,
    DateTimeOffset? CompletedAt,
    string? Reason,
    long LogCount,
    long LatestSequence);
// ========== Task Runner (Worker) Requests/Responses ==========
/// <summary>

View File

@@ -120,6 +120,7 @@ public static class JobEndpoints
try
{
var tenantId = tenantResolver.Resolve(context);
DeprecationHeaders.Apply(context.Response, "/api/v1/orchestrator/jobs/{jobId}");
var job = await repository.GetByIdAsync(tenantId, jobId, cancellationToken).ConfigureAwait(false);
if (job is null)
@@ -146,6 +147,7 @@ public static class JobEndpoints
try
{
var tenantId = tenantResolver.Resolve(context);
DeprecationHeaders.Apply(context.Response, "/api/v1/orchestrator/jobs");
// Get counts for each status
var pending = await repository.CountAsync(tenantId, Core.Domain.JobStatus.Pending, jobType, projectId, cancellationToken).ConfigureAwait(false);

View File

@@ -0,0 +1,41 @@
using StellaOps.Orchestrator.WebService.Contracts;
namespace StellaOps.Orchestrator.WebService.Endpoints;
/// <summary>
/// OpenAPI discovery and specification endpoints.
/// </summary>
public static class OpenApiEndpoints
{
    /// <summary>
    /// Maps OpenAPI discovery endpoints.
    /// </summary>
    /// <param name="app">Route builder to attach the endpoints to.</param>
    /// <returns>The same <paramref name="app"/> for chaining.</returns>
    public static IEndpointRouteBuilder MapOpenApiEndpoints(this IEndpointRouteBuilder app)
    {
        // Well-known discovery document pointing at the concrete spec URL.
        app.MapGet("/.well-known/openapi", (HttpContext context) =>
        {
            var version = OpenApiDocuments.GetServiceVersion();
            var discovery = OpenApiDocuments.CreateDiscoveryDocument(version);
            ApplyCachingHeaders(context.Response, version);
            return Results.Json(discovery, OpenApiDocuments.SerializerOptions);
        })
        .WithName("Orchestrator_OpenApiDiscovery")
        .WithTags("OpenAPI");

        // Full OpenAPI specification; carries the same cache/version headers
        // as the discovery endpoint so clients can cache and revalidate both.
        app.MapGet("/openapi/orchestrator.json", (HttpContext context) =>
        {
            var version = OpenApiDocuments.GetServiceVersion();
            var spec = OpenApiDocuments.CreateSpecification(version);
            ApplyCachingHeaders(context.Response, version);
            return Results.Json(spec, OpenApiDocuments.SerializerOptions);
        })
        .WithName("Orchestrator_OpenApiSpec")
        .WithTags("OpenAPI");

        return app;
    }

    // Shared cache/version headers. The weak ETag is keyed on the service
    // version so a new deployment invalidates any cached copies.
    private static void ApplyCachingHeaders(HttpResponse response, string version)
    {
        response.Headers.CacheControl = "private, max-age=300";
        response.Headers.ETag = $"W/\"oas-{version}\"";
        response.Headers["X-StellaOps-Service"] = "orchestrator";
        response.Headers["X-StellaOps-Api-Version"] = version;
    }
}

View File

@@ -1,3 +1,4 @@
using System.Globalization;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
@@ -23,6 +24,11 @@ public static class PackRunEndpoints
private const int MaxExtendSeconds = 1800; // 30 minutes
private const int DefaultLogLimit = 100;
private const int MaxLogLimit = 1000;
private const string PackRunJobType = "pack-run";
private const int PackRunQuotaMaxActive = 10;
private const int PackRunQuotaMaxPerHour = 200;
private const int PackRunQuotaBurst = 20;
private const double PackRunQuotaRefillPerSecond = 1.0;
/// <summary>
/// Maps pack run endpoints to the route builder.
@@ -45,6 +51,10 @@ public static class PackRunEndpoints
.WithName("Orchestrator_ListPackRuns")
.WithDescription("List pack runs with filters");
group.MapGet("{packRunId:guid}/manifest", GetPackRunManifest)
.WithName("Orchestrator_GetPackRunManifest")
.WithDescription("Get pack run manifest including log stats and status");
// Task runner (worker) endpoints
group.MapPost("claim", ClaimPackRun)
.WithName("Orchestrator_ClaimPackRun")
@@ -90,6 +100,7 @@ public static class PackRunEndpoints
[FromBody] SchedulePackRunRequest request,
[FromServices] TenantResolver tenantResolver,
[FromServices] IPackRunRepository packRunRepository,
[FromServices] IQuotaRepository quotaRepository,
[FromServices] IEventPublisher eventPublisher,
[FromServices] TimeProvider timeProvider,
CancellationToken cancellationToken)
@@ -107,6 +118,12 @@ public static class PackRunEndpoints
"invalid_request", "PackVersion is required", null, null));
}
if (string.IsNullOrWhiteSpace(request.ProjectId))
{
return Results.BadRequest(new PackRunErrorResponse(
"invalid_request", "ProjectId is required", null, null));
}
var tenantId = tenantResolver.Resolve(context);
var now = timeProvider.GetUtcNow();
var parameters = request.Parameters ?? "{}";
@@ -132,7 +149,7 @@ public static class PackRunEndpoints
var packRun = PackRun.Create(
packRunId: packRunId,
tenantId: tenantId,
projectId: request.ProjectId,
projectId: request.ProjectId!.Trim(),
packId: request.PackId,
packVersion: request.PackVersion,
parameters: parameters,
@@ -145,9 +162,49 @@ public static class PackRunEndpoints
metadata: request.Metadata,
createdAt: now);
// Enforce pack-run quota
var quotaResult = await TryConsumePackRunQuotaAsync(quotaRepository, tenantId, context.User?.Identity?.Name ?? "system", now, cancellationToken);
if (!quotaResult.Allowed)
{
if (quotaResult.RetryAfter.HasValue)
{
context.Response.Headers.RetryAfter = ((int)Math.Ceiling(quotaResult.RetryAfter.Value.TotalSeconds)).ToString(CultureInfo.InvariantCulture);
}
return Results.Json(
new PackRunErrorResponse(
"quota_exhausted",
"Pack run quota exceeded",
null,
quotaResult.RetryAfter.HasValue
? (int?)Math.Ceiling(quotaResult.RetryAfter.Value.TotalSeconds)
: null),
statusCode: StatusCodes.Status429TooManyRequests);
}
await packRunRepository.CreateAsync(packRun, cancellationToken);
// Mark as scheduled immediately
await packRunRepository.UpdateStatusAsync(
tenantId,
packRunId,
PackRunStatus.Scheduled,
packRun.Attempt,
null,
null,
null,
now,
null,
null,
null,
null,
null,
null,
null,
cancellationToken);
OrchestratorMetrics.PackRunCreated(tenantId, request.PackId);
OrchestratorMetrics.PackRunScheduled(tenantId, request.PackId);
// Publish event
var envelope = EventEnvelope.Create(
@@ -163,7 +220,7 @@ public static class PackRunEndpoints
packRunId,
request.PackId,
request.PackVersion,
packRun.Status.ToString().ToLowerInvariant(),
PackRunStatus.Scheduled.ToString().ToLowerInvariant(),
idempotencyKey,
now,
WasAlreadyScheduled: false));
@@ -188,6 +245,42 @@ public static class PackRunEndpoints
return Results.Ok(PackRunResponse.FromDomain(packRun));
}
/// <summary>
/// Returns a manifest summarizing a pack run's state together with its
/// log statistics, or 404 when the run does not exist for the tenant.
/// </summary>
private static async Task<IResult> GetPackRunManifest(
    HttpContext context,
    [FromRoute] Guid packRunId,
    [FromServices] TenantResolver tenantResolver,
    [FromServices] IPackRunRepository packRunRepository,
    [FromServices] IPackRunLogRepository logRepository,
    CancellationToken cancellationToken)
{
    var tenantId = tenantResolver.Resolve(context);

    var run = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken);
    if (run is null)
    {
        return Results.NotFound(new PackRunErrorResponse(
            "not_found", $"Pack run {packRunId} not found", packRunId, null));
    }

    var (totalLogs, lastSequence) = await logRepository.GetLogStatsAsync(tenantId, packRunId, cancellationToken);

    return Results.Ok(new PackRunManifestResponse(
        run.PackRunId,
        run.PackId,
        run.PackVersion,
        run.Status.ToString().ToLowerInvariant(),
        run.Attempt,
        run.MaxAttempts,
        run.CreatedAt,
        run.ScheduledAt,
        run.StartedAt,
        run.CompletedAt,
        run.Reason,
        totalLogs,
        lastSequence));
}
private static async Task<IResult> ListPackRuns(
HttpContext context,
[FromQuery] string? packId,
@@ -403,6 +496,7 @@ public static class PackRunEndpoints
[FromServices] TenantResolver tenantResolver,
[FromServices] IPackRunRepository packRunRepository,
[FromServices] IPackRunLogRepository logRepository,
[FromServices] IQuotaRepository quotaRepository,
[FromServices] IArtifactRepository artifactRepository,
[FromServices] IEventPublisher eventPublisher,
[FromServices] TimeProvider timeProvider,
@@ -503,6 +597,8 @@ public static class PackRunEndpoints
OrchestratorMetrics.RecordPackRunDuration(tenantId, packRun.PackId, durationSeconds);
OrchestratorMetrics.RecordPackRunLogCount(tenantId, packRun.PackId, logCount + 1);
await ReleasePackRunQuotaAsync(quotaRepository, tenantId, cancellationToken);
// Publish event
var eventType = request.Success
? OrchestratorEventType.PackRunCompleted
@@ -664,6 +760,7 @@ public static class PackRunEndpoints
[FromServices] TenantResolver tenantResolver,
[FromServices] IPackRunRepository packRunRepository,
[FromServices] IPackRunLogRepository logRepository,
[FromServices] IQuotaRepository quotaRepository,
[FromServices] IEventPublisher eventPublisher,
[FromServices] TimeProvider timeProvider,
CancellationToken cancellationToken)
@@ -709,6 +806,8 @@ public static class PackRunEndpoints
OrchestratorMetrics.PackRunCanceled(tenantId, packRun.PackId);
await ReleasePackRunQuotaAsync(quotaRepository, tenantId, cancellationToken);
// Publish event
var envelope = EventEnvelope.Create(
eventType: OrchestratorEventType.PackRunFailed, // Use Failed for canceled
@@ -818,6 +917,102 @@ public static class PackRunEndpoints
packRun.Metadata);
}
/// <summary>
/// Attempts to consume one pack-run quota token for the tenant, creating a
/// default quota row on first use. Enforces three limits in order: token
/// bucket (burst + refill rate), max concurrently active runs, and max runs
/// per rolling hour.
/// </summary>
/// <param name="quotaRepository">Quota persistence.</param>
/// <param name="tenantId">Tenant owning the quota.</param>
/// <param name="actor">Actor recorded as the quota updater.</param>
/// <param name="now">Current time, injected for determinism.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>
/// <c>(true, null)</c> when a token was consumed and state persisted;
/// otherwise <c>(false, suggested retry delay)</c> with nothing persisted.
/// </returns>
private static async Task<(bool Allowed, TimeSpan? RetryAfter)> TryConsumePackRunQuotaAsync(
    IQuotaRepository quotaRepository,
    string tenantId,
    string actor,
    DateTimeOffset now,
    CancellationToken cancellationToken)
{
    var quota = await quotaRepository.GetByTenantAndJobTypeAsync(tenantId, PackRunJobType, cancellationToken).ConfigureAwait(false)
        ?? await CreateDefaultPackRunQuotaAsync(quotaRepository, tenantId, actor, now, cancellationToken).ConfigureAwait(false);

    // Refill the token bucket for elapsed time; clamp elapsed to >= 0 so
    // clock skew (now < LastRefillAt) cannot spuriously drain tokens.
    var elapsedSeconds = Math.Max(0d, (now - quota.LastRefillAt).TotalSeconds);
    var tokens = Math.Min(quota.BurstCapacity, quota.CurrentTokens + elapsedSeconds * quota.RefillRate);

    // Roll the per-hour window once an hour has elapsed since it started.
    var hourStart = quota.CurrentHourStart;
    var hourCount = quota.CurrentHourCount;
    if (now - hourStart >= TimeSpan.FromHours(1))
    {
        hourStart = now;
        hourCount = 0;
    }

    if (tokens < 1)
    {
        // Guard against a zero/negative refill rate, which would otherwise
        // yield an infinite deficit and make TimeSpan.FromSeconds overflow.
        if (quota.RefillRate <= 0)
        {
            return (false, TimeSpan.FromMinutes(5));
        }

        var deficitSeconds = (1 - tokens) / quota.RefillRate;
        return (false, TimeSpan.FromSeconds(Math.Ceiling(deficitSeconds)));
    }

    if (quota.CurrentActive >= quota.MaxActive)
    {
        return (false, TimeSpan.FromSeconds(5));
    }

    if (hourCount >= quota.MaxPerHour)
    {
        return (false, TimeSpan.FromMinutes(5));
    }

    // Consume one token and persist the new bucket/window/active state.
    tokens -= 1;
    hourCount += 1;

    await quotaRepository.UpdateStateAsync(
        tenantId,
        quota.QuotaId,
        currentTokens: Math.Max(0, tokens),
        lastRefillAt: now,
        currentActive: quota.CurrentActive + 1,
        currentHourCount: hourCount,
        currentHourStart: hourStart,
        updatedBy: actor,
        cancellationToken: cancellationToken).ConfigureAwait(false);

    return (true, null);
}
/// <summary>
/// Releases one active-run slot from the tenant's pack-run quota, if any is held.
/// </summary>
private static async Task ReleasePackRunQuotaAsync(IQuotaRepository quotaRepository, string tenantId, CancellationToken cancellationToken)
{
    var quota = await quotaRepository.GetByTenantAndJobTypeAsync(tenantId, PackRunJobType, cancellationToken).ConfigureAwait(false);

    // Nothing to release when no quota row exists or no runs are marked active.
    if (quota is { CurrentActive: > 0 })
    {
        await quotaRepository.DecrementActiveAsync(tenantId, quota.QuotaId, cancellationToken).ConfigureAwait(false);
    }
}
/// <summary>
/// Creates and persists the default pack-run quota for a tenant on first use.
/// Defaults come from the PackRunQuota* constants; the token bucket starts
/// full (CurrentTokens = burst capacity) with zero active runs.
/// </summary>
private static async Task<Quota> CreateDefaultPackRunQuotaAsync(
    IQuotaRepository quotaRepository,
    string tenantId,
    string actor,
    DateTimeOffset now,
    CancellationToken cancellationToken)
{
    var quota = new Quota(
        QuotaId: Guid.NewGuid(),
        TenantId: tenantId,
        JobType: PackRunJobType,
        MaxActive: PackRunQuotaMaxActive,
        MaxPerHour: PackRunQuotaMaxPerHour,
        BurstCapacity: PackRunQuotaBurst,
        RefillRate: PackRunQuotaRefillPerSecond,
        CurrentTokens: PackRunQuotaBurst,
        LastRefillAt: now,
        CurrentActive: 0,
        CurrentHourCount: 0,
        CurrentHourStart: now,
        Paused: false,
        PauseReason: null,
        QuotaTicket: null,
        CreatedAt: now,
        UpdatedAt: now,
        UpdatedBy: actor);

    await quotaRepository.CreateAsync(quota, cancellationToken).ConfigureAwait(false);
    OrchestratorMetrics.QuotaCreated(tenantId, PackRunJobType);
    return quota;
}
private static string ComputeDigest(string content)
{
var bytes = Encoding.UTF8.GetBytes(content);

View File

@@ -26,6 +26,10 @@ public static class StreamEndpoints
.WithName("Orchestrator_StreamRun")
.WithDescription("Stream real-time run progress updates via SSE");
group.MapGet("pack-runs/{packRunId:guid}", StreamPackRun)
.WithName("Orchestrator_StreamPackRun")
.WithDescription("Stream real-time pack run log and status updates via SSE");
return group;
}
@@ -100,4 +104,38 @@ public static class StreamEndpoints
}
}
}
/// <summary>
/// SSE endpoint handler: verifies the pack run exists for the caller's
/// tenant, then delegates the long-lived streaming loop to the coordinator.
/// </summary>
private static async Task StreamPackRun(
    HttpContext context,
    [FromRoute] Guid packRunId,
    [FromServices] TenantResolver tenantResolver,
    [FromServices] IPackRunRepository packRunRepository,
    [FromServices] IPackRunStreamCoordinator streamCoordinator,
    CancellationToken cancellationToken)
{
    try
    {
        var tenantId = tenantResolver.Resolve(context);

        var packRun = await packRunRepository.GetByIdAsync(tenantId, packRunId, cancellationToken).ConfigureAwait(false);
        if (packRun is null)
        {
            context.Response.StatusCode = StatusCodes.Status404NotFound;
            await context.Response.WriteAsJsonAsync(new { error = "Pack run not found" }, cancellationToken).ConfigureAwait(false);
            return;
        }

        await streamCoordinator.StreamAsync(context, tenantId, packRun, cancellationToken).ConfigureAwait(false);
    }
    catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
    {
        // Client disconnect or server shutdown: a normal end for an SSE stream.
    }
    catch (InvalidOperationException ex)
    {
        // NOTE(review): presumably thrown by TenantResolver.Resolve on a bad
        // tenant context -- confirm. Only report when the SSE response has not
        // already started flushing, otherwise the status cannot be changed.
        if (!context.Response.HasStarted)
        {
            context.Response.StatusCode = StatusCodes.Status400BadRequest;
            await context.Response.WriteAsJsonAsync(new { error = ex.Message }, cancellationToken).ConfigureAwait(false);
        }
    }
}
}

View File

@@ -21,6 +21,7 @@ builder.Services.AddSingleton(TimeProvider.System);
builder.Services.Configure<StreamOptions>(builder.Configuration.GetSection(StreamOptions.SectionName));
builder.Services.AddSingleton<IJobStreamCoordinator, JobStreamCoordinator>();
builder.Services.AddSingleton<IRunStreamCoordinator, RunStreamCoordinator>();
builder.Services.AddSingleton<IPackRunStreamCoordinator, PackRunStreamCoordinator>();
// Register scale metrics and load shedding services
builder.Services.AddSingleton<ScaleMetrics>();
@@ -34,6 +35,9 @@ if (app.Environment.IsDevelopment())
app.MapOpenApi();
}
// OpenAPI discovery endpoints (available in all environments)
app.MapOpenApiEndpoints();
// Register health endpoints (replaces simple /healthz and /readyz)
app.MapHealthEndpoints();
@@ -45,6 +49,7 @@ app.MapSourceEndpoints();
app.MapRunEndpoints();
app.MapJobEndpoints();
app.MapDagEndpoints();
app.MapPackRunEndpoints();
// Register streaming endpoints
app.MapStreamEndpoints();

View File

@@ -0,0 +1,36 @@
using System.Globalization;
using Microsoft.AspNetCore.Http;
namespace StellaOps.Orchestrator.WebService.Services;
/// <summary>
/// Helper for applying HTTP deprecation metadata to legacy endpoints.
/// </summary>
public static class DeprecationHeaders
{
    /// <summary>
    /// Apply standard deprecation headers and alternate link hint to the response.
    /// Idempotent: a second call on the same response (e.g. from middleware and
    /// the endpoint itself) is a no-op rather than duplicating headers.
    /// </summary>
    /// <param name="response">HTTP response to annotate.</param>
    /// <param name="alternate">Alternate endpoint that supersedes the deprecated one.</param>
    /// <param name="sunset">Optional sunset date (UTC).</param>
    public static void Apply(HttpResponse response, string alternate, DateTimeOffset? sunset = null)
    {
        // The marker header doubles as an "already applied" sentinel so repeated
        // calls cannot stack duplicate Link/Sunset values onto the response.
        if (response.Headers.ContainsKey("X-StellaOps-Deprecated"))
        {
            return;
        }

        // RFC 8594 recommends HTTP-date for Sunset; default to a near-term horizon to prompt migrations.
        var sunsetValue = (sunset ?? new DateTimeOffset(2026, 03, 31, 0, 0, 0, TimeSpan.Zero))
            .UtcDateTime
            .ToString("r", CultureInfo.InvariantCulture);

        if (!response.Headers.ContainsKey("Deprecation"))
        {
            response.Headers.Append("Deprecation", "true");
        }

        // Link: <...>; rel="alternate"; title="Replacement"
        var linkValue = $"<{alternate}>; rel=\"alternate\"; title=\"Replacement endpoint\"";
        response.Headers.Append("Link", linkValue);
        response.Headers.Append("Sunset", sunsetValue);
        response.Headers.Append("X-StellaOps-Deprecated", "orchestrator:legacy-endpoint");
    }
}

View File

@@ -0,0 +1,200 @@
using System.Text.Json;
using Microsoft.Extensions.Options;
using StellaOps.Orchestrator.Core.Domain;
using StellaOps.Orchestrator.Infrastructure.Repositories;
namespace StellaOps.Orchestrator.WebService.Streaming;
/// <summary>
/// Coordinates a long-lived SSE stream of pack run status and log updates.
/// </summary>
public interface IPackRunStreamCoordinator
{
    /// <summary>
    /// Streams updates for <paramref name="packRun"/> to the client attached to
    /// <paramref name="context"/> until the run completes or the stream ends.
    /// </summary>
    Task StreamAsync(HttpContext context, string tenantId, PackRun packRun, CancellationToken cancellationToken);
}
/// <summary>
/// Streams pack run status/log updates over SSE. Emits: an "initial" snapshot,
/// periodic "heartbeat" events, "logs" batches, "statusChanged" snapshots, and
/// a terminal "completed" / "timeout" / "notFound" event before closing.
/// </summary>
public sealed class PackRunStreamCoordinator : IPackRunStreamCoordinator
{
    // Maximum number of log rows fetched per poll tick.
    private const int DefaultBatchSize = 200;

    // Web defaults => camelCase property names in serialized SSE payloads.
    private static readonly JsonSerializerOptions SerializerOptions = new(JsonSerializerDefaults.Web);

    private readonly IPackRunRepository _packRunRepository;
    private readonly IPackRunLogRepository _logRepository;
    private readonly TimeProvider _timeProvider;
    private readonly StreamOptions _options;
    private readonly ILogger<PackRunStreamCoordinator> _logger;

    /// <summary>
    /// Creates the coordinator. <paramref name="timeProvider"/> may be null and
    /// falls back to <see cref="TimeProvider.System"/>; all other dependencies
    /// are required and throw <see cref="ArgumentNullException"/> when null.
    /// </summary>
    public PackRunStreamCoordinator(
        IPackRunRepository packRunRepository,
        IPackRunLogRepository logRepository,
        IOptions<StreamOptions> options,
        TimeProvider? timeProvider,
        ILogger<PackRunStreamCoordinator> logger)
    {
        _packRunRepository = packRunRepository ?? throw new ArgumentNullException(nameof(packRunRepository));
        _logRepository = logRepository ?? throw new ArgumentNullException(nameof(logRepository));
        // Validate() presumably throws/normalizes bad option values -- TODO confirm.
        _options = (options ?? throw new ArgumentNullException(nameof(options))).Value.Validate();
        _timeProvider = timeProvider ?? TimeProvider.System;
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Runs the SSE loop for one client until the run reaches a terminal state,
    /// the max stream duration elapses, the run disappears, or the client disconnects.
    /// </summary>
    public async Task StreamAsync(HttpContext context, string tenantId, PackRun packRun, CancellationToken cancellationToken)
    {
        var response = context.Response;
        SseWriter.ConfigureSseHeaders(response);
        await SseWriter.WriteRetryAsync(response, _options.ReconnectDelay, cancellationToken).ConfigureAwait(false);

        // Initial snapshot plus an immediate heartbeat so clients can render
        // state before the first poll tick fires.
        var (logCount, latestSeq) = await _logRepository.GetLogStatsAsync(tenantId, packRun.PackRunId, cancellationToken).ConfigureAwait(false);
        await SseWriter.WriteEventAsync(response, "initial", PackRunSnapshotPayload.From(packRun, logCount, latestSeq), SerializerOptions, cancellationToken).ConfigureAwait(false);
        await SseWriter.WriteEventAsync(response, "heartbeat", HeartbeatPayload.Create(_timeProvider.GetUtcNow()), SerializerOptions, cancellationToken).ConfigureAwait(false);

        // Run already finished: emit the terminal event and close the stream.
        if (IsTerminal(packRun.Status))
        {
            await EmitCompletedAsync(response, packRun, logCount, latestSeq, cancellationToken).ConfigureAwait(false);
            return;
        }

        var last = packRun;
        var lastSeq = latestSeq;
        var start = _timeProvider.GetUtcNow();

        using var poll = new PeriodicTimer(_options.PollInterval);
        using var heartbeat = new PeriodicTimer(_options.HeartbeatInterval);
        try
        {
            while (!cancellationToken.IsCancellationRequested)
            {
                // Hard cap on stream lifetime; clients reconnect via SSE retry.
                if (_timeProvider.GetUtcNow() - start > _options.MaxStreamDuration)
                {
                    await SseWriter.WriteEventAsync(response, "timeout", new { packRunId = last.PackRunId, reason = "Max stream duration reached" }, SerializerOptions, cancellationToken).ConfigureAwait(false);
                    break;
                }

                // NOTE(review): a fresh WaitForNextTickAsync is started on BOTH
                // timers each iteration while the losing task from the previous
                // WhenAny may still be pending; PeriodicTimer permits only one
                // outstanding wait per timer -- verify this cannot throw
                // InvalidOperationException under load.
                var pollTask = poll.WaitForNextTickAsync(cancellationToken).AsTask();
                var hbTask = heartbeat.WaitForNextTickAsync(cancellationToken).AsTask();
                var completed = await Task.WhenAny(pollTask, hbTask).ConfigureAwait(false);

                if (completed == hbTask && await hbTask.ConfigureAwait(false))
                {
                    await SseWriter.WriteEventAsync(response, "heartbeat", HeartbeatPayload.Create(_timeProvider.GetUtcNow()), SerializerOptions, cancellationToken).ConfigureAwait(false);
                    continue;
                }

                if (completed == pollTask && await pollTask.ConfigureAwait(false))
                {
                    var current = await _packRunRepository.GetByIdAsync(tenantId, last.PackRunId, cancellationToken).ConfigureAwait(false);
                    if (current is null)
                    {
                        // Run vanished mid-stream; notify the client and stop.
                        await SseWriter.WriteEventAsync(response, "notFound", new NotFoundPayload(last.PackRunId.ToString(), "pack-run"), SerializerOptions, cancellationToken).ConfigureAwait(false);
                        break;
                    }

                    // Send new logs
                    var batch = await _logRepository.GetLogsAsync(tenantId, current.PackRunId, lastSeq, DefaultBatchSize, cancellationToken).ConfigureAwait(false);
                    if (batch.Logs.Count > 0)
                    {
                        lastSeq = batch.Logs[^1].Sequence;
                        await SseWriter.WriteEventAsync(response, "logs", batch.Logs.Select(PackRunLogPayload.FromDomain), SerializerOptions, cancellationToken).ConfigureAwait(false);
                    }

                    if (HasStatusChanged(last, current))
                    {
                        // NOTE(review): batch.Logs.Count is the size of the latest
                        // batch, not the run's total log count, yet it is reported
                        // as LogCount in the snapshot/completed payloads below --
                        // confirm this is intended.
                        await SseWriter.WriteEventAsync(response, "statusChanged", PackRunSnapshotPayload.From(current, batch.Logs.Count, lastSeq), SerializerOptions, cancellationToken).ConfigureAwait(false);
                        last = current;
                        if (IsTerminal(current.Status))
                        {
                            await EmitCompletedAsync(response, current, batch.Logs.Count, lastSeq, cancellationToken).ConfigureAwait(false);
                            break;
                        }
                    }
                }
            }
        }
        catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
        {
            // Normal client-disconnect / shutdown path; nothing to clean up.
            _logger.LogDebug("Pack run stream cancelled for {PackRunId}.", last.PackRunId);
        }
    }

    // A status transition, a retry attempt, or a new lease all count as a change.
    private static bool HasStatusChanged(PackRun previous, PackRun current)
    {
        return previous.Status != current.Status || previous.Attempt != current.Attempt || previous.LeaseId != current.LeaseId;
    }

    /// <summary>
    /// Emits the terminal "completed" event. Duration is measured from
    /// StartedAt when available, else from CreatedAt, else reported as zero.
    /// </summary>
    private async Task EmitCompletedAsync(HttpResponse response, PackRun packRun, long logCount, long latestSequence, CancellationToken cancellationToken)
    {
        var durationSeconds = packRun.CompletedAt.HasValue && packRun.StartedAt.HasValue
            ? (packRun.CompletedAt.Value - packRun.StartedAt.Value).TotalSeconds
            : packRun.CompletedAt.HasValue ? (packRun.CompletedAt.Value - packRun.CreatedAt).TotalSeconds : 0;

        var payload = new PackRunCompletedPayload(
            PackRunId: packRun.PackRunId,
            Status: packRun.Status.ToString().ToLowerInvariant(),
            CompletedAt: packRun.CompletedAt ?? _timeProvider.GetUtcNow(),
            DurationSeconds: durationSeconds,
            LogCount: logCount,
            LatestSequence: latestSequence);

        await SseWriter.WriteEventAsync(response, "completed", payload, SerializerOptions, cancellationToken).ConfigureAwait(false);
    }

    // Terminal statuses: no further transitions are expected after these.
    private static bool IsTerminal(PackRunStatus status) =>
        status is PackRunStatus.Succeeded or PackRunStatus.Failed or PackRunStatus.Canceled or PackRunStatus.TimedOut;
}
/// <summary>
/// SSE payload carrying a point-in-time snapshot of a pack run, emitted as
/// the "initial" and "statusChanged" events.
/// </summary>
internal sealed record PackRunSnapshotPayload(
    Guid PackRunId,
    string Status,
    string PackId,
    string PackVersion,
    int Attempt,
    int MaxAttempts,
    string? TaskRunnerId,
    Guid? LeaseId,
    DateTimeOffset CreatedAt,
    DateTimeOffset? StartedAt,
    DateTimeOffset? CompletedAt,
    long LogCount,
    long LatestSequence)
{
    /// <summary>
    /// Maps a domain <see cref="PackRun"/> plus log stats into the wire payload;
    /// status is lower-cased for JSON consumers.
    /// </summary>
    public static PackRunSnapshotPayload From(PackRun packRun, long logCount, long latestSequence) => new(
        packRun.PackRunId,
        packRun.Status.ToString().ToLowerInvariant(),
        packRun.PackId,
        packRun.PackVersion,
        packRun.Attempt,
        packRun.MaxAttempts,
        packRun.TaskRunnerId,
        packRun.LeaseId,
        packRun.CreatedAt,
        packRun.StartedAt,
        packRun.CompletedAt,
        logCount,
        latestSequence);
}
/// <summary>
/// SSE payload for a single pack run log entry, emitted in "logs" batches.
/// </summary>
internal sealed record PackRunLogPayload(
    long Sequence,
    string Level,
    string Source,
    string Message,
    DateTimeOffset Timestamp,
    string? Data)
{
    /// <summary>
    /// Maps a domain <see cref="PackRunLog"/> to the wire payload; the level
    /// is lower-cased for JSON consumers.
    /// </summary>
    public static PackRunLogPayload FromDomain(PackRunLog log) => new(
        log.Sequence,
        log.Level.ToString().ToLowerInvariant(),
        log.Source,
        log.Message,
        log.Timestamp,
        log.Data);
}
/// <summary>
/// Terminal SSE payload emitted as the "completed" event: final status,
/// completion time, overall duration, and the log stats seen at completion.
/// </summary>
internal sealed record PackRunCompletedPayload(
    Guid PackRunId,
    string Status,
    DateTimeOffset CompletedAt,
    double DurationSeconds,
    long LogCount,
    long LatestSequence);

View File

@@ -0,0 +1,23 @@
# StellaOps Orchestrator · Sprint 0152-0001-0002 Mirror
Status mirror for `docs/implplan/SPRINT_0152_0001_0002_orchestrator_ii.md`. Update alongside the sprint file to avoid drift.
| # | Task ID | Status | Notes |
| --- | --- | --- | --- |
| 1 | ORCH-SVC-32-002 | DONE | DAG planner + job state machine implemented. |
| 2 | ORCH-SVC-32-003 | DONE | Read-only REST APIs with pagination/idempotency. |
| 3 | ORCH-SVC-32-004 | DONE | SSE streams, metrics, health probes delivered. |
| 4 | ORCH-SVC-32-005 | DONE | Worker claim/heartbeat/progress/complete endpoints live. |
| 5 | ORCH-SVC-33-001 | DONE | Sources control-plane validation + Postgres repos. |
| 6 | ORCH-SVC-33-002 | DONE | Adaptive rate limiting (token bucket + concurrency + backpressure). |
| 7 | ORCH-SVC-33-003 | DONE | Watermark/backfill manager with duplicate suppression. |
| 8 | ORCH-SVC-33-004 | DONE | Dead-letter store, replay, notifications. |
| 9 | ORCH-SVC-34-001 | DONE | Quotas + SLO burn-rate computation and alerts. |
| 10 | ORCH-SVC-34-002 | DONE | Audit log + run ledger export with signed manifest. |
| 11 | ORCH-SVC-34-003 | DONE | Perf/scale validation + autoscale/load-shed hooks. |
| 12 | ORCH-SVC-34-004 | DONE | GA packaging (Docker/Helm/air-gap bundle/provenance checklist). |
| 13 | ORCH-SVC-35-101 | DONE | Export job class registration + quotas and telemetry. |
| 14 | ORCH-SVC-36-101 | DONE | Export distribution + retention lifecycle metadata. |
| 15 | ORCH-SVC-37-101 | DONE | Scheduled exports, pruning, failure alerting. |
Last synced: 2025-11-30 (UTC).

26
src/Orchestrator/TASKS.md Normal file
View File

@@ -0,0 +1,26 @@
# Orchestrator · Sprint Mirrors (0151 / 0152)
Local status mirror for orchestration sprints to keep doc and code views aligned. Update this alongside the canonical sprint files:
- `docs/implplan/SPRINT_0151_0001_0001_orchestrator_i.md`
- `docs/implplan/SPRINT_0152_0001_0002_orchestrator_ii.md`
| Sprint | Task ID | Status | Notes |
| --- | --- | --- | --- |
| 0151 | ORCH-OAS-61-001 | DONE | Per-service OpenAPI doc with pagination/idempotency/error envelopes. |
| 0151 | ORCH-OAS-61-002 | DONE | `/.well-known/openapi` discovery and version metadata. |
| 0151 | ORCH-OAS-62-001 | DONE | OpenAPI + SDK smoke tests for pagination and pack-run schedule/retry endpoints. |
| 0151 | ORCH-OAS-63-001 | DONE | Deprecation headers/metadata for legacy job endpoints. |
| 0151 | ORCH-OBS-50-001 | BLOCKED | Waiting on Telemetry Core (Sprint 0174). |
| 0151 | ORCH-OBS-51-001 | BLOCKED | Depends on 50-001 and telemetry schema. |
| 0151 | ORCH-OBS-52-001 | BLOCKED | Needs event schema from Sprint 0150.A. |
| 0151 | ORCH-OBS-53-001 | BLOCKED | Evidence Locker capsule inputs not frozen. |
| 0151 | ORCH-OBS-54-001 | BLOCKED | Provenance attestations depend on 53-001. |
| 0151 | ORCH-OBS-55-001 | BLOCKED | Incident-mode hooks depend on 54-001. |
| 0151 | ORCH-AIRGAP-56-001 | BLOCKED | Await AirGap staleness contracts (Sprint 0120.A). |
| 0151 | ORCH-AIRGAP-56-002 | BLOCKED | Await upstream 56-001. |
| 0151 | ORCH-AIRGAP-57-001 | BLOCKED | Await upstream 56-002. |
| 0151 | ORCH-AIRGAP-58-001 | BLOCKED | Await upstream 57-001. |
| 0151 | ORCH-SVC-32-001 | DONE | Service bootstrap + initial schema/migrations. |
| 0152 | ORCH-SVC-32-002…37-101 | DONE | See `src/Orchestrator/StellaOps.Orchestrator/TASKS.md` for per-task detail. |
Last synced: 2025-11-30 (UTC).