Restructure solution layout by module
This commit is contained in:
28
src/Scanner/__Libraries/StellaOps.Scanner.Storage/AGENTS.md
Normal file
28
src/Scanner/__Libraries/StellaOps.Scanner.Storage/AGENTS.md
Normal file
@@ -0,0 +1,28 @@
|
||||
# AGENTS
|
||||
## Role
|
||||
Provide durable catalog and artifact storage for the Scanner plane, spanning Mongo catalog collections and MinIO object storage. Expose repositories and services used by WebService and Worker components to persist job state, image metadata, and exported artefacts deterministically.
|
||||
## Scope
|
||||
- Mongo collections: artifacts, images, layers, links, jobs, lifecycle_rules, migrations.
|
||||
- Metadata documents: enforce majority write/read concerns, UTC timestamps, deterministic identifiers (SHA-256 digests, ULIDs for jobs).
|
||||
- Bootstrapper: create collections + indexes (unique digests, compound references, TTL on lifecycle rules, sparse lookup helpers) and run schema migrations.
|
||||
- Object storage (MinIO/S3): manage bucket layout (layers/, images/, indexes/, attest/), immutability policies, deterministic paths, and retention classes.
|
||||
- Services: coordinate dual-write between Mongo metadata and MinIO blobs, compute digests, manage reference counts, and expose typed repositories for WebService/Worker interactions.
|
||||
## Participants
|
||||
- Scanner.WebService binds configuration, runs bootstrapper during startup, and uses repositories to enqueue scans, look up catalog entries, and manage lifecycle policies.
|
||||
- Scanner.Worker writes job progress, uploads SBOM artefacts, and updates artefact reference counts.
|
||||
- Policy / Notify consumers resolve artefact metadata for reports via catalog APIs once those APIs are exposed.
|
||||
## Interfaces & contracts
|
||||
- Options configured via `ScannerStorageOptions` (Mongo + object store). `EnsureValid` rejects incomplete/unsafe configuration.
|
||||
- Mongo access uses `IMongoDatabase` scoped with majority `ReadConcern`/`WriteConcern` and cancellation tokens.
|
||||
- Object store abstraction (`IArtifactObjectStore`) encapsulates MinIO (S3) operations with server-side checksum validation and optional object-lock retain-until.
|
||||
- Service APIs follow deterministic naming: digests normalized (`sha256:<hex>`), ULIDs sortable, timestamps ISO-8601 UTC.
|
||||
## In/Out of scope
|
||||
In: persistence models, bootstrap/migrations, catalog repositories, object storage client, retention helpers, dual-write coordination, deterministic digests.
|
||||
Out: HTTP endpoints, queue processing, analyzer logic, SBOM composition, policy decisions, UI contracts.
|
||||
## Observability & security expectations
|
||||
- Emit structured logs for catalog/object-store writes including correlation IDs and digests.
|
||||
- Guard against double writes; idempotent operations keyed by digests.
|
||||
- Do not log credentials; redact connection strings. Honour cancellation tokens.
|
||||
- Metrics hooks (pending) must expose duration counters for Mongo and MinIO operations.
|
||||
## Tests
|
||||
- Integration tests with ephemeral Mongo/MinIO stubs covering bootstrapper indexes, TTL enforcement, dual-write coordination, digest determinism, and majority read/write concerns.
|
||||
@@ -0,0 +1,85 @@
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
/// <summary>
/// Discriminates the kind of artefact a catalog entry describes.
/// </summary>
/// <remarks>
/// Persisted numerically by the Mongo driver (no <c>BsonRepresentation</c> override
/// on <c>ArtifactDocument.Type</c>), so members must not be reordered or removed
/// once documents exist.
/// </remarks>
public enum ArtifactDocumentType
{
    LayerBom,
    ImageBom,
    Diff,
    Index,
    Attestation,
}
|
||||
|
||||
/// <summary>
/// Serialization format of a stored artefact payload.
/// </summary>
/// <remarks>
/// Persisted numerically by the Mongo driver (no <c>BsonRepresentation</c> override
/// on <c>ArtifactDocument.Format</c>), so members must not be reordered or removed
/// once documents exist.
/// </remarks>
public enum ArtifactDocumentFormat
{
    CycloneDxJson,
    CycloneDxProtobuf,
    SpdxJson,
    BomIndex,
    DsseJson,
}
|
||||
|
||||
/// <summary>
/// Catalog metadata for a stored artefact (SBOM, diff, index, or attestation payload).
/// Extra BSON elements are ignored so older documents keep deserializing after schema additions.
/// </summary>
[BsonIgnoreExtraElements]
public sealed class ArtifactDocument
{
    /// <summary>Deterministic identifier — presumably produced by <c>CatalogIdFactory.CreateArtifactId</c>; confirm with callers.</summary>
    [BsonId]
    public string Id { get; set; } = string.Empty;

    /// <summary>Artefact discriminator; persisted as an integer (see <see cref="ArtifactDocumentType"/>).</summary>
    [BsonElement("type")]
    public ArtifactDocumentType Type { get; set; }
        = ArtifactDocumentType.ImageBom;

    /// <summary>Payload format; defaults to CycloneDX JSON.</summary>
    [BsonElement("format")]
    public ArtifactDocumentFormat Format { get; set; }
        = ArtifactDocumentFormat.CycloneDxJson;

    /// <summary>MIME media type of the stored bytes.</summary>
    [BsonElement("mediaType")]
    public string MediaType { get; set; } = string.Empty;

    /// <summary>SHA-256 digest of the stored payload bytes.</summary>
    [BsonElement("bytesSha256")]
    public string BytesSha256 { get; set; } = string.Empty;

    /// <summary>Payload size in bytes. (Redundant explicit "= 0" removed; CLR default applies.)</summary>
    [BsonElement("sizeBytes")]
    public long SizeBytes { get; set; }

    /// <summary>Whether the object-store copy is held under an immutability policy.</summary>
    [BsonElement("immutable")]
    public bool Immutable { get; set; }

    /// <summary>Reference count maintained by the storage services.</summary>
    [BsonElement("refCount")]
    public long RefCount { get; set; }

    /// <summary>Optional Rekor transparency-log reference; omitted from BSON while null.</summary>
    [BsonElement("rekor")]
    [BsonIgnoreIfNull]
    public RekorReference? Rekor { get; set; }

    // NOTE(review): wall-clock defaults; callers needing deterministic/testable
    // timestamps should overwrite these (e.g. from an injected TimeProvider).
    [BsonElement("createdAt")]
    public DateTime CreatedAtUtc { get; set; } = DateTime.UtcNow;

    [BsonElement("updatedAt")]
    public DateTime UpdatedAtUtc { get; set; } = DateTime.UtcNow;

    /// <summary>Retention class consumed by lifecycle rules; defaults to "default".</summary>
    [BsonElement("ttlClass")]
    public string TtlClass { get; set; } = "default";
}
|
||||
|
||||
/// <summary>
/// Pointer to a Rekor transparency-log entry associated with an artefact.
/// All members are optional; null values are serialized as BSON null
/// (no <c>BsonIgnoreIfNull</c> here, unlike <c>ArtifactDocument.Rekor</c> itself).
/// </summary>
public sealed class RekorReference
{
    /// <summary>Rekor entry UUID. (Redundant explicit "= null" removed; CLR default applies.)</summary>
    [BsonElement("uuid")]
    public string? Uuid { get; set; }

    /// <summary>Rekor log index.</summary>
    [BsonElement("index")]
    public long? Index { get; set; }

    /// <summary>URL of the log entry.</summary>
    [BsonElement("url")]
    public string? Url { get; set; }
}
|
||||
@@ -0,0 +1,43 @@
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
/// <summary>
/// Builds deterministic catalog identifiers from artefact metadata and digests.
/// </summary>
public static class CatalogIdFactory
{
    /// <summary>
    /// Artifact id of the form "&lt;type-lowercase&gt;::&lt;normalized-digest&gt;".
    /// </summary>
    public static string CreateArtifactId(ArtifactDocumentType type, string digest)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(digest);

        var typeToken = type.ToString().ToLowerInvariant();
        var digestToken = NormalizeDigest(digest);
        return $"{typeToken}::{digestToken}";
    }

    /// <summary>
    /// Link id: lowercase hex SHA-256 over "type:normalized-digest:artifactId".
    /// Note the type token keeps its enum casing here (unlike CreateArtifactId).
    /// </summary>
    public static string CreateLinkId(LinkSourceType type, string fromDigest, string artifactId)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(fromDigest);
        ArgumentException.ThrowIfNullOrWhiteSpace(artifactId);

        return HashToLowerHex($"{type}:{NormalizeDigest(fromDigest)}:{artifactId}");
    }

    /// <summary>
    /// Lifecycle-rule id: lowercase hex SHA-256 over "artifactId:class",
    /// where a blank class falls back to "default".
    /// </summary>
    public static string CreateLifecycleRuleId(string artifactId, string @class)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(artifactId);

        var normalizedClass = string.IsNullOrWhiteSpace(@class)
            ? "default"
            : @class.Trim().ToLowerInvariant();
        return HashToLowerHex($"{artifactId}:{normalizedClass}");
    }

    // Lowercase hex SHA-256 of the UTF-8 bytes of the given string.
    private static string HashToLowerHex(string value)
    {
        var hashBytes = SHA256.HashData(Encoding.UTF8.GetBytes(value));
        return Convert.ToHexString(hashBytes).ToLowerInvariant();
    }

    // Canonical "<algorithm>:<hex>" in lowercase; a bare digest is assumed to be SHA-256.
    private static string NormalizeDigest(string digest)
    {
        if (!digest.Contains(':', StringComparison.Ordinal))
        {
            return $"sha256:{digest.Trim().ToLowerInvariant()}";
        }

        var segments = digest.Split(':', 2, StringSplitOptions.TrimEntries);
        return $"{segments[0].ToLowerInvariant()}:{segments[1].ToLowerInvariant()}";
    }
}
|
||||
@@ -0,0 +1,29 @@
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
/// <summary>
/// Catalog entry for a container image, keyed by its digest.
/// </summary>
[BsonIgnoreExtraElements]
public sealed class ImageDocument
{
    /// <summary>Image digest used as the document id.</summary>
    [BsonId]
    public string ImageDigest { get; set; } = string.Empty;

    /// <summary>Repository the image was observed in.</summary>
    [BsonElement("repository")]
    public string Repository { get; set; } = string.Empty;

    /// <summary>Optional tag; omitted from BSON while null. (Redundant "= null" removed.)</summary>
    [BsonElement("tag")]
    [BsonIgnoreIfNull]
    public string? Tag { get; set; }

    /// <summary>Image architecture string.</summary>
    [BsonElement("architecture")]
    public string Architecture { get; set; } = string.Empty;

    // NOTE(review): wall-clock defaults; callers needing deterministic timestamps
    // should overwrite these (e.g. from an injected TimeProvider).
    [BsonElement("createdAt")]
    public DateTime CreatedAtUtc { get; set; } = DateTime.UtcNow;

    [BsonElement("lastSeenAt")]
    public DateTime LastSeenAtUtc { get; set; } = DateTime.UtcNow;
}
|
||||
@@ -0,0 +1,54 @@
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
/// <summary>
/// Lifecycle states for a scanner job.
/// </summary>
/// <remarks>
/// Persisted numerically by the Mongo driver (no <c>BsonRepresentation</c> override
/// on <c>JobDocument.State</c>), so members must not be reordered once data exists.
/// </remarks>
public enum JobState
{
    Pending,
    Running,
    Succeeded,
    Failed,
    Cancelled,
}
|
||||
|
||||
/// <summary>
/// Persistent record of a scanner job (ids are ULIDs per module conventions — confirm with callers).
/// </summary>
[BsonIgnoreExtraElements]
public sealed class JobDocument
{
    [BsonId]
    public string Id { get; set; } = string.Empty;

    /// <summary>Job kind discriminator (free-form string).</summary>
    [BsonElement("kind")]
    public string Kind { get; set; } = string.Empty;

    /// <summary>Current lifecycle state; persisted numerically.</summary>
    [BsonElement("state")]
    public JobState State { get; set; } = JobState.Pending;

    /// <summary>Opaque job arguments captured as raw BSON.</summary>
    [BsonElement("args")]
    public BsonDocument Arguments { get; set; } = new();

    // NOTE(review): wall-clock default; overwrite for deterministic timestamps.
    [BsonElement("createdAt")]
    public DateTime CreatedAtUtc { get; set; } = DateTime.UtcNow;

    /// <summary>Start timestamp, if any; omitted from BSON while null. (Redundant "= null" removed.)</summary>
    [BsonElement("startedAt")]
    [BsonIgnoreIfNull]
    public DateTime? StartedAtUtc { get; set; }

    /// <summary>Completion timestamp, if any; omitted from BSON while null.</summary>
    [BsonElement("completedAt")]
    [BsonIgnoreIfNull]
    public DateTime? CompletedAtUtc { get; set; }

    /// <summary>Most recent heartbeat, if any; omitted from BSON while null.</summary>
    [BsonElement("heartbeatAt")]
    [BsonIgnoreIfNull]
    public DateTime? HeartbeatAtUtc { get; set; }

    /// <summary>Failure detail, if any; omitted from BSON while null.</summary>
    [BsonElement("error")]
    [BsonIgnoreIfNull]
    public string? Error { get; set; }
}
|
||||
@@ -0,0 +1,25 @@
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
/// <summary>
/// Catalog entry for an image layer, keyed by its digest.
/// </summary>
[BsonIgnoreExtraElements]
public sealed class LayerDocument
{
    /// <summary>Layer digest used as the document id.</summary>
    [BsonId]
    public string LayerDigest { get; set; } = string.Empty;

    /// <summary>MIME media type of the layer blob.</summary>
    [BsonElement("mediaType")]
    public string MediaType { get; set; } = string.Empty;

    /// <summary>Layer size in bytes. (Redundant explicit "= 0" removed; CLR default applies.)</summary>
    [BsonElement("sizeBytes")]
    public long SizeBytes { get; set; }

    // NOTE(review): wall-clock defaults; overwrite for deterministic timestamps.
    [BsonElement("createdAt")]
    public DateTime CreatedAtUtc { get; set; } = DateTime.UtcNow;

    [BsonElement("lastSeenAt")]
    public DateTime LastSeenAtUtc { get; set; } = DateTime.UtcNow;
}
|
||||
@@ -0,0 +1,25 @@
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
/// <summary>
/// Retention rule for a catalog artefact. The expiresAt field backs the TTL index
/// created by <c>EnsureLifecycleRuleTtlMigration</c> (name "lifecycle_expiresAt").
/// </summary>
[BsonIgnoreExtraElements]
public sealed class LifecycleRuleDocument
{
    [BsonId]
    public string Id { get; set; } = string.Empty;

    /// <summary>Artifact document id this rule governs.</summary>
    [BsonElement("artifactId")]
    public string ArtifactId { get; set; } = string.Empty;

    /// <summary>Retention class; "default" when unset.</summary>
    [BsonElement("class")]
    public string Class { get; set; } = "default";

    /// <summary>TTL anchor; omitted from BSON while null. (Redundant "= null" removed.)</summary>
    [BsonElement("expiresAt")]
    [BsonIgnoreIfNull]
    public DateTime? ExpiresAtUtc { get; set; }

    // NOTE(review): wall-clock default; overwrite for deterministic timestamps.
    [BsonElement("createdAt")]
    public DateTime CreatedAtUtc { get; set; } = DateTime.UtcNow;
}
|
||||
@@ -0,0 +1,30 @@
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
/// <summary>
/// Source side of a catalog link: an image digest or a layer digest.
/// </summary>
/// <remarks>
/// Persisted numerically by the Mongo driver (no <c>BsonRepresentation</c> override
/// on <c>LinkDocument.FromType</c>); do not reorder members.
/// </remarks>
public enum LinkSourceType
{
    Image,
    Layer,
}
|
||||
|
||||
/// <summary>
/// Catalog edge joining an image or layer (by digest) to a stored artefact.
/// Ids are presumably produced by <c>CatalogIdFactory.CreateLinkId</c> — confirm with callers.
/// </summary>
[BsonIgnoreExtraElements]
public sealed class LinkDocument
{
    [BsonId]
    public string Id { get; set; } = string.Empty;

    /// <summary>Whether the source side is an image or a layer.</summary>
    [BsonElement("fromType")]
    public LinkSourceType FromType { get; set; }
        = LinkSourceType.Image;

    /// <summary>Digest of the source image or layer.</summary>
    [BsonElement("fromDigest")]
    public string FromDigest { get; set; } = string.Empty;

    /// <summary>Target artifact document id.</summary>
    [BsonElement("artifactId")]
    public string ArtifactId { get; set; } = string.Empty;

    // NOTE(review): wall-clock default; overwrite for deterministic timestamps.
    [BsonElement("createdAt")]
    public DateTime CreatedAtUtc { get; set; }
        = DateTime.UtcNow;
}
|
||||
@@ -0,0 +1,89 @@
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
/// <summary>
/// MongoDB persistence model for runtime events emitted by the Zastava observer.
/// </summary>
/// <remarks>
/// NOTE(review): unlike the other catalog documents, this type does not carry
/// [BsonIgnoreExtraElements] — confirm whether strict deserialization is intentional.
/// </remarks>
public sealed class RuntimeEventDocument
{
    // Mongo-generated ObjectId (string representation); null until inserted.
    [BsonId]
    [BsonRepresentation(BsonType.ObjectId)]
    public string? Id { get; set; }

    // Producer-assigned event id; required on write.
    [BsonElement("eventId")]
    [BsonRequired]
    public string EventId { get; set; } = string.Empty;

    // Version of the event schema the payload conforms to.
    [BsonElement("schemaVersion")]
    [BsonRequired]
    public string SchemaVersion { get; set; } = string.Empty;

    [BsonElement("tenant")]
    [BsonRequired]
    public string Tenant { get; set; } = string.Empty;

    // Node (host) that emitted the event.
    [BsonElement("node")]
    [BsonRequired]
    public string Node { get; set; } = string.Empty;

    // Event kind discriminator.
    // NOTE(review): [BsonRepresentation(BsonType.String)] is redundant on a string
    // property — it reads like a leftover from an enum-typed field; confirm and drop if so.
    [BsonElement("kind")]
    [BsonRepresentation(BsonType.String)]
    [BsonRequired]
    public string Kind { get; set; } = string.Empty;

    // When the event occurred; stored/round-tripped as UTC.
    [BsonElement("when")]
    [BsonDateTimeOptions(Kind = DateTimeKind.Utc)]
    public DateTime When { get; set; }

    // When the storage plane received the event (UTC).
    [BsonElement("receivedAt")]
    [BsonDateTimeOptions(Kind = DateTimeKind.Utc)]
    public DateTime ReceivedAt { get; set; }

    // Expiry timestamp (UTC); presumably paired with a TTL index — confirm in the bootstrapper.
    [BsonElement("expiresAt")]
    [BsonDateTimeOptions(Kind = DateTimeKind.Utc)]
    public DateTime ExpiresAt { get; set; }

    // Optional workload-location context (all nulls are persisted as BSON null —
    // no BsonIgnoreIfNull on this document).
    [BsonElement("platform")]
    public string? Platform { get; set; }

    [BsonElement("namespace")]
    public string? Namespace { get; set; }

    [BsonElement("pod")]
    public string? Pod { get; set; }

    [BsonElement("container")]
    public string? Container { get; set; }

    [BsonElement("containerId")]
    public string? ContainerId { get; set; }

    // Optional image / engine provenance.
    [BsonElement("imageRef")]
    public string? ImageRef { get; set; }

    [BsonElement("imageDigest")]
    public string? ImageDigest { get; set; }

    [BsonElement("engine")]
    public string? Engine { get; set; }

    [BsonElement("engineVersion")]
    public string? EngineVersion { get; set; }

    [BsonElement("baselineDigest")]
    public string? BaselineDigest { get; set; }

    [BsonElement("imageSigned")]
    public bool? ImageSigned { get; set; }

    [BsonElement("sbomReferrer")]
    public string? SbomReferrer { get; set; }

    [BsonElement("buildId")]
    public string? BuildId { get; set; }

    // Raw event payload preserved verbatim.
    [BsonElement("payload")]
    public BsonDocument Payload { get; set; } = new();
}
|
||||
@@ -0,0 +1,180 @@
|
||||
using System;
|
||||
using System.Net.Http;
|
||||
using Amazon;
|
||||
using Amazon.S3;
|
||||
using Microsoft.Extensions.Configuration;
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using Microsoft.Extensions.DependencyInjection.Extensions;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Migrations;
|
||||
using StellaOps.Scanner.Storage.Mongo;
|
||||
using StellaOps.Scanner.Storage.ObjectStore;
|
||||
using StellaOps.Scanner.Storage.Repositories;
|
||||
using StellaOps.Scanner.Storage.Services;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Extensions;
|
||||
|
||||
/// <summary>
/// Dependency-injection wiring for the scanner storage plane: Mongo client/database,
/// collection provider, migrations, repositories, and the artifact object store
/// (RustFS over HTTP, or S3/MinIO).
/// </summary>
public static class ServiceCollectionExtensions
{
    /// <summary>
    /// Registers scanner storage services, configuring <see cref="ScannerStorageOptions"/>
    /// via a delegate. Options are validated after configuration through <c>EnsureValid</c>.
    /// </summary>
    public static IServiceCollection AddScannerStorage(this IServiceCollection services, Action<ScannerStorageOptions> configure)
    {
        // Fix: validate the extension receiver too, not just the delegate.
        ArgumentNullException.ThrowIfNull(services);
        ArgumentNullException.ThrowIfNull(configure);

        services.AddOptions<ScannerStorageOptions>().Configure(configure).PostConfigure(options => options.EnsureValid());
        RegisterScannerStorageServices(services);
        return services;
    }

    /// <summary>
    /// Registers scanner storage services, binding <see cref="ScannerStorageOptions"/>
    /// from the supplied configuration section.
    /// </summary>
    public static IServiceCollection AddScannerStorage(this IServiceCollection services, IConfiguration configuration)
    {
        // Fix: validate the extension receiver too, not just the configuration.
        ArgumentNullException.ThrowIfNull(services);
        ArgumentNullException.ThrowIfNull(configuration);

        services.AddOptions<ScannerStorageOptions>()
            .Bind(configuration)
            .PostConfigure(options => options.EnsureValid());

        RegisterScannerStorageServices(services);
        return services;
    }

    // Shared registrations for both AddScannerStorage overloads. TryAdd* keeps any
    // caller-supplied registrations (e.g. test doubles) authoritative.
    private static void RegisterScannerStorageServices(IServiceCollection services)
    {
        services.TryAddSingleton<TimeProvider>(TimeProvider.System);
        services.TryAddSingleton(CreateMongoClient);
        services.TryAddSingleton(CreateMongoDatabase);
        services.TryAddSingleton<MongoCollectionProvider>();
        services.TryAddEnumerable(ServiceDescriptor.Singleton<IMongoMigration, EnsureLifecycleRuleTtlMigration>());
        services.TryAddSingleton(provider =>
        {
            var migrations = provider.GetServices<IMongoMigration>();
            return new MongoMigrationRunner(
                provider.GetRequiredService<IMongoDatabase>(),
                migrations,
                provider.GetRequiredService<ILogger<MongoMigrationRunner>>(),
                TimeProvider.System);
        });

        services.TryAddSingleton<MongoBootstrapper>();
        services.TryAddSingleton<ArtifactRepository>();
        services.TryAddSingleton<ImageRepository>();
        services.TryAddSingleton<LayerRepository>();
        services.TryAddSingleton<LinkRepository>();
        services.TryAddSingleton<JobRepository>();
        services.TryAddSingleton<LifecycleRuleRepository>();
        services.TryAddSingleton<RuntimeEventRepository>();

        // Named HttpClient for the RustFS driver; all configuration below is a
        // no-op when the configured object-store driver is not RustFS.
        services.AddHttpClient(RustFsArtifactObjectStore.HttpClientName)
            .ConfigureHttpClient((sp, client) =>
            {
                var options = sp.GetRequiredService<IOptions<ScannerStorageOptions>>().Value.ObjectStore;
                if (!options.IsRustFsDriver())
                {
                    return;
                }

                if (!Uri.TryCreate(options.RustFs.BaseUrl, UriKind.Absolute, out var baseUri))
                {
                    throw new InvalidOperationException("RustFS baseUrl must be a valid absolute URI.");
                }

                client.BaseAddress = baseUri;
                client.Timeout = options.RustFs.Timeout;

                foreach (var header in options.Headers)
                {
                    client.DefaultRequestHeaders.TryAddWithoutValidation(header.Key, header.Value);
                }

                // Optional API-key header; only attached when both name and value are present.
                if (!string.IsNullOrWhiteSpace(options.RustFs.ApiKeyHeader)
                    && !string.IsNullOrWhiteSpace(options.RustFs.ApiKey))
                {
                    client.DefaultRequestHeaders.TryAddWithoutValidation(options.RustFs.ApiKeyHeader, options.RustFs.ApiKey);
                }
            })
            .ConfigurePrimaryHttpMessageHandler(sp =>
            {
                var options = sp.GetRequiredService<IOptions<ScannerStorageOptions>>().Value.ObjectStore;
                if (!options.IsRustFsDriver())
                {
                    return new HttpClientHandler();
                }

                var handler = new HttpClientHandler();
                if (options.RustFs.AllowInsecureTls)
                {
                    // Deliberate opt-in for self-signed RustFS endpoints; never enable in production.
                    handler.ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator;
                }

                return handler;
            });

        services.TryAddSingleton(CreateAmazonS3Client);
        services.TryAddSingleton<IArtifactObjectStore>(CreateArtifactObjectStore);
        services.TryAddSingleton<ArtifactStorageService>();
    }

    // Builds the shared IMongoClient with retry, timeout, and read/write-concern settings
    // derived from validated ScannerStorageOptions.
    private static IMongoClient CreateMongoClient(IServiceProvider provider)
    {
        var options = provider.GetRequiredService<IOptions<ScannerStorageOptions>>().Value;
        options.EnsureValid();

        var settings = MongoClientSettings.FromConnectionString(options.Mongo.ConnectionString);
        settings.RetryReads = true;
        settings.RetryWrites = true;
        settings.DirectConnection = false;
        settings.ReadPreference = ReadPreference.PrimaryPreferred;
        settings.ServerSelectionTimeout = options.Mongo.CommandTimeout;
        settings.ConnectTimeout = options.Mongo.CommandTimeout;
        settings.SocketTimeout = options.Mongo.CommandTimeout;
        settings.ReadConcern = options.Mongo.UseMajorityReadConcern ? ReadConcern.Majority : ReadConcern.Local;
        settings.WriteConcern = options.Mongo.UseMajorityWriteConcern ? WriteConcern.WMajority : WriteConcern.W1;

        return new MongoClient(settings);
    }

    // Resolves the configured database from the shared client.
    private static IMongoDatabase CreateMongoDatabase(IServiceProvider provider)
    {
        var options = provider.GetRequiredService<IOptions<ScannerStorageOptions>>().Value;
        var client = provider.GetRequiredService<IMongoClient>();
        var databaseName = options.Mongo.ResolveDatabaseName();
        return client.GetDatabase(databaseName);
    }

    // S3/MinIO client; honours a custom service URL and path-style addressing (MinIO).
    private static IAmazonS3 CreateAmazonS3Client(IServiceProvider provider)
    {
        var options = provider.GetRequiredService<IOptions<ScannerStorageOptions>>().Value.ObjectStore;
        var config = new AmazonS3Config
        {
            RegionEndpoint = RegionEndpoint.GetBySystemName(options.Region),
            ForcePathStyle = options.ForcePathStyle,
        };

        if (!string.IsNullOrWhiteSpace(options.ServiceUrl))
        {
            config.ServiceURL = options.ServiceUrl;
        }

        return new AmazonS3Client(config);
    }

    // Selects the object-store implementation based on the configured driver.
    private static IArtifactObjectStore CreateArtifactObjectStore(IServiceProvider provider)
    {
        var options = provider.GetRequiredService<IOptions<ScannerStorageOptions>>();
        var objectStore = options.Value.ObjectStore;

        if (objectStore.IsRustFsDriver())
        {
            return new RustFsArtifactObjectStore(
                provider.GetRequiredService<IHttpClientFactory>(),
                options,
                provider.GetRequiredService<ILogger<RustFsArtifactObjectStore>>());
        }

        return new S3ArtifactObjectStore(
            provider.GetRequiredService<IAmazonS3>(),
            options,
            provider.GetRequiredService<ILogger<S3ArtifactObjectStore>>());
    }
}
|
||||
@@ -0,0 +1,30 @@
|
||||
using System.Linq;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Migrations;
|
||||
|
||||
/// <summary>
/// Ensures the lifecycle_rules collection has a TTL index on expiresAt
/// (ExpireAfter = TimeSpan.Zero, i.e. documents expire at their expiresAt instant).
/// </summary>
public sealed class EnsureLifecycleRuleTtlMigration : IMongoMigration
{
    public string Id => "20251018-lifecycle-ttl";

    public string Description => "Ensure lifecycle_rules expiresAt TTL index exists.";

    public async Task ApplyAsync(IMongoDatabase database, CancellationToken cancellationToken)
    {
        var collection = database.GetCollection<LifecycleRuleDocument>(ScannerStorageDefaults.Collections.LifecycleRules);
        // Idempotency guard: skip creation when an index with our name already exists.
        var indexes = await collection.Indexes.ListAsync(cancellationToken).ConfigureAwait(false);
        var existing = await indexes.ToListAsync(cancellationToken).ConfigureAwait(false);

        if (existing.Any(x => string.Equals(x["name"].AsString, "lifecycle_expiresAt", StringComparison.Ordinal)))
        {
            return;
        }

        var model = new CreateIndexModel<LifecycleRuleDocument>(
            Builders<LifecycleRuleDocument>.IndexKeys.Ascending(x => x.ExpiresAtUtc),
            new CreateIndexOptions { Name = "lifecycle_expiresAt", ExpireAfter = TimeSpan.Zero });

        await collection.Indexes.CreateOneAsync(model, cancellationToken: cancellationToken).ConfigureAwait(false);
    }
}
|
||||
@@ -0,0 +1,12 @@
|
||||
using MongoDB.Driver;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Migrations;
|
||||
|
||||
/// <summary>
/// A one-shot Mongo schema migration discovered via DI and executed by
/// <c>MongoMigrationRunner</c>.
/// </summary>
public interface IMongoMigration
{
    /// <summary>Unique id; the runner sorts migrations by id with ordinal comparison.</summary>
    string Id { get; }

    /// <summary>Human-readable description recorded alongside the applied migration.</summary>
    string Description { get; }

    /// <summary>Applies the migration against the given database.</summary>
    Task ApplyAsync(IMongoDatabase database, CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,19 @@
|
||||
using MongoDB.Bson.Serialization.Attributes;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Migrations;
|
||||
|
||||
/// <summary>
/// Ledger entry recording an applied migration; the document id is the migration id.
/// </summary>
[BsonIgnoreExtraElements]
internal sealed class MongoMigrationDocument
{
    [BsonId]
    public string Id { get; set; } = string.Empty;

    /// <summary>Optional description; omitted from BSON while null. (Redundant "= null" removed.)</summary>
    [BsonElement("description")]
    [BsonIgnoreIfNull]
    public string? Description { get; set; }

    // NOTE(review): wall-clock default; MongoMigrationRunner overwrites this from its TimeProvider.
    [BsonElement("appliedAt")]
    public DateTime AppliedAtUtc { get; set; }
        = DateTime.UtcNow;
}
|
||||
@@ -0,0 +1,94 @@
|
||||
using Microsoft.Extensions.Logging;
|
||||
using MongoDB.Driver;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Migrations;
|
||||
|
||||
/// <summary>
/// Applies pending <see cref="IMongoMigration"/>s in deterministic (ordinal id) order,
/// recording each applied migration in the migrations collection so reruns are no-ops.
/// </summary>
public sealed class MongoMigrationRunner
{
    private readonly IMongoDatabase _database;
    private readonly IReadOnlyList<IMongoMigration> _migrations;
    private readonly ILogger<MongoMigrationRunner> _logger;
    private readonly TimeProvider _timeProvider;

    /// <summary>
    /// Creates a runner. Migrations are snapshotted and sorted by id with ordinal
    /// comparison so execution order is stable regardless of registration order.
    /// </summary>
    public MongoMigrationRunner(
        IMongoDatabase database,
        IEnumerable<IMongoMigration> migrations,
        ILogger<MongoMigrationRunner> logger,
        TimeProvider? timeProvider = null)
    {
        _database = database ?? throw new ArgumentNullException(nameof(database));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _timeProvider = timeProvider ?? TimeProvider.System;
        _migrations = (migrations ?? throw new ArgumentNullException(nameof(migrations)))
            .OrderBy(m => m.Id, StringComparer.Ordinal)
            .ToArray();
    }

    /// <summary>
    /// Runs every migration not yet recorded as applied. A failing migration is
    /// logged and rethrown; subsequent migrations are not attempted.
    /// </summary>
    public async Task RunAsync(CancellationToken cancellationToken)
    {
        if (_migrations.Count == 0)
        {
            return;
        }

        await EnsureCollectionExistsAsync(_database, cancellationToken).ConfigureAwait(false);
        var collection = _database.GetCollection<MongoMigrationDocument>(ScannerStorageDefaults.Collections.Migrations);
        var applied = await LoadAppliedMigrationIdsAsync(collection, cancellationToken).ConfigureAwait(false);

        foreach (var migration in _migrations)
        {
            // Fix: use the HashSet's own Contains — the set is already built with
            // StringComparer.Ordinal, so this is O(1); the previous
            // Contains(id, StringComparer.Ordinal) bound to the LINQ extension
            // and scanned the set linearly.
            if (applied.Contains(migration.Id))
            {
                continue;
            }

            _logger.LogInformation("Applying scanner Mongo migration {MigrationId}: {Description}", migration.Id, migration.Description);
            try
            {
                await migration.ApplyAsync(_database, cancellationToken).ConfigureAwait(false);
                var document = new MongoMigrationDocument
                {
                    Id = migration.Id,
                    Description = string.IsNullOrWhiteSpace(migration.Description) ? null : migration.Description,
                    AppliedAtUtc = _timeProvider.GetUtcNow().UtcDateTime,
                };

                await collection.InsertOneAsync(document, cancellationToken: cancellationToken).ConfigureAwait(false);
                _logger.LogInformation("Scanner Mongo migration {MigrationId} applied", migration.Id);
            }
            catch (Exception ex)
            {
                _logger.LogError(ex, "Scanner Mongo migration {MigrationId} failed", migration.Id);
                throw;
            }
        }
    }

    // Creates the migrations collection on first run (ListCollectionNames avoids an
    // implicit create-on-insert surprise in sharded/locked-down deployments).
    private static async Task EnsureCollectionExistsAsync(IMongoDatabase database, CancellationToken cancellationToken)
    {
        using var cursor = await database.ListCollectionNamesAsync(cancellationToken: cancellationToken).ConfigureAwait(false);
        var names = await cursor.ToListAsync(cancellationToken).ConfigureAwait(false);
        if (!names.Contains(ScannerStorageDefaults.Collections.Migrations, StringComparer.Ordinal))
        {
            await database.CreateCollectionAsync(ScannerStorageDefaults.Collections.Migrations, cancellationToken: cancellationToken).ConfigureAwait(false);
        }
    }

    // Loads the ids of all previously applied migrations into an ordinal hash set.
    private static async Task<HashSet<string>> LoadAppliedMigrationIdsAsync(
        IMongoCollection<MongoMigrationDocument> collection,
        CancellationToken cancellationToken)
    {
        using var cursor = await collection.FindAsync(FilterDefinition<MongoMigrationDocument>.Empty, cancellationToken: cancellationToken).ConfigureAwait(false);
        var documents = await cursor.ToListAsync(cancellationToken).ConfigureAwait(false);
        var ids = new HashSet<string>(StringComparer.Ordinal);
        foreach (var doc in documents)
        {
            if (!string.IsNullOrWhiteSpace(doc.Id))
            {
                ids.Add(doc.Id);
            }
        }

        return ids;
    }
}
|
||||
@@ -0,0 +1,219 @@
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
using StellaOps.Scanner.Storage.Migrations;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Mongo;
|
||||
|
||||
public sealed class MongoBootstrapper
|
||||
{
|
||||
private readonly IMongoDatabase _database;
|
||||
private readonly ScannerStorageOptions _options;
|
||||
private readonly ILogger<MongoBootstrapper> _logger;
|
||||
private readonly MongoMigrationRunner _migrationRunner;
|
||||
|
||||
    /// <summary>
    /// Wires the bootstrapper with the database handle, storage options, logger,
    /// and the migration runner invoked after collections/indexes are ensured.
    /// </summary>
    public MongoBootstrapper(
        IMongoDatabase database,
        IOptions<ScannerStorageOptions> options,
        ILogger<MongoBootstrapper> logger,
        MongoMigrationRunner migrationRunner)
    {
        _database = database ?? throw new ArgumentNullException(nameof(database));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _migrationRunner = migrationRunner ?? throw new ArgumentNullException(nameof(migrationRunner));
        _options = (options ?? throw new ArgumentNullException(nameof(options))).Value;
    }
|
||||
|
||||
    /// <summary>
    /// Startup entry point: validates options, creates missing collections,
    /// ensures indexes, then runs pending schema migrations — in that order.
    /// </summary>
    public async Task InitializeAsync(CancellationToken cancellationToken)
    {
        _options.EnsureValid();

        await EnsureCollectionsAsync(cancellationToken).ConfigureAwait(false);
        await EnsureIndexesAsync(cancellationToken).ConfigureAwait(false);
        await _migrationRunner.RunAsync(cancellationToken).ConfigureAwait(false);
    }
|
||||
|
||||
    // Creates any missing catalog collections; collections that already exist
    // are left untouched (names compared ordinally).
    private async Task EnsureCollectionsAsync(CancellationToken cancellationToken)
    {
        var targetCollections = new[]
        {
            ScannerStorageDefaults.Collections.Artifacts,
            ScannerStorageDefaults.Collections.Images,
            ScannerStorageDefaults.Collections.Layers,
            ScannerStorageDefaults.Collections.Links,
            ScannerStorageDefaults.Collections.Jobs,
            ScannerStorageDefaults.Collections.LifecycleRules,
            ScannerStorageDefaults.Collections.RuntimeEvents,
            ScannerStorageDefaults.Collections.Migrations,
        };

        using var cursor = await _database.ListCollectionNamesAsync(cancellationToken: cancellationToken).ConfigureAwait(false);
        var existing = await cursor.ToListAsync(cancellationToken).ConfigureAwait(false);

        foreach (var name in targetCollections)
        {
            if (existing.Contains(name, StringComparer.Ordinal))
            {
                continue;
            }

            _logger.LogInformation("Creating Mongo collection {Collection}", name);
            await _database.CreateCollectionAsync(name, cancellationToken: cancellationToken).ConfigureAwait(false);
        }
    }
|
||||
|
||||
    // Ensures per-collection indexes sequentially (one collection at a time).
    private async Task EnsureIndexesAsync(CancellationToken cancellationToken)
    {
        await EnsureArtifactIndexesAsync(cancellationToken).ConfigureAwait(false);
        await EnsureImageIndexesAsync(cancellationToken).ConfigureAwait(false);
        await EnsureLayerIndexesAsync(cancellationToken).ConfigureAwait(false);
        await EnsureLinkIndexesAsync(cancellationToken).ConfigureAwait(false);
        await EnsureJobIndexesAsync(cancellationToken).ConfigureAwait(false);
        await EnsureLifecycleIndexesAsync(cancellationToken).ConfigureAwait(false);
        await EnsureRuntimeEventIndexesAsync(cancellationToken).ConfigureAwait(false);
    }
|
||||
|
||||
    // Artifact indexes: unique (type, bytesSha256) for digest-deduplicated storage,
    // plus refCount and createdAt lookup helpers.
    private Task EnsureArtifactIndexesAsync(CancellationToken cancellationToken)
    {
        var collection = _database.GetCollection<ArtifactDocument>(ScannerStorageDefaults.Collections.Artifacts);
        var models = new List<CreateIndexModel<ArtifactDocument>>
        {
            new(
                Builders<ArtifactDocument>.IndexKeys
                    .Ascending(x => x.Type)
                    .Ascending(x => x.BytesSha256),
                new CreateIndexOptions { Name = "artifact_type_bytesSha256", Unique = true }),
            new(
                Builders<ArtifactDocument>.IndexKeys.Ascending(x => x.RefCount),
                new CreateIndexOptions { Name = "artifact_refCount" }),
            new(
                Builders<ArtifactDocument>.IndexKeys.Ascending(x => x.CreatedAtUtc),
                new CreateIndexOptions { Name = "artifact_createdAt" })
        };

        return collection.Indexes.CreateManyAsync(models, cancellationToken);
    }
|
||||
|
||||
/// <summary>
/// Declares indexes for the images collection.
/// </summary>
private Task EnsureImageIndexesAsync(CancellationToken cancellationToken)
{
    var collection = _database.GetCollection<ImageDocument>(ScannerStorageDefaults.Collections.Images);
    var models = new List<CreateIndexModel<ImageDocument>>
    {
        // Lookup by repository + tag; intentionally non-unique (a tag may map
        // to multiple digests over time).
        new(
            Builders<ImageDocument>.IndexKeys
                .Ascending(x => x.Repository)
                .Ascending(x => x.Tag),
            new CreateIndexOptions { Name = "image_repo_tag" }),
        // Recency index for staleness/retention queries.
        new(
            Builders<ImageDocument>.IndexKeys.Ascending(x => x.LastSeenAtUtc),
            new CreateIndexOptions { Name = "image_lastSeen" })
    };

    return collection.Indexes.CreateManyAsync(models, cancellationToken);
}
|
||||
|
||||
/// <summary>
/// Declares the single recency index for the layers collection.
/// </summary>
private Task EnsureLayerIndexesAsync(CancellationToken cancellationToken)
{
    var layers = _database.GetCollection<LayerDocument>(ScannerStorageDefaults.Collections.Layers);

    // Recency index so retention sweeps can find layers that have not been
    // observed recently.
    var lastSeenIndex = new CreateIndexModel<LayerDocument>(
        Builders<LayerDocument>.IndexKeys.Ascending(x => x.LastSeenAtUtc),
        new CreateIndexOptions { Name = "layer_lastSeen" });

    return layers.Indexes.CreateManyAsync(new[] { lastSeenIndex }, cancellationToken);
}
|
||||
|
||||
/// <summary>
/// Declares indexes for the links collection (source → artifact references).
/// </summary>
private Task EnsureLinkIndexesAsync(CancellationToken cancellationToken)
{
    var collection = _database.GetCollection<LinkDocument>(ScannerStorageDefaults.Collections.Links);
    var models = new List<CreateIndexModel<LinkDocument>>
    {
        // Unique compound key: a given source (type + digest) may reference a
        // particular artifact at most once, which keeps ref-counts consistent.
        new(
            Builders<LinkDocument>.IndexKeys
                .Ascending(x => x.FromType)
                .Ascending(x => x.FromDigest)
                .Ascending(x => x.ArtifactId),
            new CreateIndexOptions { Name = "link_from_artifact", Unique = true })
    };

    return collection.Indexes.CreateManyAsync(models, cancellationToken);
}
|
||||
|
||||
/// <summary>
/// Declares indexes for the jobs collection.
/// </summary>
private Task EnsureJobIndexesAsync(CancellationToken cancellationToken)
{
    var collection = _database.GetCollection<JobDocument>(ScannerStorageDefaults.Collections.Jobs);
    var models = new List<CreateIndexModel<JobDocument>>
    {
        // Supports "oldest pending/running jobs" style queries by state.
        new(
            Builders<JobDocument>.IndexKeys
                .Ascending(x => x.State)
                .Ascending(x => x.CreatedAtUtc),
            new CreateIndexOptions { Name = "job_state_createdAt" }),
        // Supports stale-heartbeat scans (see JobRepository.ListStaleAsync).
        new(
            Builders<JobDocument>.IndexKeys.Ascending(x => x.HeartbeatAtUtc),
            new CreateIndexOptions { Name = "job_heartbeat" })
    };

    return collection.Indexes.CreateManyAsync(models, cancellationToken);
}
|
||||
|
||||
/// <summary>
/// Declares indexes for the lifecycle_rules collection, including the TTL
/// index that lets Mongo expire rules automatically.
/// </summary>
private Task EnsureLifecycleIndexesAsync(CancellationToken cancellationToken)
{
    var collection = _database.GetCollection<LifecycleRuleDocument>(ScannerStorageDefaults.Collections.LifecycleRules);

    // TTL index: ExpireAfter = TimeSpan.Zero means Mongo removes each document
    // as soon as its ExpiresAtUtc timestamp passes.
    var expiresIndex = new CreateIndexModel<LifecycleRuleDocument>(
        Builders<LifecycleRuleDocument>.IndexKeys.Ascending(x => x.ExpiresAtUtc),
        new CreateIndexOptions
        {
            Name = "lifecycle_expiresAt",
            ExpireAfter = TimeSpan.Zero,
        });

    // Unique (artifact, retention class): at most one rule per pairing.
    var artifactIndex = new CreateIndexModel<LifecycleRuleDocument>(
        Builders<LifecycleRuleDocument>.IndexKeys
            .Ascending(x => x.ArtifactId)
            .Ascending(x => x.Class),
        new CreateIndexOptions { Name = "lifecycle_artifact_class", Unique = true });

    return collection.Indexes.CreateManyAsync(new[] { expiresIndex, artifactIndex }, cancellationToken);
}
|
||||
|
||||
/// <summary>
/// Declares indexes for the runtime_events collection: a unique event id for
/// idempotent ingest, several query-shaped compound indexes, and a TTL index.
/// </summary>
private Task EnsureRuntimeEventIndexesAsync(CancellationToken cancellationToken)
{
    var collection = _database.GetCollection<RuntimeEventDocument>(ScannerStorageDefaults.Collections.RuntimeEvents);
    var models = new List<CreateIndexModel<RuntimeEventDocument>>
    {
        // Unique event id: makes duplicate delivery of the same event a no-op insert.
        new(
            Builders<RuntimeEventDocument>.IndexKeys.Ascending(x => x.EventId),
            new CreateIndexOptions { Name = "runtime_event_eventId", Unique = true }),
        // Time-ordered events per tenant/node.
        new(
            Builders<RuntimeEventDocument>.IndexKeys
                .Ascending(x => x.Tenant)
                .Ascending(x => x.Node)
                .Ascending(x => x.When),
            new CreateIndexOptions { Name = "runtime_event_tenant_node_when" }),
        // Newest-first events for a given image digest.
        new(
            Builders<RuntimeEventDocument>.IndexKeys
                .Ascending(x => x.ImageDigest)
                .Descending(x => x.When),
            new CreateIndexOptions { Name = "runtime_event_imageDigest_when" }),
        // Newest-first events for a given build id.
        new(
            Builders<RuntimeEventDocument>.IndexKeys
                .Ascending(x => x.BuildId)
                .Descending(x => x.When),
            new CreateIndexOptions { Name = "runtime_event_buildId_when" }),
        // TTL: documents are removed once ExpiresAt passes (ExpireAfter zero).
        new(
            Builders<RuntimeEventDocument>.IndexKeys.Ascending(x => x.ExpiresAt),
            new CreateIndexOptions
            {
                Name = "runtime_event_expiresAt",
                ExpireAfter = TimeSpan.Zero
            })
    };

    return collection.Indexes.CreateManyAsync(models, cancellationToken);
}
|
||||
}
|
||||
@@ -0,0 +1,42 @@
|
||||
using Microsoft.Extensions.Options;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Mongo;
|
||||
|
||||
/// <summary>
/// Hands out typed Mongo collections for the scanner catalog, applying the
/// configured majority read/write concerns before each collection is resolved.
/// </summary>
public sealed class MongoCollectionProvider
{
    private readonly IMongoDatabase _database;
    private readonly MongoOptions _options;

    public MongoCollectionProvider(IMongoDatabase database, IOptions<ScannerStorageOptions> options)
    {
        ArgumentNullException.ThrowIfNull(database);
        ArgumentNullException.ThrowIfNull(options);

        _database = database;
        _options = options.Value.Mongo;
    }

    public IMongoCollection<ArtifactDocument> Artifacts => GetCollection<ArtifactDocument>(ScannerStorageDefaults.Collections.Artifacts);
    public IMongoCollection<ImageDocument> Images => GetCollection<ImageDocument>(ScannerStorageDefaults.Collections.Images);
    public IMongoCollection<LayerDocument> Layers => GetCollection<LayerDocument>(ScannerStorageDefaults.Collections.Layers);
    public IMongoCollection<LinkDocument> Links => GetCollection<LinkDocument>(ScannerStorageDefaults.Collections.Links);
    public IMongoCollection<JobDocument> Jobs => GetCollection<JobDocument>(ScannerStorageDefaults.Collections.Jobs);
    public IMongoCollection<LifecycleRuleDocument> LifecycleRules => GetCollection<LifecycleRuleDocument>(ScannerStorageDefaults.Collections.LifecycleRules);
    public IMongoCollection<RuntimeEventDocument> RuntimeEvents => GetCollection<RuntimeEventDocument>(ScannerStorageDefaults.Collections.RuntimeEvents);

    /// <summary>
    /// Resolves a collection, first layering on majority read/write concerns
    /// when the storage options ask for them.
    /// </summary>
    private IMongoCollection<TDocument> GetCollection<TDocument>(string name)
    {
        var scoped = _options.UseMajorityReadConcern
            ? _database.WithReadConcern(ReadConcern.Majority)
            : _database;

        if (_options.UseMajorityWriteConcern)
        {
            scoped = scoped.WithWriteConcern(WriteConcern.WMajority);
        }

        return scoped.GetCollection<TDocument>(name);
    }
}
|
||||
@@ -0,0 +1,12 @@
|
||||
namespace StellaOps.Scanner.Storage.ObjectStore;
|
||||
|
||||
/// <summary>
/// Abstraction over the artifact blob store. Implementations persist, fetch,
/// and delete objects addressed by bucket + key, honouring the immutability
/// and retention hints carried on the descriptor.
/// </summary>
public interface IArtifactObjectStore
{
    /// <summary>Uploads <paramref name="content"/> to the location described by <paramref name="descriptor"/>.</summary>
    Task PutAsync(ArtifactObjectDescriptor descriptor, Stream content, CancellationToken cancellationToken);

    /// <summary>Downloads the object, or returns <c>null</c> when it does not exist.</summary>
    Task<Stream?> GetAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken);

    /// <summary>Deletes the object; a missing object is treated as success by the known implementations.</summary>
    Task DeleteAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken);
}

/// <summary>
/// Addresses a single stored object. <c>Immutable</c> requests write-once
/// semantics; <c>RetainFor</c> optionally bounds the retention period (when
/// null, implementations fall back to their configured defaults).
/// </summary>
public sealed record ArtifactObjectDescriptor(string Bucket, string Key, bool Immutable, TimeSpan? RetainFor = null);
|
||||
@@ -0,0 +1,237 @@
|
||||
using System.Globalization;
|
||||
using System.Net;
|
||||
using System.Net.Http;
|
||||
using System.Net.Http.Headers;
|
||||
using System.Text;
|
||||
using System.Linq;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.ObjectStore;
|
||||
|
||||
/// <summary>
/// <see cref="IArtifactObjectStore"/> backed by a RustFS HTTP endpoint.
/// Objects are addressed as <c>buckets/{bucket}/objects/{key}</c> relative to
/// the configured base URL; immutability and retention are conveyed via
/// custom request headers.
/// </summary>
public sealed class RustFsArtifactObjectStore : IArtifactObjectStore
{
    // Named client so DI can configure base handler/timeouts centrally.
    internal const string HttpClientName = "scanner-storage-rustfs";

    private const string ImmutableHeader = "X-RustFS-Immutable";
    private const string RetainSecondsHeader = "X-RustFS-Retain-Seconds";
    private static readonly MediaTypeHeaderValue OctetStream = new("application/octet-stream");

    private readonly IHttpClientFactory _httpClientFactory;
    private readonly IOptions<ScannerStorageOptions> _options;
    private readonly ILogger<RustFsArtifactObjectStore> _logger;

    public RustFsArtifactObjectStore(
        IHttpClientFactory httpClientFactory,
        IOptions<ScannerStorageOptions> options,
        ILogger<RustFsArtifactObjectStore> logger)
    {
        _httpClientFactory = httpClientFactory ?? throw new ArgumentNullException(nameof(httpClientFactory));
        _options = options ?? throw new ArgumentNullException(nameof(options));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Uploads the object via HTTP PUT. The content stream is fully buffered
    /// into memory first (see <see cref="CreateHttpContent"/>), so very large
    /// payloads pay a proportional memory cost.
    /// </summary>
    /// <exception cref="InvalidOperationException">Non-success HTTP status, or driver misconfiguration.</exception>
    public async Task PutAsync(ArtifactObjectDescriptor descriptor, Stream content, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(descriptor);
        ArgumentNullException.ThrowIfNull(content);

        var storeOptions = _options.Value.ObjectStore;
        EnsureRustFsDriver(storeOptions);

        var client = _httpClientFactory.CreateClient(HttpClientName);
        using var request = new HttpRequestMessage(HttpMethod.Put, BuildRequestUri(storeOptions, descriptor))
        {
            Content = CreateHttpContent(content),
        };

        request.Content.Headers.ContentType = OctetStream;
        ApplyHeaders(storeOptions, request, descriptor);

        using var response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false);
        if (!response.IsSuccessStatusCode)
        {
            var error = await ReadErrorAsync(response, cancellationToken).ConfigureAwait(false);
            throw new InvalidOperationException(
                $"RustFS upload for {descriptor.Bucket}/{descriptor.Key} failed with status {(int)response.StatusCode} ({response.ReasonPhrase}). {error}");
        }

        _logger.LogDebug("Uploaded scanner object {Bucket}/{Key} via RustFS", descriptor.Bucket, descriptor.Key);
    }

    /// <summary>
    /// Downloads the object into a seekable <see cref="MemoryStream"/>
    /// (position reset to 0), or returns <c>null</c> on HTTP 404.
    /// </summary>
    /// <exception cref="InvalidOperationException">Any other non-success HTTP status.</exception>
    public async Task<Stream?> GetAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(descriptor);

        var storeOptions = _options.Value.ObjectStore;
        EnsureRustFsDriver(storeOptions);

        var client = _httpClientFactory.CreateClient(HttpClientName);
        using var request = new HttpRequestMessage(HttpMethod.Get, BuildRequestUri(storeOptions, descriptor));
        ApplyHeaders(storeOptions, request, descriptor);

        using var response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false);
        if (response.StatusCode == HttpStatusCode.NotFound)
        {
            _logger.LogDebug("RustFS object {Bucket}/{Key} not found", descriptor.Bucket, descriptor.Key);
            return null;
        }

        if (!response.IsSuccessStatusCode)
        {
            var error = await ReadErrorAsync(response, cancellationToken).ConfigureAwait(false);
            throw new InvalidOperationException(
                $"RustFS download for {descriptor.Bucket}/{descriptor.Key} failed with status {(int)response.StatusCode} ({response.ReasonPhrase}). {error}");
        }

        // Copy into memory so the returned stream outlives the disposed response.
        var buffer = new MemoryStream();
        if (response.Content is not null)
        {
            await response.Content.CopyToAsync(buffer, cancellationToken).ConfigureAwait(false);
        }

        buffer.Position = 0;
        return buffer;
    }

    /// <summary>
    /// Deletes the object via HTTP DELETE. A 404 is treated as success
    /// (the object is already absent), making deletion idempotent.
    /// </summary>
    /// <exception cref="InvalidOperationException">Any other non-success HTTP status.</exception>
    public async Task DeleteAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(descriptor);

        var storeOptions = _options.Value.ObjectStore;
        EnsureRustFsDriver(storeOptions);

        var client = _httpClientFactory.CreateClient(HttpClientName);
        using var request = new HttpRequestMessage(HttpMethod.Delete, BuildRequestUri(storeOptions, descriptor));
        ApplyHeaders(storeOptions, request, descriptor);

        using var response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false);
        if (response.StatusCode == HttpStatusCode.NotFound)
        {
            _logger.LogDebug("RustFS object {Bucket}/{Key} already absent", descriptor.Bucket, descriptor.Key);
            return;
        }

        if (!response.IsSuccessStatusCode)
        {
            var error = await ReadErrorAsync(response, cancellationToken).ConfigureAwait(false);
            throw new InvalidOperationException(
                $"RustFS delete for {descriptor.Bucket}/{descriptor.Key} failed with status {(int)response.StatusCode} ({response.ReasonPhrase}). {error}");
        }

        _logger.LogDebug("Deleted scanner object {Bucket}/{Key} via RustFS", descriptor.Bucket, descriptor.Key);
    }

    // Guards against this store being resolved while another driver is configured.
    private static void EnsureRustFsDriver(ObjectStoreOptions options)
    {
        if (!options.IsRustFsDriver())
        {
            throw new InvalidOperationException("RustFS object store invoked while driver is not set to rustfs.");
        }
    }

    // Extracts a short (≤512 char) error snippet from a failed response body
    // for inclusion in exception messages.
    private static async Task<string> ReadErrorAsync(HttpResponseMessage response, CancellationToken cancellationToken)
    {
        if (response.Content is null)
        {
            return string.Empty;
        }

        var text = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false);
        if (string.IsNullOrWhiteSpace(text))
        {
            return string.Empty;
        }

        var trimmed = text.Trim();
        return trimmed.Length <= 512 ? trimmed : trimmed[..512];
    }

    // Materializes the stream into a ByteArrayContent (whole payload in memory)
    // so the request carries a known Content-Length. Seekable streams have
    // their original position restored; non-seekable streams are consumed.
    private static HttpContent CreateHttpContent(Stream content)
    {
        if (content is MemoryStream memoryStream)
        {
            // Fast path: reuse the backing buffer when it is exposed.
            if (memoryStream.TryGetBuffer(out var segment))
            {
                return new ByteArrayContent(segment.Array!, segment.Offset, segment.Count);
            }

            return new ByteArrayContent(memoryStream.ToArray());
        }

        if (content.CanSeek)
        {
            var originalPosition = content.Position;
            try
            {
                // Always send from the start of the stream, regardless of the
                // caller's current position.
                content.Position = 0;
                using var duplicate = new MemoryStream();
                content.CopyTo(duplicate);
                var bytes = duplicate.ToArray();
                return new ByteArrayContent(bytes);
            }
            finally
            {
                content.Position = originalPosition;
            }
        }

        using var buffer = new MemoryStream();
        content.CopyTo(buffer);
        return new ByteArrayContent(buffer.ToArray());
    }

    // Builds buckets/{bucket}/objects/{key} relative to the configured base URL.
    // NOTE(review): Uri(baseUri, relative) drops the base URI's last path
    // segment unless BaseUrl ends with '/' — confirm configured values carry a
    // trailing slash when a path prefix is used.
    private static Uri BuildRequestUri(ObjectStoreOptions options, ArtifactObjectDescriptor descriptor)
    {
        if (!Uri.TryCreate(options.RustFs.BaseUrl, UriKind.Absolute, out var baseUri))
        {
            throw new InvalidOperationException("RustFS baseUrl is invalid.");
        }

        var encodedBucket = Uri.EscapeDataString(descriptor.Bucket);
        var encodedKey = EncodeKey(descriptor.Key);
        var relativePath = new StringBuilder()
            .Append("buckets/")
            .Append(encodedBucket)
            .Append("/objects/")
            .Append(encodedKey)
            .ToString();

        return new Uri(baseUri, relativePath);
    }

    // Escapes each path segment of the key individually, keeping '/' as the
    // separator. Empty segments are dropped and segments are trimmed, so
    // "a//b " and "a/b" normalize to the same encoded key.
    private static string EncodeKey(string key)
    {
        if (string.IsNullOrWhiteSpace(key))
        {
            return string.Empty;
        }

        var segments = key.Split('/', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries);
        return string.Join('/', segments.Select(Uri.EscapeDataString));
    }

    // Applies auth, user-configured, and descriptor-derived headers to the request.
    private void ApplyHeaders(ObjectStoreOptions options, HttpRequestMessage request, ArtifactObjectDescriptor descriptor)
    {
        // Optional API-key auth: only when both header name and key are configured.
        if (!string.IsNullOrWhiteSpace(options.RustFs.ApiKeyHeader)
            && !string.IsNullOrWhiteSpace(options.RustFs.ApiKey))
        {
            request.Headers.TryAddWithoutValidation(options.RustFs.ApiKeyHeader, options.RustFs.ApiKey);
        }

        // Arbitrary extra headers from configuration.
        foreach (var header in options.Headers)
        {
            request.Headers.TryAddWithoutValidation(header.Key, header.Value);
        }

        if (descriptor.Immutable)
        {
            request.Headers.TryAddWithoutValidation(ImmutableHeader, "true");
            if (descriptor.RetainFor is { } retain && retain > TimeSpan.Zero)
            {
                // Round up so the effective retention is never shorter than requested.
                var seconds = Math.Ceiling(retain.TotalSeconds);
                request.Headers.TryAddWithoutValidation(RetainSecondsHeader, seconds.ToString(CultureInfo.InvariantCulture));
            }
        }
    }
}
|
||||
@@ -0,0 +1,75 @@
|
||||
using Amazon.S3;
|
||||
using Amazon.S3.Model;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.ObjectStore;
|
||||
|
||||
/// <summary>
/// <see cref="IArtifactObjectStore"/> backed by S3-compatible storage
/// (MinIO / AWS S3). Applies compliance-mode object lock for immutable
/// artefacts when the store options enable it.
/// </summary>
public sealed class S3ArtifactObjectStore : IArtifactObjectStore
{
    private readonly IAmazonS3 _s3;
    private readonly ObjectStoreOptions _options;
    private readonly ILogger<S3ArtifactObjectStore> _logger;
    // Injected clock for deterministic retention timestamps, consistent with
    // the repository classes in this module; defaults to the system clock.
    private readonly TimeProvider _timeProvider;

    public S3ArtifactObjectStore(IAmazonS3 s3, IOptions<ScannerStorageOptions> options, ILogger<S3ArtifactObjectStore> logger, TimeProvider? timeProvider = null)
    {
        _s3 = s3 ?? throw new ArgumentNullException(nameof(s3));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _options = (options ?? throw new ArgumentNullException(nameof(options))).Value.ObjectStore;
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>
    /// Uploads <paramref name="content"/> to the descriptor's bucket/key.
    /// When the descriptor is immutable and object lock is enabled, a
    /// compliance-mode retain-until date is set from the descriptor's
    /// retention (falling back to the configured default retention).
    /// The stream is left open for the caller (AutoCloseStream = false).
    /// </summary>
    public async Task PutAsync(ArtifactObjectDescriptor descriptor, Stream content, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(descriptor);
        ArgumentNullException.ThrowIfNull(content);

        var request = new PutObjectRequest
        {
            BucketName = descriptor.Bucket,
            Key = descriptor.Key,
            InputStream = content,
            AutoCloseStream = false,
        };

        if (descriptor.Immutable && _options.EnableObjectLock)
        {
            request.ObjectLockMode = ObjectLockMode.Compliance;

            // Per-object retention wins; otherwise fall back to the
            // store-wide compliance retention, when configured.
            var now = _timeProvider.GetUtcNow().UtcDateTime;
            if (descriptor.RetainFor is { } retention && retention > TimeSpan.Zero)
            {
                request.ObjectLockRetainUntilDate = now + retention;
            }
            else if (_options.ComplianceRetention is { } defaultRetention && defaultRetention > TimeSpan.Zero)
            {
                request.ObjectLockRetainUntilDate = now + defaultRetention;
            }
        }

        await _s3.PutObjectAsync(request, cancellationToken).ConfigureAwait(false);
        _logger.LogDebug("Uploaded scanner object {Bucket}/{Key}", descriptor.Bucket, descriptor.Key);
    }

    /// <summary>
    /// Downloads the object into a seekable <see cref="MemoryStream"/>
    /// (position reset to 0), or returns <c>null</c> when S3 reports 404.
    /// </summary>
    public async Task<Stream?> GetAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(descriptor);
        try
        {
            var response = await _s3.GetObjectAsync(descriptor.Bucket, descriptor.Key, cancellationToken).ConfigureAwait(false);
            // Buffer in memory so the returned stream outlives the S3 response.
            var buffer = new MemoryStream();
            await response.ResponseStream.CopyToAsync(buffer, cancellationToken).ConfigureAwait(false);
            buffer.Position = 0;
            return buffer;
        }
        catch (AmazonS3Exception ex) when (ex.StatusCode == System.Net.HttpStatusCode.NotFound)
        {
            _logger.LogDebug("Scanner object {Bucket}/{Key} not found", descriptor.Bucket, descriptor.Key);
            return null;
        }
    }

    /// <summary>
    /// Deletes the object. S3 delete is idempotent — deleting a missing key
    /// succeeds without error.
    /// </summary>
    public async Task DeleteAsync(ArtifactObjectDescriptor descriptor, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(descriptor);
        await _s3.DeleteObjectAsync(descriptor.Bucket, descriptor.Key, cancellationToken).ConfigureAwait(false);
        _logger.LogDebug("Deleted scanner object {Bucket}/{Key}", descriptor.Bucket, descriptor.Key);
    }
}
|
||||
@@ -0,0 +1,73 @@
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
using StellaOps.Scanner.Storage.Mongo;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Repositories;
|
||||
|
||||
/// <summary>
/// Catalog repository for artifact documents: lookup, upsert, Rekor reference
/// updates, and atomic reference-count adjustments.
/// </summary>
public sealed class ArtifactRepository
{
    private readonly MongoCollectionProvider _collections;
    private readonly TimeProvider _timeProvider;

    public ArtifactRepository(MongoCollectionProvider collections, TimeProvider? timeProvider = null)
    {
        ArgumentNullException.ThrowIfNull(collections);
        _collections = collections;
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>Returns the artifact with the given id, or <c>null</c> when absent.</summary>
    public async Task<ArtifactDocument?> GetAsync(string artifactId, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(artifactId);

        var match = await _collections.Artifacts
            .Find(doc => doc.Id == artifactId)
            .FirstOrDefaultAsync(cancellationToken)
            .ConfigureAwait(false);
        return match;
    }

    /// <summary>
    /// Inserts or replaces the artifact by id. CreatedAtUtc is backfilled only
    /// when unset; UpdatedAtUtc is always stamped with the current time.
    /// </summary>
    public async Task UpsertAsync(ArtifactDocument document, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(document);

        var timestamp = _timeProvider.GetUtcNow().UtcDateTime;
        if (document.CreatedAtUtc == default)
        {
            document.CreatedAtUtc = timestamp;
        }

        document.UpdatedAtUtc = timestamp;

        await _collections.Artifacts
            .ReplaceOneAsync(
                doc => doc.Id == document.Id,
                document,
                new ReplaceOptions { IsUpsert = true },
                cancellationToken)
            .ConfigureAwait(false);
    }

    /// <summary>Stores the Rekor transparency-log reference on an existing artifact.</summary>
    public async Task UpdateRekorAsync(string artifactId, RekorReference reference, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(artifactId);
        ArgumentNullException.ThrowIfNull(reference);

        var update = Builders<ArtifactDocument>.Update
            .Set(doc => doc.Rekor, reference)
            .Set(doc => doc.UpdatedAtUtc, _timeProvider.GetUtcNow().UtcDateTime);

        await _collections.Artifacts
            .UpdateOneAsync(doc => doc.Id == artifactId, update, cancellationToken: cancellationToken)
            .ConfigureAwait(false);
    }

    /// <summary>
    /// Atomically adjusts the artifact's reference count by <paramref name="delta"/>
    /// and returns the post-update count. Returns 0 when the artifact does not
    /// exist (no upsert is performed).
    /// </summary>
    public async Task<long> IncrementRefCountAsync(string artifactId, long delta, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(artifactId);

        var update = Builders<ArtifactDocument>.Update
            .Inc(doc => doc.RefCount, delta)
            .Set(doc => doc.UpdatedAtUtc, _timeProvider.GetUtcNow().UtcDateTime);

        var updateOptions = new FindOneAndUpdateOptions<ArtifactDocument, ArtifactDocument>
        {
            ReturnDocument = ReturnDocument.After,
            IsUpsert = false,
        };

        var updated = await _collections.Artifacts
            .FindOneAndUpdateAsync<ArtifactDocument, ArtifactDocument>(doc => doc.Id == artifactId, update, updateOptions, cancellationToken)
            .ConfigureAwait(false);

        return updated?.RefCount ?? 0;
    }
}
|
||||
@@ -0,0 +1,36 @@
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
using StellaOps.Scanner.Storage.Mongo;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Repositories;
|
||||
|
||||
/// <summary>
/// Catalog repository for image documents, keyed by image digest.
/// </summary>
public sealed class ImageRepository
{
    private readonly MongoCollectionProvider _collections;
    private readonly TimeProvider _timeProvider;

    public ImageRepository(MongoCollectionProvider collections, TimeProvider? timeProvider = null)
    {
        ArgumentNullException.ThrowIfNull(collections);
        _collections = collections;
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>
    /// Inserts or replaces the image record by digest, stamping LastSeenAtUtc
    /// with the current time on every call.
    /// </summary>
    public async Task UpsertAsync(ImageDocument document, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(document);

        document.LastSeenAtUtc = _timeProvider.GetUtcNow().UtcDateTime;

        await _collections.Images
            .ReplaceOneAsync(
                doc => doc.ImageDigest == document.ImageDigest,
                document,
                new ReplaceOptions { IsUpsert = true },
                cancellationToken)
            .ConfigureAwait(false);
    }

    /// <summary>Returns the image with the given digest, or <c>null</c> when absent.</summary>
    public async Task<ImageDocument?> GetAsync(string imageDigest, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(imageDigest);

        var match = await _collections.Images
            .Find(doc => doc.ImageDigest == imageDigest)
            .FirstOrDefaultAsync(cancellationToken)
            .ConfigureAwait(false);
        return match;
    }
}
|
||||
@@ -0,0 +1,78 @@
|
||||
using MongoDB.Bson;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
using StellaOps.Scanner.Storage.Mongo;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Repositories;
|
||||
|
||||
/// <summary>
/// Repository for scan job documents: insertion, compare-and-swap state
/// transitions, lookup, and stale-heartbeat queries.
/// </summary>
public sealed class JobRepository
{
    private readonly MongoCollectionProvider _collections;
    private readonly TimeProvider _timeProvider;

    public JobRepository(MongoCollectionProvider collections, TimeProvider? timeProvider = null)
    {
        ArgumentNullException.ThrowIfNull(collections);
        _collections = collections;
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>Inserts a new job, stamping CreatedAtUtc, and returns the same document.</summary>
    public async Task<JobDocument> InsertAsync(JobDocument document, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(document);

        document.CreatedAtUtc = _timeProvider.GetUtcNow().UtcDateTime;
        await _collections.Jobs.InsertOneAsync(document, cancellationToken: cancellationToken).ConfigureAwait(false);
        return document;
    }

    /// <summary>
    /// Compare-and-swap state transition: moves the job from
    /// <paramref name="expected"/> to <paramref name="next"/> only if it is
    /// still in the expected state. Also refreshes the heartbeat, and stamps
    /// StartedAtUtc / CompletedAtUtc for the corresponding transitions.
    /// Returns <c>true</c> when exactly one document was modified.
    /// </summary>
    public async Task<bool> TryTransitionAsync(string jobId, JobState expected, JobState next, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(jobId);

        var now = _timeProvider.GetUtcNow().UtcDateTime;

        var update = Builders<JobDocument>.Update
            .Set(doc => doc.State, next)
            .Set(doc => doc.HeartbeatAtUtc, now);

        if (next == JobState.Running)
        {
            update = update.Set(doc => doc.StartedAtUtc, now);
        }

        if (next is JobState.Succeeded or JobState.Failed or JobState.Cancelled)
        {
            update = update.Set(doc => doc.CompletedAtUtc, now);
        }

        var result = await _collections.Jobs
            .UpdateOneAsync(
                doc => doc.Id == jobId && doc.State == expected,
                update,
                cancellationToken: cancellationToken)
            .ConfigureAwait(false);

        return result.ModifiedCount == 1;
    }

    /// <summary>Returns the job with the given id, or <c>null</c> when absent.</summary>
    public async Task<JobDocument?> GetAsync(string jobId, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(jobId);

        var match = await _collections.Jobs
            .Find(doc => doc.Id == jobId)
            .FirstOrDefaultAsync(cancellationToken)
            .ConfigureAwait(false);
        return match;
    }

    /// <summary>
    /// Lists running jobs whose heartbeat is older than
    /// <paramref name="heartbeatThreshold"/> relative to now.
    /// </summary>
    /// <exception cref="ArgumentOutOfRangeException">Threshold is zero or negative.</exception>
    public Task<List<JobDocument>> ListStaleAsync(TimeSpan heartbeatThreshold, CancellationToken cancellationToken)
    {
        if (heartbeatThreshold <= TimeSpan.Zero)
        {
            throw new ArgumentOutOfRangeException(nameof(heartbeatThreshold));
        }

        var staleBefore = _timeProvider.GetUtcNow().UtcDateTime - heartbeatThreshold;

        return _collections.Jobs
            .Find(doc => doc.State == JobState.Running && doc.HeartbeatAtUtc < staleBefore)
            .ToListAsync(cancellationToken);
    }
}
|
||||
@@ -0,0 +1,36 @@
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
using StellaOps.Scanner.Storage.Mongo;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Repositories;
|
||||
|
||||
/// <summary>
/// Catalog repository for layer documents, keyed by layer digest.
/// </summary>
public sealed class LayerRepository
{
    private readonly MongoCollectionProvider _collections;
    private readonly TimeProvider _timeProvider;

    public LayerRepository(MongoCollectionProvider collections, TimeProvider? timeProvider = null)
    {
        ArgumentNullException.ThrowIfNull(collections);
        _collections = collections;
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>
    /// Inserts or replaces the layer record by digest, stamping LastSeenAtUtc
    /// with the current time on every call.
    /// </summary>
    public async Task UpsertAsync(LayerDocument document, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(document);

        document.LastSeenAtUtc = _timeProvider.GetUtcNow().UtcDateTime;

        await _collections.Layers
            .ReplaceOneAsync(
                doc => doc.LayerDigest == document.LayerDigest,
                document,
                new ReplaceOptions { IsUpsert = true },
                cancellationToken)
            .ConfigureAwait(false);
    }

    /// <summary>Returns the layer with the given digest, or <c>null</c> when absent.</summary>
    public async Task<LayerDocument?> GetAsync(string layerDigest, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(layerDigest);

        var match = await _collections.Layers
            .Find(doc => doc.LayerDigest == layerDigest)
            .FirstOrDefaultAsync(cancellationToken)
            .ConfigureAwait(false);
        return match;
    }
}
|
||||
@@ -0,0 +1,36 @@
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
using StellaOps.Scanner.Storage.Mongo;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Repositories;
|
||||
|
||||
/// <summary>
/// Repository for lifecycle (retention) rules. Rules carry an ExpiresAtUtc
/// consumed both by queries here and by the collection's TTL index.
/// </summary>
public sealed class LifecycleRuleRepository
{
    private readonly MongoCollectionProvider _collections;
    private readonly TimeProvider _timeProvider;

    public LifecycleRuleRepository(MongoCollectionProvider collections, TimeProvider? timeProvider = null)
    {
        ArgumentNullException.ThrowIfNull(collections);
        _collections = collections;
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>
    /// Inserts or replaces the rule by id. CreatedAtUtc is backfilled only
    /// when unset; otherwise the original creation time is preserved.
    /// </summary>
    public async Task UpsertAsync(LifecycleRuleDocument document, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(document);

        if (document.CreatedAtUtc == default)
        {
            document.CreatedAtUtc = _timeProvider.GetUtcNow().UtcDateTime;
        }

        await _collections.LifecycleRules
            .ReplaceOneAsync(
                doc => doc.Id == document.Id,
                document,
                new ReplaceOptions { IsUpsert = true },
                cancellationToken)
            .ConfigureAwait(false);
    }

    /// <summary>Lists rules whose expiry is strictly before <paramref name="utcNow"/>.</summary>
    public Task<List<LifecycleRuleDocument>> ListExpiredAsync(DateTime utcNow, CancellationToken cancellationToken)
    {
        return _collections.LifecycleRules
            .Find(Builders<LifecycleRuleDocument>.Filter.Lt(doc => doc.ExpiresAtUtc, utcNow))
            .ToListAsync(cancellationToken);
    }
}
|
||||
@@ -0,0 +1,32 @@
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
using StellaOps.Scanner.Storage.Mongo;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Repositories;
|
||||
|
||||
/// <summary>
/// Repository for link documents connecting a source (type + digest)
/// to the artifacts it references.
/// </summary>
public sealed class LinkRepository
{
    private readonly MongoCollectionProvider _collections;

    public LinkRepository(MongoCollectionProvider collections)
    {
        ArgumentNullException.ThrowIfNull(collections);
        _collections = collections;
    }

    /// <summary>Inserts or replaces the link by its document id.</summary>
    public async Task UpsertAsync(LinkDocument document, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(document);

        await _collections.Links
            .ReplaceOneAsync(
                doc => doc.Id == document.Id,
                document,
                new ReplaceOptions { IsUpsert = true },
                cancellationToken)
            .ConfigureAwait(false);
    }

    /// <summary>Lists every link originating from the given source type + digest.</summary>
    public Task<List<LinkDocument>> ListBySourceAsync(LinkSourceType type, string digest, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(digest);

        return _collections.Links
            .Find(doc => doc.FromType == type && doc.FromDigest == digest)
            .ToListAsync(cancellationToken);
    }
}
|
||||
@@ -0,0 +1,132 @@
|
||||
using System.Collections.Generic;
|
||||
using System.Linq;
|
||||
using MongoDB.Driver;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
using StellaOps.Scanner.Storage.Mongo;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Repositories;
|
||||
|
||||
/// <summary>
/// Repository responsible for persisting runtime events and querying the most
/// recently observed build ids per image digest.
/// </summary>
public sealed class RuntimeEventRepository
{
    private readonly MongoCollectionProvider _collections;

    public RuntimeEventRepository(MongoCollectionProvider collections)
    {
        _collections = collections ?? throw new ArgumentNullException(nameof(collections));
    }

    /// <summary>
    /// Inserts the supplied runtime events, tolerating duplicate-key collisions.
    /// </summary>
    /// <returns>Counts of inserted vs. duplicate documents.</returns>
    public async Task<RuntimeEventInsertResult> InsertAsync(
        IReadOnlyCollection<RuntimeEventDocument> documents,
        CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(documents);
        if (documents.Count == 0)
        {
            return RuntimeEventInsertResult.Empty;
        }

        try
        {
            // Unordered insert lets the server continue past duplicate-key errors
            // instead of aborting the batch at the first collision.
            await _collections.RuntimeEvents.InsertManyAsync(
                documents,
                new InsertManyOptions { IsOrdered = false },
                cancellationToken).ConfigureAwait(false);

            return new RuntimeEventInsertResult(documents.Count, 0);
        }
        catch (MongoBulkWriteException<RuntimeEventDocument> ex)
        {
            // NOTE(review): non-duplicate write errors are swallowed here and
            // counted as inserted — confirm this best-effort semantic is intended.
            var duplicates = ex.WriteErrors
                .Count(error => error.Category == ServerErrorCategory.DuplicateKey);
            var inserted = Math.Max(0, documents.Count - duplicates);

            return new RuntimeEventInsertResult(inserted, duplicates);
        }
    }

    /// <summary>
    /// For each image digest, returns up to <paramref name="maxPerImage"/> distinct,
    /// normalized (trimmed, lower-case) build ids, most recently observed first,
    /// together with the timestamp of the newest event carrying a build id.
    /// </summary>
    public async Task<IReadOnlyDictionary<string, RuntimeBuildIdObservation>> GetRecentBuildIdsAsync(
        IReadOnlyCollection<string> imageDigests,
        int maxPerImage,
        CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(imageDigests);
        if (imageDigests.Count == 0 || maxPerImage <= 0)
        {
            return new Dictionary<string, RuntimeBuildIdObservation>(StringComparer.Ordinal);
        }

        var normalized = imageDigests
            .Where(digest => !string.IsNullOrWhiteSpace(digest))
            .Select(digest => digest.Trim().ToLowerInvariant())
            .Distinct(StringComparer.Ordinal)
            .ToArray();

        if (normalized.Length == 0)
        {
            return new Dictionary<string, RuntimeBuildIdObservation>(StringComparer.Ordinal);
        }

        var results = new Dictionary<string, RuntimeBuildIdObservation>(StringComparer.Ordinal);
        var limit = Math.Max(1, maxPerImage);

        foreach (var digest in normalized)
        {
            var filter = Builders<RuntimeEventDocument>.Filter.And(
                Builders<RuntimeEventDocument>.Filter.Eq(doc => doc.ImageDigest, digest),
                Builders<RuntimeEventDocument>.Filter.Ne(doc => doc.BuildId, null),
                Builders<RuntimeEventDocument>.Filter.Ne(doc => doc.BuildId, string.Empty));

            // Over-fetch (limit * 4) so repeated build ids can still collapse
            // into up to `limit` distinct entries after de-duplication.
            var documents = await _collections.RuntimeEvents
                .Find(filter)
                .SortByDescending(doc => doc.When)
                .Limit(limit * 4)
                .Project(doc => new { doc.BuildId, doc.When })
                .ToListAsync(cancellationToken)
                .ConfigureAwait(false);

            if (documents.Count == 0)
            {
                continue;
            }

            // FIX: normalize (trim + lower-case) BEFORE de-duplicating. The previous
            // order deduped raw values case-insensitively and trimmed afterwards, so
            // ids differing only in surrounding whitespace produced duplicates in the
            // returned array.
            var buildIds = documents
                .Select(doc => doc.BuildId)
                .Where(id => !string.IsNullOrWhiteSpace(id))
                .Select(id => id!.Trim().ToLowerInvariant())
                .Distinct(StringComparer.Ordinal)
                .Take(limit)
                .ToArray();

            if (buildIds.Length == 0)
            {
                continue;
            }

            // Documents are sorted newest-first, so the first build-id-bearing
            // entry carries the most recent observation time.
            var observedAt = documents
                .Where(doc => !string.IsNullOrWhiteSpace(doc.BuildId))
                .Select(doc => doc.When)
                .FirstOrDefault();

            results[digest] = new RuntimeBuildIdObservation(digest, buildIds, observedAt);
        }

        return results;
    }
}
|
||||
|
||||
/// <summary>
/// Outcome of a bulk runtime-event insert: the number of documents written and
/// the number rejected as duplicates.
/// </summary>
public readonly record struct RuntimeEventInsertResult(int InsertedCount, int DuplicateCount)
{
    /// <summary>Result representing a no-op insert (no documents supplied).</summary>
    public static RuntimeEventInsertResult Empty { get; } = new(0, 0);
}
|
||||
|
||||
/// <summary>
/// Recent build ids observed at runtime for a single image digest.
/// </summary>
/// <param name="ImageDigest">Normalized (trimmed, lower-case) image digest.</param>
/// <param name="BuildIds">Distinct build ids, most recently observed first.</param>
/// <param name="ObservedAtUtc">Timestamp of the newest event carrying a build id.</param>
public sealed record RuntimeBuildIdObservation(
    string ImageDigest,
    IReadOnlyList<string> BuildIds,
    DateTime ObservedAtUtc);
|
||||
@@ -0,0 +1,35 @@
|
||||
namespace StellaOps.Scanner.Storage;
|
||||
|
||||
/// <summary>
/// Shared constants for scanner storage: database/bucket/prefix defaults,
/// object-store driver identifiers, Mongo collection names, and object key prefixes.
/// </summary>
public static class ScannerStorageDefaults
{
    // Fallbacks used when the corresponding option is left unconfigured.
    public const string DefaultDatabaseName = "scanner";
    public const string DefaultBucketName = "stellaops";
    public const string DefaultRootPrefix = "scanner";

    /// <summary>Recognized object-store driver identifiers (matched case-insensitively by ObjectStoreOptions).</summary>
    public static class ObjectStoreProviders
    {
        public const string S3 = "s3";
        public const string Minio = "minio";
        public const string RustFs = "rustfs";
    }

    /// <summary>Mongo collection names used by the catalog.</summary>
    public static class Collections
    {
        public const string Artifacts = "artifacts";
        public const string Images = "images";
        public const string Layers = "layers";
        public const string Links = "links";
        public const string Jobs = "jobs";
        public const string LifecycleRules = "lifecycle_rules";
        public const string RuntimeEvents = "runtime.events";
        public const string Migrations = "schema_migrations";
    }

    /// <summary>Top-level key prefixes under which artefact objects are written.</summary>
    public static class ObjectPrefixes
    {
        public const string Layers = "layers";
        public const string Images = "images";
        public const string Indexes = "indexes";
        public const string Attestations = "attest";
    }
}
|
||||
@@ -0,0 +1,204 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using MongoDB.Driver;
|
||||
|
||||
namespace StellaOps.Scanner.Storage;
|
||||
|
||||
/// <summary>
/// Root configuration for scanner storage: Mongo catalog, object store,
/// and optional dual-write mirroring.
/// </summary>
public sealed class ScannerStorageOptions
{
    /// <summary>Mongo catalog connection settings.</summary>
    public MongoOptions Mongo { get; set; } = new();

    /// <summary>Object store (S3/MinIO/RustFS) settings.</summary>
    public ObjectStoreOptions ObjectStore { get; set; } = new();

    /// <summary>Mirror-bucket dual-write settings.</summary>
    public DualWriteOptions DualWrite { get; set; } = new();

    /// <summary>
    /// Validates every nested option section, throwing
    /// <see cref="InvalidOperationException"/> on incomplete/unsafe configuration.
    /// </summary>
    public void EnsureValid()
    {
        Mongo.EnsureValid();
        ObjectStore.EnsureValid();
        DualWrite.EnsureValid();
    }
}
|
||||
|
||||
/// <summary>
/// Mongo connection settings for the scanner catalog.
/// </summary>
public sealed class MongoOptions
{
    /// <summary>Mongo connection string; required.</summary>
    public string ConnectionString { get; set; } = string.Empty;

    /// <summary>Explicit database name; when absent the connection string, then the default, is used.</summary>
    public string? DatabaseName { get; set; }

    /// <summary>Per-command timeout; must be positive.</summary>
    public TimeSpan CommandTimeout { get; set; } = TimeSpan.FromSeconds(30);

    /// <summary>Apply majority read concern (enabled by default).</summary>
    public bool UseMajorityReadConcern { get; set; } = true;

    /// <summary>Apply majority write concern (enabled by default).</summary>
    public bool UseMajorityWriteConcern { get; set; } = true;

    /// <summary>
    /// Resolves the effective database name: explicit <see cref="DatabaseName"/> first,
    /// then the database segment of the connection string, then the built-in default.
    /// </summary>
    public string ResolveDatabaseName()
    {
        if (!string.IsNullOrWhiteSpace(DatabaseName))
        {
            return DatabaseName.Trim();
        }

        if (!string.IsNullOrWhiteSpace(ConnectionString))
        {
            var fromUrl = MongoUrl.Create(ConnectionString).DatabaseName;
            if (!string.IsNullOrWhiteSpace(fromUrl))
            {
                return fromUrl;
            }
        }

        return ScannerStorageDefaults.DefaultDatabaseName;
    }

    /// <summary>
    /// Throws <see cref="InvalidOperationException"/> when the connection string is
    /// missing or the timeout is non-positive.
    /// </summary>
    public void EnsureValid()
    {
        if (string.IsNullOrWhiteSpace(ConnectionString))
        {
            throw new InvalidOperationException("Scanner storage Mongo connection string is not configured.");
        }

        if (CommandTimeout <= TimeSpan.Zero)
        {
            throw new InvalidOperationException("Scanner storage Mongo command timeout must be positive.");
        }

        // Forces early failure on a malformed connection string (MongoUrl.Create throws).
        _ = ResolveDatabaseName();
    }
}
|
||||
|
||||
/// <summary>
/// Object store (S3/MinIO/RustFS) configuration for scanner artefacts.
/// </summary>
public sealed class ObjectStoreOptions
{
    /// <summary>Selected driver identifier; see <see cref="ScannerStorageDefaults.ObjectStoreProviders"/>.</summary>
    public string Driver { get; set; } = ScannerStorageDefaults.ObjectStoreProviders.S3;

    /// <summary>Region passed to the S3 client.</summary>
    public string Region { get; set; } = "us-east-1";

    /// <summary>Optional custom service endpoint.</summary>
    public string? ServiceUrl { get; set; }

    /// <summary>Bucket holding all scanner objects.</summary>
    public string BucketName { get; set; } = ScannerStorageDefaults.DefaultBucketName;

    /// <summary>Key prefix under which every object is written.</summary>
    public string RootPrefix { get; set; } = ScannerStorageDefaults.DefaultRootPrefix;

    /// <summary>Use path-style addressing (on by default).</summary>
    public bool ForcePathStyle { get; set; } = true;

    /// <summary>Enable object-lock behavior (off by default).</summary>
    public bool EnableObjectLock { get; set; }

    /// <summary>Retention window applied to immutable objects; must be positive when set.</summary>
    public TimeSpan? ComplianceRetention { get; set; } = TimeSpan.FromDays(90);

    /// <summary>Extra request headers (case-insensitive header names).</summary>
    public IDictionary<string, string> Headers { get; } = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);

    /// <summary>RustFS-specific settings; validated only when the RustFS driver is selected.</summary>
    public RustFsOptions RustFs { get; set; } = new();

    /// <summary>True when <see cref="Driver"/> is the S3 or MinIO driver (case-insensitive).</summary>
    public bool IsS3Driver()
        => string.Equals(Driver, ScannerStorageDefaults.ObjectStoreProviders.S3, StringComparison.OrdinalIgnoreCase)
           || string.Equals(Driver, ScannerStorageDefaults.ObjectStoreProviders.Minio, StringComparison.OrdinalIgnoreCase);

    /// <summary>True when <see cref="Driver"/> is the RustFS driver (case-insensitive).</summary>
    public bool IsRustFsDriver()
        => string.Equals(Driver, ScannerStorageDefaults.ObjectStoreProviders.RustFs, StringComparison.OrdinalIgnoreCase);

    /// <summary>
    /// Throws <see cref="InvalidOperationException"/> when the driver, bucket,
    /// root prefix, retention, or RustFS settings are invalid.
    /// </summary>
    public void EnsureValid()
    {
        if (!IsS3Driver() && !IsRustFsDriver())
        {
            throw new InvalidOperationException($"Scanner storage object store driver '{Driver}' is not supported.");
        }

        if (string.IsNullOrWhiteSpace(BucketName))
        {
            throw new InvalidOperationException("Scanner storage bucket name cannot be empty.");
        }

        if (string.IsNullOrWhiteSpace(RootPrefix))
        {
            throw new InvalidOperationException("Scanner storage root prefix cannot be empty.");
        }

        if (ComplianceRetention is { } retention && retention <= TimeSpan.Zero)
        {
            throw new InvalidOperationException("Compliance retention must be positive when specified.");
        }

        if (IsRustFsDriver())
        {
            RustFs ??= new RustFsOptions();
            RustFs.EnsureValid();
        }
    }
}
|
||||
|
||||
/// <summary>
/// Connection settings for the RustFS object-store driver.
/// </summary>
public sealed class RustFsOptions
{
    /// <summary>Absolute HTTP/HTTPS endpoint of the RustFS service; required.</summary>
    public string BaseUrl { get; set; } = string.Empty;

    /// <summary>Tolerate insecure TLS (consumed by the RustFS client; not enforced here).</summary>
    public bool AllowInsecureTls { get; set; }

    /// <summary>Optional API key; required when <see cref="ApiKeyHeader"/> is set.</summary>
    public string? ApiKey { get; set; }

    /// <summary>Header name used to transmit <see cref="ApiKey"/>.</summary>
    public string ApiKeyHeader { get; set; } = string.Empty;

    /// <summary>Request timeout; must be positive.</summary>
    public TimeSpan Timeout { get; set; } = TimeSpan.FromSeconds(60);

    /// <summary>
    /// Throws <see cref="InvalidOperationException"/> when the options are incomplete or unsafe.
    /// </summary>
    public void EnsureValid()
    {
        if (string.IsNullOrWhiteSpace(BaseUrl))
        {
            throw new InvalidOperationException("RustFS baseUrl must be configured.");
        }

        if (!Uri.TryCreate(BaseUrl, UriKind.Absolute, out var endpoint))
        {
            throw new InvalidOperationException("RustFS baseUrl must be an absolute URI.");
        }

        var isHttp = string.Equals(endpoint.Scheme, Uri.UriSchemeHttp, StringComparison.OrdinalIgnoreCase);
        var isHttps = string.Equals(endpoint.Scheme, Uri.UriSchemeHttps, StringComparison.OrdinalIgnoreCase);
        if (!isHttp && !isHttps)
        {
            throw new InvalidOperationException("RustFS baseUrl must use HTTP or HTTPS.");
        }

        if (Timeout <= TimeSpan.Zero)
        {
            throw new InvalidOperationException("RustFS timeout must be greater than zero.");
        }

        if (!string.IsNullOrWhiteSpace(ApiKeyHeader) && string.IsNullOrWhiteSpace(ApiKey))
        {
            throw new InvalidOperationException("RustFS API key header name requires a non-empty API key.");
        }
    }
}
|
||||
|
||||
/// <summary>
/// Controls mirroring of artefact objects into a secondary bucket.
/// </summary>
public sealed class DualWriteOptions
{
    /// <summary>Enables writing each object to the mirror bucket as well.</summary>
    public bool Enabled { get; set; }

    /// <summary>Target bucket for mirrored writes; required when <see cref="Enabled"/> is true.</summary>
    public string? MirrorBucket { get; set; }

    /// <summary>Throws when dual-write is enabled without a mirror bucket.</summary>
    public void EnsureValid()
    {
        if (!Enabled)
        {
            return;
        }

        if (string.IsNullOrWhiteSpace(MirrorBucket))
        {
            throw new InvalidOperationException("Dual-write mirror bucket must be configured when enabled.");
        }
    }
}
|
||||
@@ -0,0 +1,181 @@
|
||||
using System.Buffers;
|
||||
using System.Security.Cryptography;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Scanner.Storage.Catalog;
|
||||
using StellaOps.Scanner.Storage.ObjectStore;
|
||||
using StellaOps.Scanner.Storage.Repositories;
|
||||
|
||||
namespace StellaOps.Scanner.Storage.Services;
|
||||
|
||||
/// <summary>
/// Stores scanner artefacts: uploads the payload to the object store (optionally
/// mirroring it when dual-write is enabled) and records catalog metadata plus an
/// optional lifecycle (TTL) rule in Mongo.
/// </summary>
public sealed class ArtifactStorageService
{
    private readonly ArtifactRepository _artifactRepository;
    private readonly LifecycleRuleRepository _lifecycleRuleRepository;
    private readonly IArtifactObjectStore _objectStore;
    private readonly ScannerStorageOptions _options;
    private readonly ILogger<ArtifactStorageService> _logger;
    private readonly TimeProvider _timeProvider;

    public ArtifactStorageService(
        ArtifactRepository artifactRepository,
        LifecycleRuleRepository lifecycleRuleRepository,
        IArtifactObjectStore objectStore,
        IOptions<ScannerStorageOptions> options,
        ILogger<ArtifactStorageService> logger,
        TimeProvider? timeProvider = null)
    {
        _artifactRepository = artifactRepository ?? throw new ArgumentNullException(nameof(artifactRepository));
        _lifecycleRuleRepository = lifecycleRuleRepository ?? throw new ArgumentNullException(nameof(lifecycleRuleRepository));
        _objectStore = objectStore ?? throw new ArgumentNullException(nameof(objectStore));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _options = (options ?? throw new ArgumentNullException(nameof(options))).Value;
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>
    /// Buffers and SHA-256-hashes <paramref name="content"/>, uploads it under a
    /// deterministic digest-based key, upserts the catalog document, and — when
    /// <paramref name="expiresAtUtc"/> is supplied — a lifecycle rule.
    /// </summary>
    /// <param name="type">Artefact kind; selects the object key prefix.</param>
    /// <param name="format">Payload format; selects the object key file name.</param>
    /// <param name="mediaType">Media type persisted on the catalog document.</param>
    /// <param name="content">Payload stream; read to the end and fully buffered in memory.</param>
    /// <param name="immutable">Marks the object immutable in the store and catalog.</param>
    /// <param name="ttlClass">Retention class recorded on the document and rule.</param>
    /// <param name="expiresAtUtc">Optional expiry; when set, a lifecycle rule is written.</param>
    /// <returns>The persisted catalog document.</returns>
    public async Task<ArtifactDocument> StoreArtifactAsync(
        ArtifactDocumentType type,
        ArtifactDocumentFormat format,
        string mediaType,
        Stream content,
        bool immutable,
        string ttlClass,
        DateTime? expiresAtUtc,
        CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(content);
        ArgumentException.ThrowIfNullOrWhiteSpace(mediaType);

        var (buffer, size, digestHex) = await BufferAndHashAsync(content, cancellationToken).ConfigureAwait(false);
        try
        {
            var normalizedDigest = $"sha256:{digestHex}";
            var artifactId = CatalogIdFactory.CreateArtifactId(type, normalizedDigest);
            var key = BuildObjectKey(type, format, normalizedDigest);
            var descriptor = new ArtifactObjectDescriptor(
                _options.ObjectStore.BucketName,
                key,
                immutable,
                _options.ObjectStore.ComplianceRetention);

            buffer.Position = 0;
            await _objectStore.PutAsync(descriptor, buffer, cancellationToken).ConfigureAwait(false);

            if (_options.DualWrite.Enabled)
            {
                // Mirror the same bytes into the secondary bucket. MirrorBucket is
                // non-null here because DualWriteOptions.EnsureValid requires it
                // whenever dual-write is enabled.
                buffer.Position = 0;
                var mirrorDescriptor = descriptor with { Bucket = _options.DualWrite.MirrorBucket! };
                await _objectStore.PutAsync(mirrorDescriptor, buffer, cancellationToken).ConfigureAwait(false);
            }

            var now = _timeProvider.GetUtcNow().UtcDateTime;
            var document = new ArtifactDocument
            {
                Id = artifactId,
                Type = type,
                Format = format,
                MediaType = mediaType,
                BytesSha256 = normalizedDigest,
                SizeBytes = size,
                Immutable = immutable,
                RefCount = 1,
                CreatedAtUtc = now,
                UpdatedAtUtc = now,
                TtlClass = ttlClass,
            };

            await _artifactRepository.UpsertAsync(document, cancellationToken).ConfigureAwait(false);

            if (expiresAtUtc.HasValue)
            {
                var lifecycle = new LifecycleRuleDocument
                {
                    Id = CatalogIdFactory.CreateLifecycleRuleId(document.Id, ttlClass),
                    ArtifactId = document.Id,
                    Class = ttlClass,
                    ExpiresAtUtc = expiresAtUtc,
                    CreatedAtUtc = now,
                };

                await _lifecycleRuleRepository.UpsertAsync(lifecycle, cancellationToken).ConfigureAwait(false);
            }

            _logger.LogInformation("Stored scanner artifact {ArtifactId} ({SizeBytes} bytes, digest {Digest})", document.Id, size, normalizedDigest);
            return document;
        }
        finally
        {
            await buffer.DisposeAsync().ConfigureAwait(false);
        }
    }

    /// <summary>
    /// Copies <paramref name="content"/> into a rewindable memory buffer while
    /// computing its SHA-256; returns (buffer at position 0, byte count, lower-case hex digest).
    /// </summary>
    private static async Task<(MemoryStream Buffer, long Size, string DigestHex)> BufferAndHashAsync(Stream content, CancellationToken cancellationToken)
    {
        var bufferStream = new MemoryStream();
        // FIX: IncrementalHash is IDisposable — the previous version never disposed
        // it, leaking crypto handles on every stored artefact.
        using var hasher = IncrementalHash.CreateHash(HashAlgorithmName.SHA256);
        var rented = ArrayPool<byte>.Shared.Rent(81920);
        long total = 0;

        try
        {
            int read;
            while ((read = await content.ReadAsync(rented.AsMemory(0, rented.Length), cancellationToken).ConfigureAwait(false)) > 0)
            {
                hasher.AppendData(rented, 0, read);
                await bufferStream.WriteAsync(rented.AsMemory(0, read), cancellationToken).ConfigureAwait(false);
                total += read;
            }
        }
        finally
        {
            ArrayPool<byte>.Shared.Return(rented);
        }

        bufferStream.Position = 0;
        var digestHex = Convert.ToHexString(hasher.GetCurrentHash()).ToLowerInvariant();
        return (bufferStream, total, digestHex);
    }

    /// <summary>
    /// Builds the deterministic object key: [rootPrefix/]typePrefix/digestHex/fileName.
    /// </summary>
    private string BuildObjectKey(ArtifactDocumentType type, ArtifactDocumentFormat format, string digest)
    {
        // Strip the "sha256:" algorithm prefix so only the hex digest appears in the key.
        var normalizedDigest = digest.Split(':', 2, StringSplitOptions.TrimEntries)[^1];
        var prefix = type switch
        {
            ArtifactDocumentType.LayerBom => ScannerStorageDefaults.ObjectPrefixes.Layers,
            ArtifactDocumentType.ImageBom => ScannerStorageDefaults.ObjectPrefixes.Images,
            ArtifactDocumentType.Diff => "diffs",
            ArtifactDocumentType.Index => ScannerStorageDefaults.ObjectPrefixes.Indexes,
            ArtifactDocumentType.Attestation => ScannerStorageDefaults.ObjectPrefixes.Attestations,
            _ => ScannerStorageDefaults.ObjectPrefixes.Images,
        };

        var extension = format switch
        {
            ArtifactDocumentFormat.CycloneDxJson => "sbom.cdx.json",
            ArtifactDocumentFormat.CycloneDxProtobuf => "sbom.cdx.pb",
            ArtifactDocumentFormat.SpdxJson => "sbom.spdx.json",
            ArtifactDocumentFormat.BomIndex => "bom-index.bin",
            ArtifactDocumentFormat.DsseJson => "artifact.dsse.json",
            _ => "artifact.bin",
        };

        var rootPrefix = _options.ObjectStore.RootPrefix;
        if (string.IsNullOrWhiteSpace(rootPrefix))
        {
            return $"{prefix}/{normalizedDigest}/{extension}";
        }

        return $"{TrimTrailingSlash(rootPrefix)}/{prefix}/{normalizedDigest}/{extension}";
    }

    /// <summary>Removes trailing '/' characters; whitespace-only input yields an empty string.</summary>
    private static string TrimTrailingSlash(string prefix)
    {
        if (string.IsNullOrWhiteSpace(prefix))
        {
            return string.Empty;
        }

        return prefix.TrimEnd('/');
    }
}
|
||||
@@ -0,0 +1,18 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">
|
||||
<PropertyGroup>
|
||||
<TargetFramework>net10.0</TargetFramework>
|
||||
<LangVersion>preview</LangVersion>
|
||||
<Nullable>enable</Nullable>
|
||||
<ImplicitUsings>enable</ImplicitUsings>
|
||||
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
|
||||
</PropertyGroup>
|
||||
<ItemGroup>
|
||||
<PackageReference Include="MongoDB.Driver" Version="3.5.0" />
|
||||
<PackageReference Include="AWSSDK.S3" Version="3.7.305.6" />
|
||||
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0-rc.2.25502.107" />
|
||||
<PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0-rc.2.25502.107" />
|
||||
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" Version="10.0.0-rc.2.25502.107" />
|
||||
<PackageReference Include="Microsoft.Extensions.Options.ConfigurationExtensions" Version="10.0.0-rc.2.25502.107" />
|
||||
<PackageReference Include="Microsoft.Extensions.Http" Version="10.0.0-rc.2.25502.107" />
|
||||
</ItemGroup>
|
||||
</Project>
|
||||
@@ -0,0 +1,9 @@
|
||||
# Scanner Storage Task Board
|
||||
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| SCANNER-STORAGE-09-301 | DONE (2025-10-18) | Scanner Storage Guild | SCANNER-CORE-09-501 | Mongo catalog schemas/indexes for images, layers, artifacts, jobs, lifecycle rules plus migrations. | Collections created via bootstrapper; migrations recorded; indexes enforce uniqueness + TTL; majority read/write configured. |
|
||||
| SCANNER-STORAGE-09-302 | DONE (2025-10-18) | Scanner Storage Guild | SCANNER-STORAGE-09-301 | MinIO layout, immutability policies, client abstraction, and configuration binding. | S3 client abstraction configurable via options; bucket/prefix defaults documented; immutability flags enforced with tests; config binding validated. |
|
||||
| SCANNER-STORAGE-09-303 | DONE (2025-10-18) | Scanner Storage Guild | SCANNER-STORAGE-09-301, SCANNER-STORAGE-09-302 | Repositories/services with dual-write feature flag, deterministic digests, TTL enforcement tests. | Dual-write service writes metadata + objects atomically; digest determinism covered by tests; TTL enforcement fixture passing. |
|
||||
| SCANNER-STORAGE-09-304 | DONE (2025-10-19) | Scanner Storage Guild | SCANNER-STORAGE-09-303 | Adopt `TimeProvider` across storage timestamps for determinism. | Storage services/repositories use injected `TimeProvider`; tests cover timestamp determinism. |
|
||||
| SCANNER-STORAGE-11-401 | DONE (2025-10-23) | Scanner Storage Guild | SCANNER-STORAGE-09-302 | Replace MinIO artifact store with RustFS driver, including migration tooling and configuration updates. | RustFS provider registered across Worker/WebService; data migration plan/tooling validated on staging; Helm/offline kit configs updated; regression tests cover RustFS paths with deterministic results. |
|
||||
Reference in New Issue
Block a user