Repair live canonical migrations and scanner cache bootstrap

This commit is contained in:
master
2026-03-09 21:56:41 +02:00
parent 00bf2fa99a
commit dfd22281ed
21 changed files with 1018 additions and 12 deletions

View File

@@ -102,6 +102,7 @@ volumes:
registry-data:
concelier-jobs:
scanner-surface-cache:
scanner-cache-data:
console-dist:
advisory-ai-queue:
advisory-ai-plans:
@@ -646,11 +647,31 @@ services:
labels: *release-labels
# --- Slot 8: Scanner -------------------------------------------------------
scanner-cache-init:
image: stellaops/scanner-worker:dev
container_name: stellaops-scanner-cache-init
restart: "no"
user: "0:0"
entrypoint:
- /bin/sh
- -lc
- |
mkdir -p /var/lib/stellaops/cache/scanner/layers /var/lib/stellaops/cache/scanner/cas &&
chown -R 10001:10001 /var/lib/stellaops/cache/scanner &&
chmod -R u+rwX,go-rwx /var/lib/stellaops/cache/scanner
volumes:
- scanner-cache-data:/var/lib/stellaops/cache/scanner
networks:
stellaops: {}
labels: *release-labels
scanner-web:
image: stellaops/scanner-web:dev
container_name: stellaops-scanner-web
restart: unless-stopped
depends_on:
scanner-cache-init:
condition: service_completed_successfully
postgres:
condition: service_healthy
valkey:
@@ -677,6 +698,7 @@ services:
SCANNER_SCANNER__EVENTS__STREAM: "${SCANNER_EVENTS_STREAM:-stella.events}"
SCANNER_SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "${SCANNER_EVENTS_PUBLISH_TIMEOUT_SECONDS:-5}"
SCANNER_SCANNER__EVENTS__MAXSTREAMLENGTH: "${SCANNER_EVENTS_MAX_STREAM_LENGTH:-10000}"
SCANNER_SCANNER__CACHE__ROOTPATH: "/var/lib/stellaops/cache/scanner"
SCANNER_SCANNER__OFFLINEKIT__ENABLED: "${SCANNER_OFFLINEKIT_ENABLED:-false}"
SCANNER_SURFACE_FS_ENDPOINT: "${SCANNER_SURFACE_FS_ENDPOINT:-http://s3.stella-ops.local:8333}"
SCANNER_SURFACE_FS_BUCKET: "${SCANNER_SURFACE_FS_BUCKET:-surface-cache}"
@@ -699,6 +721,7 @@ services:
- ${SCANNER_OFFLINEKIT_TRUSTROOTS_HOST_PATH:-./offline/trust-roots}:${SCANNER_OFFLINEKIT_TRUSTROOTDIRECTORY:-/etc/stellaops/trust-roots}:ro
- ${SCANNER_OFFLINEKIT_REKOR_SNAPSHOT_HOST_PATH:-./offline/rekor-snapshot}:${SCANNER_OFFLINEKIT_REKORSNAPSHOTDIRECTORY:-/var/lib/stellaops/rekor-snapshot}:ro
- *cert-volume
- scanner-cache-data:/var/lib/stellaops/cache/scanner
tmpfs:
- /var/lib/stellaops/surface:mode=1777
ports:
@@ -718,6 +741,8 @@ services:
container_name: stellaops-scanner-worker
restart: unless-stopped
depends_on:
scanner-cache-init:
condition: service_completed_successfully
postgres:
condition: service_healthy
valkey:
@@ -742,10 +767,12 @@ services:
SCANNER_SURFACE_SECRETS_PROVIDER: "file"
SCANNER_SURFACE_SECRETS_ROOT: "/var/lib/stellaops/surface"
SCANNER_SURFACE_VALIDATION_DISABLED: "true"
SCANNER_SCANNER__CACHE__ROOTPATH: "/var/lib/stellaops/cache/scanner"
# EPSS bundle source path (EpssBundleSource constructor)
EPSS_BUNDLE_PATH: "/app/epss"
volumes:
- *cert-volume
- scanner-cache-data:/var/lib/stellaops/cache/scanner
tmpfs:
- /var/lib/stellaops/surface:mode=1777
- /app/epss:mode=1777
@@ -2268,7 +2295,7 @@ services:
ADVISORYAI__KnowledgeSearch__VexAdapterEnabled: "true"
ADVISORYAI__KnowledgeSearch__VexAdapterBaseUrl: "http://concelier.stella-ops.local"
ADVISORYAI__KnowledgeSearch__PolicyAdapterEnabled: "true"
ADVISORYAI__KnowledgeSearch__PolicyAdapterBaseUrl: "http://policy-gateway.stella-ops.local"
ADVISORYAI__KnowledgeSearch__PolicyAdapterBaseUrl: "http://policy-gateway.stella-ops.local"
volumes:
- *cert-volume
networks:

View File

@@ -28,7 +28,7 @@ The scanner cache stores layer-level SBOM fragments and file content that can be
| `enableFileCas` | `true` | Disable to prevent CAS usage (APIs throw on `PutAsync`). |
| `importDirectory` / `exportDirectory` | `null` | Optional defaults for offline import/export tooling. |
> **Tip:** configure `scanner:cache:rootPath` to a dedicated volume and mount it into worker containers when running in Kubernetes or Nomad.
> **Tip:** configure `scanner:cache:rootPath` to a dedicated writable volume and mount it into worker containers when running in Kubernetes or Nomad. Hardened runtime images should not point the cache at `/app/**`; use a writable path such as `/var/lib/stellaops/cache/scanner`. On Docker Compose, bootstrap ownership before the non-root scanner containers start, for example with the `scanner-cache-init` one-shot service used by the local Stella Ops stack.
## 3. Metrics

View File

@@ -0,0 +1,74 @@
# Sprint 20260309_014 - Live Runtime Fault Repair
## Topic & Scope
- Repair live backend/runtime faults uncovered after the full 60-image rebuild and fresh `stella-ops.local` redeploy.
- Keep the rebuilt stack client-ready underneath the clean UI shell by fixing background workers, runtime contracts, and hardened-container assumptions instead of hiding errors behind empty states.
- Working directory: `src/Platform/**`.
- Cross-module edits allowed for this sprint: `src/JobEngine/**`, `src/Concelier/**`, `src/Scanner/**`, `devops/compose/**`, and linked docs in `docs/**`.
- Expected evidence: targeted `.csproj` test runs, live API verification, live Playwright rechecks on impacted routes, and runtime log validation after redeploy.
## Dependencies & Concurrency
- Depends on the scratch rebuild baseline and live search runtime repair from `SPRINT_20260309_013_AdvisoryAI_live_unified_search_corpus_runtime_repair.md`.
- Safe parallelism: avoid unrelated web/search feature edits already in flight from other agents; stage only the runtime-fault hunks touched here.
## Documentation Prerequisites
- `docs/modules/platform/architecture-overview.md`
- `docs/modules/jobengine/architecture.md`
- `docs/modules/concelier/architecture.md`
- `docs/modules/scanner/architecture.md`
- `docs/code-of-conduct/CODE_OF_CONDUCT.md`
- `docs/code-of-conduct/TESTING_PRACTICES.md`
## Delivery Tracker
### TASK-014-001 - Diagnose live runtime failures from rebuilt stack
Status: DONE
Dependency: none
Owners: QA, Developer
Task description:
- Rebuild all services, redeploy the compose stack, then inspect live route behavior and backend logs to identify runtime faults that survive basic page rendering.
Completion criteria:
- [x] Full image matrix rebuild completed.
- [x] Fresh compose recreate completed.
- [x] Live evidence captured for runtime faults and impacted routes.
### TASK-014-002 - Repair scheduler and analytics runtime contract faults
Status: DONE
Dependency: TASK-014-001
Owners: Developer
Task description:
- Fix PostgreSQL type/function mismatches causing scheduler planner loops and platform analytics maintenance to fail after startup.
Completion criteria:
- [x] Scheduler planner queries no longer emit `run_state = text` errors.
- [x] Platform analytics maintenance invokes `analytics.compute_daily_rollups` with the correct PostgreSQL parameter type.
- [x] Focused tests prove the repaired contracts.
### TASK-014-003 - Repair canonical advisory DI and scanner cache runtime assumptions
Status: DONE
Dependency: TASK-014-001
Owners: Developer
Task description:
- Restore Concelier canonical advisory service registration under the live WebService and align scanner cache paths with writable hardened-container storage so maintenance jobs stop failing after deploy.
Completion criteria:
- [x] `/api/v1/canonical` resolves through registered services without runtime DI failure.
- [x] Scanner cache maintenance no longer writes into read-only `/app` paths in live containers.
- [x] Focused tests and live verification cover the repaired contracts.
## Execution Log
| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2026-03-09 | Sprint created after full rebuild/redeploy exposed live runtime faults in scheduler planning, platform analytics maintenance, Concelier canonical DI, and scanner cache maintenance. | Codex |
| 2026-03-09 | Repaired scheduler enum/query typing and platform analytics date binding; focused `.csproj` verification passed and post-redeploy logs stopped emitting the runtime faults. | Codex |
| 2026-03-09 | Added Concelier startup migration registration, fixed Scanner worker env-prefix bootstrap, and introduced compose cache ownership bootstrap; focused tests passed, `/api/v1/canonical` returned `200`, cache paths resolved to `/var/lib/stellaops/cache/scanner`, and live Playwright rechecks passed (`111/111` routes, changed-surfaces pass). | Codex |
## Decisions & Risks
- This sprint intentionally treats background worker failures as product defects even when the frontdoor UI still renders. A clean route sweep is insufficient if the live services are erroring underneath.
- Cross-module edits are permitted because the faults span runtime contracts across Platform, JobEngine, Concelier, Scanner, and compose deployment wiring.
- Microsoft Testing Platform projects in this sprint require `dotnet test <project>.csproj -- --filter-class ...`; `--filter` against the project silently ran whole suites and was rejected as verification evidence.
- Hardened Scanner containers need both a writable cache root and ownership bootstrap. The compose stack now uses `scanner-cache-init` to prepare the named volume for the non-root runtime user.
## Next Checkpoints
- Targeted repair commit once runtime faults are fixed, revalidated live, and staged without unrelated agent changes.

View File

@@ -25,6 +25,7 @@ using StellaOps.Auth.ServerIntegration;
using StellaOps.Auth.ServerIntegration.Tenancy;
using StellaOps.Concelier.Core.Aoc;
using StellaOps.Concelier.Core.Attestation;
using StellaOps.Concelier.Core.Canonical;
using StellaOps.Concelier.Core.Diagnostics;
using StellaOps.Concelier.Core.Events;
using StellaOps.Concelier.Core.Federation;
@@ -498,6 +499,7 @@ builder.Services.AddConcelierPostgresStorage(pgOptions =>
pgOptions.AutoMigrate = postgresOptions.AutoMigrate;
pgOptions.MigrationsPath = postgresOptions.MigrationsPath;
});
builder.Services.AddScoped<ICanonicalAdvisoryService, CanonicalAdvisoryService>();
// Register in-memory lease store (single-instance dev mode).
builder.Services.AddSingleton<StellaOps.Concelier.Core.Jobs.ILeaseStore, StellaOps.Concelier.Core.Jobs.InMemoryLeaseStore>();

View File

@@ -11,6 +11,7 @@ using StellaOps.Concelier.Merge.Backport;
using StellaOps.Concelier.Persistence.Postgres;
using StellaOps.Concelier.Persistence.Postgres.Advisories;
using StellaOps.Concelier.Persistence.Postgres.Repositories;
using StellaOps.Infrastructure.Postgres.Migrations;
using StellaOps.Infrastructure.Postgres.Options;
using StorageContracts = StellaOps.Concelier.Storage;
@@ -35,6 +36,10 @@ public static class ConcelierPersistenceExtensions
{
services.Configure<PostgresOptions>(sectionName, configuration.GetSection(sectionName));
services.AddSingleton<ConcelierDataSource>();
services.AddStartupMigrations(
ConcelierDataSource.DefaultSchemaName,
"Concelier.Storage",
typeof(ConcelierDataSource).Assembly);
// Register repositories
services.AddScoped<IAdvisoryRepository, AdvisoryRepository>();
@@ -83,6 +88,10 @@ public static class ConcelierPersistenceExtensions
{
services.Configure(configureOptions);
services.AddSingleton<ConcelierDataSource>();
services.AddStartupMigrations(
ConcelierDataSource.DefaultSchemaName,
"Concelier.Storage",
typeof(ConcelierDataSource).Assembly);
// Register repositories
services.AddScoped<IAdvisoryRepository, AdvisoryRepository>();

View File

@@ -0,0 +1,365 @@
using System.Text.Json;
using StellaOps.Concelier.Core.Canonical;
using StellaOps.Concelier.Merge.Backport;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Concelier.Persistence.Postgres.Repositories;
using MergeHashInput = StellaOps.Concelier.Merge.Identity.MergeHashInput;
namespace StellaOps.Concelier.Persistence.Postgres;
/// <summary>
/// Bridges the canonical-layer <see cref="IMergeHashCalculator"/> contract to the
/// Merge.Identity implementation by translating between the two input models.
/// </summary>
public sealed class MergeHashCalculatorAdapter : IMergeHashCalculator
{
    // Underlying Merge.Identity calculator that performs the actual hashing.
    private readonly StellaOps.Concelier.Merge.Identity.IMergeHashCalculator identityCalculator;

    public MergeHashCalculatorAdapter(StellaOps.Concelier.Merge.Identity.IMergeHashCalculator inner)
    {
        identityCalculator = inner ?? throw new ArgumentNullException(nameof(inner));
    }

    /// <summary>
    /// Computes the merge hash for a canonical-layer input by delegating to the
    /// Merge.Identity calculator.
    /// </summary>
    /// <param name="input">Canonical merge-hash input; must not be null.</param>
    /// <returns>The merge hash produced by the inner calculator.</returns>
    public string ComputeMergeHash(StellaOps.Concelier.Core.Canonical.MergeHashInput input)
    {
        ArgumentNullException.ThrowIfNull(input);
        return identityCalculator.ComputeMergeHash(ToIdentityInput(input));
    }

    // Field-by-field translation into the Merge.Identity input shape (see the
    // MergeHashInput alias at the top of the file).
    private static MergeHashInput ToIdentityInput(StellaOps.Concelier.Core.Canonical.MergeHashInput input) => new()
    {
        Cve = input.Cve,
        AffectsKey = input.AffectsKey,
        VersionRange = input.VersionRange,
        Weaknesses = input.Weaknesses,
        PatchLineage = input.PatchLineage
    };
}
/// <summary>
/// PostgreSQL-backed implementation of <see cref="ICanonicalAdvisoryStore"/>.
/// Loads canonical advisory entities, their source edges, and provenance scopes
/// from the underlying repositories and maps them into canonical DTOs.
/// </summary>
public sealed class PostgresCanonicalAdvisoryStore : ICanonicalAdvisoryStore
{
    // Web defaults (camelCase property names) used when deserializing stored DSSE envelopes.
    private static readonly JsonSerializerOptions JsonOptions = new(JsonSerializerDefaults.Web);

    private readonly IAdvisoryCanonicalRepository advisoryRepository;
    private readonly ISourceRepository sourceRepository;
    private readonly IProvenanceScopeStore provenanceScopeStore;

    public PostgresCanonicalAdvisoryStore(
        IAdvisoryCanonicalRepository advisoryRepository,
        ISourceRepository sourceRepository,
        IProvenanceScopeStore provenanceScopeStore)
    {
        this.advisoryRepository = advisoryRepository ?? throw new ArgumentNullException(nameof(advisoryRepository));
        this.sourceRepository = sourceRepository ?? throw new ArgumentNullException(nameof(sourceRepository));
        this.provenanceScopeStore = provenanceScopeStore ?? throw new ArgumentNullException(nameof(provenanceScopeStore));
    }

    /// <summary>Looks up a canonical advisory by id; returns null when not found.</summary>
    public async Task<CanonicalAdvisory?> GetByIdAsync(Guid id, CancellationToken ct = default)
    {
        var entity = await advisoryRepository.GetByIdAsync(id, ct).ConfigureAwait(false);
        return entity is null ? null : await MapCanonicalAsync(entity, ct).ConfigureAwait(false);
    }

    /// <summary>Looks up a canonical advisory by its merge hash; returns null when not found.</summary>
    public async Task<CanonicalAdvisory?> GetByMergeHashAsync(string mergeHash, CancellationToken ct = default)
    {
        var entity = await advisoryRepository.GetByMergeHashAsync(mergeHash, ct).ConfigureAwait(false);
        return entity is null ? null : await MapCanonicalAsync(entity, ct).ConfigureAwait(false);
    }

    /// <summary>Returns all canonical advisories recorded for the given CVE id.</summary>
    public async Task<IReadOnlyList<CanonicalAdvisory>> GetByCveAsync(string cve, CancellationToken ct = default)
    {
        var entities = await advisoryRepository.GetByCveAsync(cve, ct).ConfigureAwait(false);
        return await MapCanonicalsAsync(entities, ct).ConfigureAwait(false);
    }

    /// <summary>
    /// Returns all canonical advisories whose affects-key matches the given artifact key.
    /// </summary>
    public async Task<IReadOnlyList<CanonicalAdvisory>> GetByArtifactAsync(string artifactKey, CancellationToken ct = default)
    {
        var entities = await advisoryRepository.GetByAffectsKeyAsync(artifactKey, ct).ConfigureAwait(false);
        return await MapCanonicalsAsync(entities, ct).ConfigureAwait(false);
    }

    /// <summary>
    /// Runs a filtered, paged query and maps each entity (including its source edges
    /// and provenance scopes) into the canonical DTO, preserving the repository's
    /// paging metadata.
    /// </summary>
    public async Task<PagedResult<CanonicalAdvisory>> QueryAsync(CanonicalQueryOptions options, CancellationToken ct = default)
    {
        var result = await advisoryRepository.QueryAsync(options, ct).ConfigureAwait(false);
        var items = await MapCanonicalsAsync(result.Items, ct).ConfigureAwait(false);
        return new PagedResult<CanonicalAdvisory>
        {
            Items = items,
            TotalCount = result.TotalCount,
            Offset = result.Offset,
            Limit = result.Limit
        };
    }

    /// <summary>
    /// Inserts or updates a canonical advisory from the request and returns its id.
    /// </summary>
    public async Task<Guid> UpsertCanonicalAsync(UpsertCanonicalRequest request, CancellationToken ct = default)
    {
        ArgumentNullException.ThrowIfNull(request);
        var entity = new AdvisoryCanonicalEntity
        {
            // Guid.Empty here — presumably the repository resolves/assigns the id
            // during upsert (e.g. by merge_hash); TODO confirm against UpsertAsync.
            Id = Guid.Empty,
            Cve = request.Cve,
            AffectsKey = request.AffectsKey,
            VersionRange = request.VersionRangeJson,
            Weakness = request.Weaknesses.ToArray(),
            MergeHash = request.MergeHash,
            // New/updated canonicals are persisted as lowercase "active".
            Status = CanonicalStatus.Active.ToString().ToLowerInvariant(),
            Severity = request.Severity,
            EpssScore = request.EpssScore,
            ExploitKnown = request.ExploitKnown,
            Title = request.Title,
            Summary = request.Summary
        };
        return await advisoryRepository.UpsertAsync(entity, ct).ConfigureAwait(false);
    }

    /// <summary>Persists a new status; the enum value is stored as a lowercase string.</summary>
    public Task UpdateStatusAsync(Guid id, CanonicalStatus status, CancellationToken ct = default)
        => advisoryRepository.UpdateStatusAsync(id, status.ToString().ToLowerInvariant(), ct);

    /// <summary>Returns the total number of canonical advisories.</summary>
    public Task<long> CountAsync(CancellationToken ct = default)
        => advisoryRepository.CountAsync(ct);

    /// <summary>
    /// Adds a source edge to a canonical advisory and reports whether an edge with the
    /// same source and document hash already existed before this call.
    /// NOTE(review): the existence check and insert are separate operations, so this
    /// is not atomic under concurrent writers — confirm the repository tolerates that.
    /// </summary>
    public async Task<SourceEdgeResult> AddSourceEdgeAsync(AddSourceEdgeRequest request, CancellationToken ct = default)
    {
        ArgumentNullException.ThrowIfNull(request);
        // Snapshot existence *before* the insert so the result can distinguish
        // Created from Existing.
        var existed = await SourceEdgeExistsAsync(request.CanonicalId, request.SourceId, request.SourceDocHash, ct)
            .ConfigureAwait(false);
        var edgeId = await advisoryRepository.AddSourceEdgeAsync(
            new AdvisorySourceEdgeEntity
            {
                // Guid.Empty — presumably assigned by the repository on insert; TODO confirm.
                Id = Guid.Empty,
                CanonicalId = request.CanonicalId,
                SourceId = request.SourceId,
                SourceAdvisoryId = request.SourceAdvisoryId,
                SourceDocHash = request.SourceDocHash,
                // Vendor status is stored as a lowercase string (or null when unset).
                VendorStatus = request.VendorStatus?.ToString().ToLowerInvariant(),
                PrecedenceRank = request.PrecedenceRank,
                DsseEnvelope = request.DsseEnvelopeJson,
                RawPayload = request.RawPayloadJson,
                FetchedAt = request.FetchedAt
            },
            ct)
            .ConfigureAwait(false);
        return existed ? SourceEdgeResult.Existing(edgeId) : SourceEdgeResult.Created(edgeId);
    }

    /// <summary>Returns the mapped source edges attached to a canonical advisory.</summary>
    public async Task<IReadOnlyList<SourceEdge>> GetSourceEdgesAsync(Guid canonicalId, CancellationToken ct = default)
    {
        var edges = await advisoryRepository.GetSourceEdgesAsync(canonicalId, ct).ConfigureAwait(false);
        return await MapSourceEdgesAsync(edges, ct).ConfigureAwait(false);
    }

    /// <summary>
    /// Checks whether an edge with the given source id and document hash already exists.
    /// Loads all edges for the canonical and scans in memory — assumes edge counts per
    /// advisory stay small.
    /// </summary>
    public async Task<bool> SourceEdgeExistsAsync(Guid canonicalId, Guid sourceId, string docHash, CancellationToken ct = default)
    {
        var edges = await advisoryRepository.GetSourceEdgesAsync(canonicalId, ct).ConfigureAwait(false);
        return edges.Any(edge =>
            edge.SourceId == sourceId &&
            string.Equals(edge.SourceDocHash, docHash, StringComparison.Ordinal));
    }

    /// <summary>Returns the provenance scopes for a canonical advisory as DTOs.</summary>
    public async Task<IReadOnlyList<ProvenanceScopeDto>> GetProvenanceScopesAsync(Guid canonicalId, CancellationToken ct = default)
    {
        var scopes = await provenanceScopeStore.GetByCanonicalIdAsync(canonicalId, ct).ConfigureAwait(false);
        return scopes.Select(MapProvenanceScope).ToList();
    }

    /// <summary>
    /// Resolves a source key to its id, creating a minimal source record (priority 100,
    /// enabled, key doubling as name and type) when none exists yet.
    /// </summary>
    public async Task<Guid> ResolveSourceIdAsync(string sourceKey, CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(sourceKey);
        var existing = await sourceRepository.GetByKeyAsync(sourceKey, ct).ConfigureAwait(false);
        if (existing is not null)
        {
            return existing.Id;
        }
        var created = await sourceRepository.UpsertAsync(
            new SourceEntity
            {
                Id = Guid.NewGuid(),
                Key = sourceKey.Trim(),
                Name = sourceKey.Trim(),
                SourceType = sourceKey.Trim(),
                Priority = 100,
                Enabled = true,
                Config = "{}",
                Metadata = "{}"
            },
            ct)
            .ConfigureAwait(false);
        return created.Id;
    }

    /// <summary>
    /// Returns the precedence (priority) for a source key, defaulting to 100 when the
    /// source is unknown — matching the default used by <see cref="ResolveSourceIdAsync"/>.
    /// </summary>
    public async Task<int> GetSourcePrecedenceAsync(string sourceKey, CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(sourceKey);
        var source = await sourceRepository.GetByKeyAsync(sourceKey, ct).ConfigureAwait(false);
        return source?.Priority ?? 100;
    }

    // Maps a list of entities sequentially, preserving the repository's ordering.
    private async Task<IReadOnlyList<CanonicalAdvisory>> MapCanonicalsAsync(
        IReadOnlyList<AdvisoryCanonicalEntity> entities,
        CancellationToken ct)
    {
        var results = new List<CanonicalAdvisory>(entities.Count);
        foreach (var entity in entities)
        {
            results.Add(await MapCanonicalAsync(entity, ct).ConfigureAwait(false));
        }
        return results;
    }

    // Maps a single entity, fetching its source edges and provenance scopes as part
    // of the mapping (one extra round trip each per advisory).
    private async Task<CanonicalAdvisory> MapCanonicalAsync(AdvisoryCanonicalEntity entity, CancellationToken ct)
    {
        var sourceEdges = await advisoryRepository.GetSourceEdgesAsync(entity.Id, ct).ConfigureAwait(false);
        var provenanceScopes = await provenanceScopeStore.GetByCanonicalIdAsync(entity.Id, ct).ConfigureAwait(false);
        return new CanonicalAdvisory
        {
            Id = entity.Id,
            Cve = entity.Cve,
            AffectsKey = entity.AffectsKey,
            VersionRange = ParseVersionRange(entity.VersionRange),
            Weaknesses = entity.Weakness,
            MergeHash = entity.MergeHash,
            Status = ParseCanonicalStatus(entity.Status),
            Severity = entity.Severity,
            EpssScore = entity.EpssScore,
            ExploitKnown = entity.ExploitKnown,
            Title = entity.Title,
            Summary = entity.Summary,
            CreatedAt = entity.CreatedAt,
            UpdatedAt = entity.UpdatedAt,
            SourceEdges = await MapSourceEdgesAsync(sourceEdges, ct).ConfigureAwait(false),
            ProvenanceScopes = provenanceScopes.Select(MapProvenanceScope).ToList()
        };
    }

    // Maps edge entities to DTOs, resolving each edge's source key one at a time.
    private async Task<IReadOnlyList<SourceEdge>> MapSourceEdgesAsync(
        IReadOnlyList<AdvisorySourceEdgeEntity> entities,
        CancellationToken ct)
    {
        var results = new List<SourceEdge>(entities.Count);
        foreach (var entity in entities)
        {
            var source = await sourceRepository.GetByIdAsync(entity.SourceId, ct).ConfigureAwait(false);
            results.Add(new SourceEdge
            {
                Id = entity.Id,
                CanonicalId = entity.CanonicalId,
                // Fall back to the raw source id ("D" format) when the source row is gone.
                SourceName = source?.Key ?? entity.SourceId.ToString("D"),
                SourceAdvisoryId = entity.SourceAdvisoryId,
                SourceDocHash = entity.SourceDocHash,
                VendorStatus = ParseVendorStatus(entity.VendorStatus),
                PrecedenceRank = entity.PrecedenceRank,
                DsseEnvelope = ParseDsseEnvelope(entity.DsseEnvelope),
                FetchedAt = entity.FetchedAt,
                CreatedAt = entity.CreatedAt
            });
        }
        return results;
    }

    // Stored status string -> enum; unknown or null values default to Active.
    private static CanonicalStatus ParseCanonicalStatus(string? status)
    {
        return status?.Trim().ToLowerInvariant() switch
        {
            "stub" => CanonicalStatus.Stub,
            "withdrawn" => CanonicalStatus.Withdrawn,
            _ => CanonicalStatus.Active
        };
    }

    // Stored vendor-status string -> enum; unknown or null values map to null.
    private static VendorStatus? ParseVendorStatus(string? status)
    {
        return status?.Trim().ToLowerInvariant() switch
        {
            "affected" => VendorStatus.Affected,
            "not_affected" => VendorStatus.NotAffected,
            "fixed" => VendorStatus.Fixed,
            "under_investigation" => VendorStatus.UnderInvestigation,
            _ => null
        };
    }

    // Parses the stored version-range column: a JSON object yields structured fields
    // (accepting both camelCase and snake_case property names); any non-JSON value —
    // or malformed JSON — is preserved verbatim as a raw range expression.
    private static VersionRange? ParseVersionRange(string? versionRangeJson)
    {
        if (string.IsNullOrWhiteSpace(versionRangeJson))
        {
            return null;
        }
        var trimmed = versionRangeJson.Trim();
        if (!trimmed.StartsWith("{", StringComparison.Ordinal))
        {
            return new VersionRange { RangeExpression = trimmed };
        }
        try
        {
            using var document = JsonDocument.Parse(trimmed);
            var root = document.RootElement;
            return new VersionRange
            {
                Introduced = GetProperty(root, "introduced"),
                Fixed = GetProperty(root, "fixed"),
                LastAffected = GetProperty(root, "lastAffected", "last_affected"),
                RangeExpression = GetProperty(root, "rangeExpression", "range_expression")
            };
        }
        catch (JsonException)
        {
            // Malformed JSON: degrade gracefully to a raw expression rather than fail the read.
            return new VersionRange { RangeExpression = trimmed };
        }
    }

    // Deserializes a stored DSSE envelope; returns null for empty or malformed payloads
    // (a bad envelope should not make the whole advisory unreadable).
    private static DsseEnvelope? ParseDsseEnvelope(string? dsseEnvelopeJson)
    {
        if (string.IsNullOrWhiteSpace(dsseEnvelopeJson))
        {
            return null;
        }
        try
        {
            return JsonSerializer.Deserialize<DsseEnvelope>(dsseEnvelopeJson, JsonOptions);
        }
        catch (JsonException)
        {
            return null;
        }
    }

    // Domain provenance scope -> transport DTO (BackportSemver is exposed as BackportVersion).
    private static ProvenanceScopeDto MapProvenanceScope(ProvenanceScope scope)
    {
        return new ProvenanceScopeDto
        {
            Id = scope.Id,
            DistroRelease = scope.DistroRelease,
            BackportVersion = scope.BackportSemver,
            PatchId = scope.PatchId,
            PatchOrigin = scope.PatchOrigin?.ToString(),
            EvidenceRef = scope.EvidenceRef,
            Confidence = scope.Confidence,
            UpdatedAt = scope.UpdatedAt
        };
    }

    // Returns the first string-valued property found under any of the given names,
    // or null when none match — used to accept both camelCase and snake_case payloads.
    private static string? GetProperty(JsonElement element, params string[] names)
    {
        foreach (var name in names)
        {
            if (element.TryGetProperty(name, out var value) && value.ValueKind == JsonValueKind.String)
            {
                return value.GetString();
            }
        }
        return null;
    }
}

View File

@@ -8,6 +8,7 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Core.Canonical;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
using System.Runtime.CompilerServices;
@@ -131,6 +132,111 @@ public sealed class AdvisoryCanonicalRepository : RepositoryBase<ConcelierDataSo
ct);
}
    /// <summary>
    /// Queries canonical advisories with optional filters (CVE, artifact key, severity,
    /// status, exploit-known flag, updated-since cutoff), deterministic ordering
    /// (updated_at DESC, id ASC as tiebreaker), and offset/limit pagination.
    /// The total matching row count is returned alongside the page via a
    /// COUNT(*) OVER() window column, so one round trip serves both items and count.
    /// </summary>
    public async Task<PagedResult<AdvisoryCanonicalEntity>> QueryAsync(
        CanonicalQueryOptions options,
        CancellationToken ct = default)
    {
        ArgumentNullException.ThrowIfNull(options);
        // Build the WHERE clause from fixed predicate strings only; all user-supplied
        // values are bound as parameters below, so the interpolation is injection-safe.
        var filters = new List<string>();
        if (!string.IsNullOrWhiteSpace(options.Cve))
        {
            filters.Add("cve = @cve");
        }
        if (!string.IsNullOrWhiteSpace(options.ArtifactKey))
        {
            filters.Add("affects_key = @affects_key");
        }
        if (!string.IsNullOrWhiteSpace(options.Severity))
        {
            filters.Add("severity = @severity");
        }
        if (options.Status is not null)
        {
            filters.Add("status = @status");
        }
        if (options.ExploitKnown is not null)
        {
            filters.Add("exploit_known = @exploit_known");
        }
        if (options.UpdatedSince is not null)
        {
            filters.Add("updated_at >= @updated_since");
        }
        var whereClause = filters.Count == 0
            ? string.Empty
            : $"WHERE {string.Join(" AND ", filters)}";
        // version_range::text — the column is cast so the reader sees plain text
        // regardless of its SQL type; total_count is the window-function total.
        var sql = $"""
            SELECT id, cve, affects_key, version_range::text, weakness, merge_hash,
                   status, severity, epss_score, exploit_known, title, summary,
                   created_at, updated_at, COUNT(*) OVER() AS total_count
            FROM vuln.advisory_canonical
            {whereClause}
            ORDER BY updated_at DESC, id ASC
            OFFSET @offset
            LIMIT @limit
            """;
        await using var connection = await DataSource.OpenSystemConnectionAsync(ct).ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "offset", options.Offset);
        AddParameter(command, "limit", options.Limit);
        // Parameters are added under the same conditions that added each predicate,
        // so bound parameters always match the placeholders present in the SQL.
        if (!string.IsNullOrWhiteSpace(options.Cve))
        {
            AddParameter(command, "cve", options.Cve);
        }
        if (!string.IsNullOrWhiteSpace(options.ArtifactKey))
        {
            AddParameter(command, "affects_key", options.ArtifactKey);
        }
        if (!string.IsNullOrWhiteSpace(options.Severity))
        {
            AddParameter(command, "severity", NormalizeSeverity(options.Severity));
        }
        if (options.Status is not null)
        {
            // Status enum is persisted lowercase; match that representation when filtering.
            AddParameter(command, "status", options.Status.Value.ToString().ToLowerInvariant());
        }
        if (options.ExploitKnown is not null)
        {
            AddParameter(command, "exploit_known", options.ExploitKnown.Value);
        }
        if (options.UpdatedSince is not null)
        {
            AddParameter(command, "updated_since", options.UpdatedSince.Value);
        }
        var items = new List<AdvisoryCanonicalEntity>();
        long totalCount = 0;
        await using var reader = await command.ExecuteReaderAsync(ct).ConfigureAwait(false);
        while (await reader.ReadAsync(ct).ConfigureAwait(false))
        {
            items.Add(MapCanonical(reader));
            // Ordinal 14 = total_count (15th column in the SELECT list above);
            // identical on every row, so re-reading it per row is harmless.
            totalCount = reader.GetInt64(14);
        }
        // Note: totalCount stays 0 when the page is empty, even if rows exist
        // outside the requested OFFSET/LIMIT window.
        return new PagedResult<AdvisoryCanonicalEntity>
        {
            Items = items,
            TotalCount = totalCount,
            Offset = options.Offset,
            Limit = options.Limit
        };
    }
public async Task<Guid> UpsertAsync(AdvisoryCanonicalEntity entity, CancellationToken ct = default)
{
var normalizedSeverity = NormalizeSeverity(entity.Severity);

View File

@@ -5,6 +5,7 @@
// Description: Repository interface for canonical advisory operations
// -----------------------------------------------------------------------------
using StellaOps.Concelier.Core.Canonical;
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
@@ -44,6 +45,13 @@ public interface IAdvisoryCanonicalRepository
int limit = 1000,
CancellationToken ct = default);
/// <summary>
/// Queries canonical advisories with deterministic ordering and pagination.
/// </summary>
Task<PagedResult<AdvisoryCanonicalEntity>> QueryAsync(
CanonicalQueryOptions options,
CancellationToken ct = default);
/// <summary>
/// Upserts a canonical advisory (insert or update by merge_hash).
/// </summary>

View File

@@ -6,12 +6,14 @@ using JpFlagsContracts = StellaOps.Concelier.Storage.JpFlags;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using PsirtContracts = StellaOps.Concelier.Storage.PsirtFlags;
using StellaOps.Concelier.Core.Canonical;
using StellaOps.Concelier.Core.Linksets;
using StellaOps.Concelier.Merge.Backport;
using StellaOps.Concelier.Persistence.Postgres.Advisories;
using StellaOps.Concelier.Persistence.Postgres.Repositories;
using StellaOps.Concelier.SbomIntegration;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Migrations;
using StellaOps.Infrastructure.Postgres.Options;
using StorageContracts = StellaOps.Concelier.Storage;
@@ -36,10 +38,18 @@ public static class ServiceCollectionExtensions
{
services.Configure<PostgresOptions>(sectionName, configuration.GetSection(sectionName));
services.AddSingleton<ConcelierDataSource>();
services.AddStartupMigrations(
ConcelierDataSource.DefaultSchemaName,
"Concelier.Storage",
typeof(ConcelierDataSource).Assembly);
// Register repositories
services.AddScoped<IAdvisoryRepository, AdvisoryRepository>();
services.AddScoped<IPostgresAdvisoryStore, PostgresAdvisoryStore>();
services.AddScoped<IAdvisoryCanonicalRepository, AdvisoryCanonicalRepository>();
services.AddScoped<ICanonicalAdvisoryStore, PostgresCanonicalAdvisoryStore>();
services.AddScoped<IMergeHashCalculator, MergeHashCalculatorAdapter>();
services.AddScoped<StellaOps.Concelier.Merge.Identity.IMergeHashCalculator, StellaOps.Concelier.Merge.Identity.MergeHashCalculator>();
services.AddScoped<ISourceRepository, SourceRepository>();
services.AddScoped<IAdvisorySourceReadRepository, AdvisorySourceReadRepository>();
services.AddScoped<IAdvisoryAliasRepository, AdvisoryAliasRepository>();
@@ -86,10 +96,18 @@ public static class ServiceCollectionExtensions
{
services.Configure(configureOptions);
services.AddSingleton<ConcelierDataSource>();
services.AddStartupMigrations(
ConcelierDataSource.DefaultSchemaName,
"Concelier.Storage",
typeof(ConcelierDataSource).Assembly);
// Register repositories
services.AddScoped<IAdvisoryRepository, AdvisoryRepository>();
services.AddScoped<IPostgresAdvisoryStore, PostgresAdvisoryStore>();
services.AddScoped<IAdvisoryCanonicalRepository, AdvisoryCanonicalRepository>();
services.AddScoped<ICanonicalAdvisoryStore, PostgresCanonicalAdvisoryStore>();
services.AddScoped<IMergeHashCalculator, MergeHashCalculatorAdapter>();
services.AddScoped<StellaOps.Concelier.Merge.Identity.IMergeHashCalculator, StellaOps.Concelier.Merge.Identity.MergeHashCalculator>();
services.AddScoped<ISourceRepository, SourceRepository>();
services.AddScoped<IAdvisorySourceReadRepository, AdvisorySourceReadRepository>();
services.AddScoped<IAdvisoryAliasRepository, AdvisoryAliasRepository>();

View File

@@ -0,0 +1,33 @@
using FluentAssertions;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using StellaOps.Concelier.Persistence.Postgres;
using StellaOps.TestKit;
namespace StellaOps.Concelier.Persistence.Tests;
/// <summary>
/// Guards the Concelier Postgres DI wiring: registering storage must also register
/// the startup-migration hosted service that bootstraps the schema on fresh installs.
/// </summary>
public sealed class ConcelierInfrastructureRegistrationTests
{
    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public void AddConcelierPostgresStorage_RegistersStartupMigrationHost()
    {
        // Arrange: minimal configuration carrying only the connection string the
        // storage registration reads.
        var settings = new Dictionary<string, string?>
        {
            ["Postgres:Concelier:ConnectionString"] = "Host=postgres;Database=stellaops;Username=postgres;Password=postgres"
        };
        var configuration = new ConfigurationBuilder()
            .AddInMemoryCollection(settings)
            .Build();

        // Act: wire up logging plus the Concelier Postgres storage registrations.
        var services = new ServiceCollection();
        services.AddLogging();
        services.AddConcelierPostgresStorage(configuration);

        // Assert: exactly one IHostedService descriptor must be present — the
        // startup-migration host.
        var hostedServiceDescriptors = services.Where(descriptor => descriptor.ServiceType == typeof(IHostedService));
        hostedServiceDescriptors
            .Should()
            .ContainSingle("fresh installs need Concelier startup migrations to create the vuln schema before canonical advisory queries can execute");
    }
}

View File

@@ -11,3 +11,4 @@ Source of truth: `docs-archived/implplan/2025-12-29-csproj-audit/SPRINT_20251229
| TASK-015-011 | DONE | Added SbomRepository integration coverage. |
| TASK-015-007d | DONE | Added license query coverage for SbomRepository. |
| TASK-015-013 | DONE | Added SbomRepository integration coverage for model cards and policy fields. |
| TASK-014-003 | DONE | 2026-03-09: added startup-migration registration coverage so Concelier canonical tables bootstrap on fresh deploys and verified `/api/v1/canonical` live after redeploy. |

View File

@@ -0,0 +1,132 @@
using System.Net;
using FluentAssertions;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Mvc.Testing;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using StellaOps.Concelier.Core.Canonical;
using StellaOps.Concelier.WebService.Tests.Fixtures;
namespace StellaOps.Concelier.WebService.Tests.Canonical;
/// <summary>
/// Boots the real Concelier host (with only the canonical store swapped for a
/// stub) and queries <c>/api/v1/canonical</c>, so any production DI
/// registration failure surfaces as a resolution error during the request.
/// </summary>
public sealed class CanonicalProductionRegistrationTests : IAsyncLifetime
{
    // Initialized in InitializeAsync; null-forgiven because xUnit guarantees
    // InitializeAsync runs before any test method executes.
    private WebApplicationFactory<Program> factory = null!;
    private HttpClient client = null!;

    // Fixed id shared by the stub advisory and its source edge.
    private static readonly Guid CanonicalId = Guid.Parse("22222222-2222-2222-2222-222222222222");

    public ValueTask InitializeAsync()
    {
        factory = new ConcelierApplicationFactory()
            .WithWebHostBuilder(builder =>
            {
                builder.UseEnvironment("Testing");
                builder.ConfigureServices(services =>
                {
                    // Replace only the store itself with a deterministic stub;
                    // every other production registration stays intact so a
                    // DI wiring regression still fails the test.
                    services.RemoveAll<ICanonicalAdvisoryStore>();
                    services.AddSingleton<ICanonicalAdvisoryStore>(new StubCanonicalAdvisoryStore());
                });
            });
        client = factory.CreateClient();
        return ValueTask.CompletedTask;
    }

    public ValueTask DisposeAsync()
    {
        client.Dispose();
        factory.Dispose();
        return ValueTask.CompletedTask;
    }

    [Fact]
    public async Task QueryCanonical_UsesProductionRegistrationWithoutDiFailure()
    {
        // A 200 containing the stub CVE proves the endpoint resolved its
        // dependencies through the production registration path.
        var response = await client.GetAsync("/api/v1/canonical?cve=CVE-2026-1000");
        response.StatusCode.Should().Be(HttpStatusCode.OK);
        var content = await response.Content.ReadAsStringAsync();
        content.Should().Contain("CVE-2026-1000");
    }

    /// <summary>
    /// In-memory <see cref="ICanonicalAdvisoryStore"/> that serves one fixed
    /// advisory so endpoint tests need no database.
    /// </summary>
    private sealed class StubCanonicalAdvisoryStore : ICanonicalAdvisoryStore
    {
        private static readonly CanonicalAdvisory Advisory = new()
        {
            Id = CanonicalId,
            Cve = "CVE-2026-1000",
            AffectsKey = "pkg:npm/example@1.0.0",
            MergeHash = "sha256:canonical-production-registration",
            Status = CanonicalStatus.Active,
            Severity = "high",
            CreatedAt = new DateTimeOffset(2026, 3, 9, 0, 0, 0, TimeSpan.Zero),
            UpdatedAt = new DateTimeOffset(2026, 3, 9, 0, 0, 0, TimeSpan.Zero),
            SourceEdges =
            [
                new SourceEdge
                {
                    Id = Guid.Parse("33333333-3333-3333-3333-333333333333"),
                    CanonicalId = CanonicalId,
                    SourceName = "nvd",
                    SourceAdvisoryId = "NVD-2026-1000",
                    SourceDocHash = "sha256:edge",
                    PrecedenceRank = 40,
                    FetchedAt = new DateTimeOffset(2026, 3, 9, 0, 0, 0, TimeSpan.Zero),
                    CreatedAt = new DateTimeOffset(2026, 3, 9, 0, 0, 0, TimeSpan.Zero)
                }
            ]
        };

        // Read paths match only the single stubbed advisory; anything else
        // yields null/empty so negative lookups are exercised too.
        public Task<CanonicalAdvisory?> GetByIdAsync(Guid id, CancellationToken ct = default)
            => Task.FromResult(id == CanonicalId ? Advisory : null);

        public Task<CanonicalAdvisory?> GetByMergeHashAsync(string mergeHash, CancellationToken ct = default)
            => Task.FromResult(
                string.Equals(mergeHash, Advisory.MergeHash, StringComparison.Ordinal) ? Advisory : null);

        public Task<IReadOnlyList<CanonicalAdvisory>> GetByCveAsync(string cve, CancellationToken ct = default)
            => Task.FromResult<IReadOnlyList<CanonicalAdvisory>>(
                string.Equals(cve, Advisory.Cve, StringComparison.Ordinal) ? [Advisory] : []);

        public Task<IReadOnlyList<CanonicalAdvisory>> GetByArtifactAsync(string artifactKey, CancellationToken ct = default)
            => Task.FromResult<IReadOnlyList<CanonicalAdvisory>>(
                string.Equals(artifactKey, Advisory.AffectsKey, StringComparison.Ordinal) ? [Advisory] : []);

        // Echoes the caller's paging window back so paging plumbing is visible.
        public Task<PagedResult<CanonicalAdvisory>> QueryAsync(CanonicalQueryOptions options, CancellationToken ct = default)
            => Task.FromResult(new PagedResult<CanonicalAdvisory>
            {
                Items = [Advisory],
                TotalCount = 1,
                Offset = options.Offset,
                Limit = options.Limit
            });

        // Write paths are no-ops returning fixed identifiers/results.
        public Task<Guid> UpsertCanonicalAsync(UpsertCanonicalRequest request, CancellationToken ct = default)
            => Task.FromResult(CanonicalId);

        public Task UpdateStatusAsync(Guid id, CanonicalStatus status, CancellationToken ct = default)
            => Task.CompletedTask;

        public Task<long> CountAsync(CancellationToken ct = default)
            => Task.FromResult(1L);

        public Task<SourceEdgeResult> AddSourceEdgeAsync(AddSourceEdgeRequest request, CancellationToken ct = default)
            => Task.FromResult(SourceEdgeResult.Created(Guid.Parse("44444444-4444-4444-4444-444444444444")));

        public Task<IReadOnlyList<SourceEdge>> GetSourceEdgesAsync(Guid canonicalId, CancellationToken ct = default)
            => Task.FromResult<IReadOnlyList<SourceEdge>>(Advisory.SourceEdges);

        public Task<bool> SourceEdgeExistsAsync(Guid canonicalId, Guid sourceId, string docHash, CancellationToken ct = default)
            => Task.FromResult(false);

        public Task<IReadOnlyList<ProvenanceScopeDto>> GetProvenanceScopesAsync(Guid canonicalId, CancellationToken ct = default)
            => Task.FromResult<IReadOnlyList<ProvenanceScopeDto>>([]);

        public Task<Guid> ResolveSourceIdAsync(string sourceKey, CancellationToken ct = default)
            => Task.FromResult(Guid.Parse("55555555-5555-5555-5555-555555555555"));

        public Task<int> GetSourcePrecedenceAsync(string sourceKey, CancellationToken ct = default)
            => Task.FromResult(40);
    }
}

View File

@@ -0,0 +1,41 @@
-- 010_dead_letter_summary_ambiguity_fix.sql
-- Repairs get_actionable_dead_letter_summary so the sample-reason subquery
-- unambiguously references dead_letter_entries columns after projection aliasing.
-- Returns the top p_limit pending dead-letter groups for a tenant, grouped by
-- (error_code, category), each with counts, the oldest entry timestamp, and
-- one representative (most recent) failure reason.
CREATE OR REPLACE FUNCTION get_actionable_dead_letter_summary(
    p_tenant_id TEXT,
    p_limit INTEGER DEFAULT 10
)
RETURNS TABLE (
    error_code TEXT,
    category error_category,
    entry_count BIGINT,
    retryable_count BIGINT,
    oldest_entry TIMESTAMPTZ,
    sample_reason TEXT
) AS $$
BEGIN
    RETURN QUERY
    SELECT
        dle.error_code,
        dle.category,
        COUNT(*)::BIGINT AS entry_count,
        COUNT(*) FILTER (WHERE dle.is_retryable)::BIGINT AS retryable_count,
        MIN(dle.created_at) AS oldest_entry,
        (
            -- Most recent pending failure reason for THIS group. The category
            -- correlation keeps the sample aligned with the grouped row when
            -- the same error_code appears under multiple categories; every
            -- column is alias-qualified to avoid ambiguity with the outer
            -- projection's names.
            SELECT dle_sample.failure_reason
            FROM dead_letter_entries dle_sample
            WHERE dle_sample.tenant_id = p_tenant_id
              AND dle_sample.error_code = dle.error_code
              AND dle_sample.category = dle.category
              AND dle_sample.status = 'pending'
            ORDER BY dle_sample.created_at DESC
            LIMIT 1
        ) AS sample_reason
    FROM dead_letter_entries dle
    WHERE dle.tenant_id = p_tenant_id
      AND dle.status = 'pending'
    GROUP BY dle.error_code, dle.category
    ORDER BY COUNT(*) DESC
    LIMIT p_limit;
END;
$$ LANGUAGE plpgsql STABLE;

View File

@@ -0,0 +1,25 @@
using StellaOps.JobEngine.Infrastructure.Postgres;
using StellaOps.TestKit;
namespace StellaOps.JobEngine.Tests;
/// <summary>
/// Guards the 010 dead-letter migration: the SQL must be embedded in the
/// infrastructure assembly and must alias-qualify the sample-reason subquery
/// columns so PostgreSQL does not resolve them ambiguously.
/// </summary>
public sealed class DeadLetterSummaryMigrationTests
{
    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public void DeadLetterSummaryFixMigration_EmbedsQualifiedSubqueryColumns()
    {
        var assembly = typeof(JobEngineDataSource).Assembly;

        // Embedded-resource names are prefixed with the project's default
        // namespace unless <LogicalName> overrides it, so locate the resource
        // by suffix and open the stream with the discovered full name. This
        // works whether or not a LogicalName is configured.
        var resourceName = Assert.Single(
            assembly.GetManifestResourceNames(),
            name => name.EndsWith("010_dead_letter_summary_ambiguity_fix.sql", StringComparison.Ordinal));

        using var stream = assembly.GetManifestResourceStream(resourceName);
        Assert.NotNull(stream);
        using var reader = new StreamReader(stream!);
        var sql = reader.ReadToEnd();

        // These fragments pin the alias-qualified, status-filtered subquery
        // shape that the ambiguity fix introduced.
        Assert.Contains("FROM dead_letter_entries dle_sample", sql, StringComparison.Ordinal);
        Assert.Contains("dle_sample.error_code = dle.error_code", sql, StringComparison.Ordinal);
        Assert.Contains("dle_sample.status = 'pending'", sql, StringComparison.Ordinal);
    }
}

View File

@@ -106,7 +106,7 @@ public sealed class RunRepository : RepositoryBase<SchedulerDataSource>, IRunRep
if (!options.States.IsDefaultOrEmpty)
{
filters.Add("state = ANY(@states)");
filters.Add("state = ANY(CAST(@states AS scheduler.run_state[]))");
}
if (options.CreatedAfter is not null)
@@ -174,7 +174,7 @@ public sealed class RunRepository : RepositoryBase<SchedulerDataSource>, IRunRep
const string sql = """
SELECT *
FROM scheduler.runs
WHERE state = @state
WHERE state = CAST(@state AS scheduler.run_state)
ORDER BY created_at ASC
LIMIT @limit
""";
@@ -197,15 +197,18 @@ public sealed class RunRepository : RepositoryBase<SchedulerDataSource>, IRunRep
AddParameter(command, "id", run.Id);
AddParameter(command, "tenant_id", run.TenantId);
AddParameter(command, "schedule_id", run.ScheduleId ?? (object)DBNull.Value);
AddParameter(command, "trigger", Serialize(run.Trigger));
AddParameter(command, "state", run.State.ToString().ToLowerInvariant());
AddParameter(command, "stats", Serialize(run.Stats));
AddParameter(command, "reason", Serialize(run.Reason));
AddJsonbParameter(command, "trigger", Serialize(run.Trigger));
command.Parameters.Add(new NpgsqlParameter<string>("state", run.State.ToString().ToLowerInvariant())
{
DataTypeName = "scheduler.run_state"
});
AddJsonbParameter(command, "stats", Serialize(run.Stats));
AddJsonbParameter(command, "reason", Serialize(run.Reason));
AddParameter(command, "created_at", run.CreatedAt);
AddParameter(command, "started_at", run.StartedAt ?? (object)DBNull.Value);
AddParameter(command, "finished_at", run.FinishedAt ?? (object)DBNull.Value);
AddParameter(command, "error", run.Error ?? (object)DBNull.Value);
AddParameter(command, "deltas", Serialize(run.Deltas));
AddJsonbParameter(command, "deltas", Serialize(run.Deltas));
AddParameter(command, "retry_of", run.RetryOf ?? (object)DBNull.Value);
AddParameter(command, "schema_version", run.SchemaVersion ?? (object)DBNull.Value);
}

View File

@@ -0,0 +1,99 @@
using System;
using System.Collections.Immutable;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Scheduler.Models;
using StellaOps.Scheduler.Persistence.Postgres;
using StellaOps.Scheduler.Persistence.Postgres.Repositories;
using StellaOps.TestKit;
using Xunit;
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
// Runs inside the shared Postgres fixture collection so repository SQL
// (enum-array casts, enum equality filters) executes against a real
// scheduler schema rather than a mock.
[Collection(SchedulerPostgresCollection.Name)]
public sealed class RunRepositoryTests : IAsyncLifetime
{
    private readonly SchedulerPostgresFixture fixture;

    public RunRepositoryTests(SchedulerPostgresFixture fixture)
    {
        this.fixture = fixture;
    }

    // Truncate all tables before each test so earlier tests cannot leak rows in.
    public ValueTask InitializeAsync() => new(fixture.TruncateAllTablesAsync());
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;

    // NOTE(review): this test requires a live Postgres fixture but is tagged
    // Category=Unit — confirm whether an Integration trait was intended.
    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task ListByStateAsync_ReturnsRunsMatchingEnumState()
    {
        var repository = CreateRepository();
        var queuedRun = CreateRun("tenant-a", RunState.Queued);
        var runningRun = CreateRun("tenant-a", RunState.Running);
        await repository.InsertAsync(queuedRun, CancellationToken.None);
        await repository.InsertAsync(runningRun, CancellationToken.None);

        var results = await repository.ListByStateAsync(RunState.Queued, cancellationToken: CancellationToken.None);

        // Only the queued run may come back; the running run must be filtered
        // out by the enum-typed state predicate.
        results.Should().ContainSingle(run => run.Id == queuedRun.Id);
        results.Should().NotContain(run => run.Id == runningRun.Id);
    }

    // NOTE(review): same Unit-vs-Postgres trait question as above.
    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task ListAsync_FiltersByEnumStatesArray()
    {
        var repository = CreateRepository();
        var queuedRun = CreateRun("tenant-b", RunState.Queued, scheduleId: "sched-1");
        var runningRun = CreateRun("tenant-b", RunState.Running, scheduleId: "sched-1");
        var completedRun = CreateRun("tenant-b", RunState.Completed, scheduleId: "sched-1");
        await repository.InsertAsync(queuedRun, CancellationToken.None);
        await repository.InsertAsync(runningRun, CancellationToken.None);
        await repository.InsertAsync(completedRun, CancellationToken.None);

        // Queued + Running are requested; Completed must be excluded by the
        // states-array filter.
        var results = await repository.ListAsync(
            "tenant-b",
            new RunQueryOptions
            {
                ScheduleId = "sched-1",
                States = ImmutableArray.Create(RunState.Queued, RunState.Running),
                SortAscending = true,
                Limit = 10
            },
            CancellationToken.None);

        results.Select(run => run.Id).Should().BeEquivalentTo(
            new[] { queuedRun.Id, runningRun.Id },
            options => options.WithoutStrictOrdering());
        results.Should().NotContain(run => run.Id == completedRun.Id);
    }

    // Builds a repository against the fixture's connection, pinned to the
    // default scheduler schema.
    private RunRepository CreateRepository()
    {
        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = SchedulerDataSource.DefaultSchemaName;
        var dataSource = new SchedulerDataSource(Options.Create(options), NullLogger<SchedulerDataSource>.Instance);
        return new RunRepository(dataSource, NullLogger<RunRepository>.Instance);
    }

    // Creates a minimal run; createdAt is jittered by up to 15 minutes off a
    // fixed base so inserted rows do not share identical timestamps.
    // NOTE(review): Guid.NewGuid().GetHashCode() makes the jitter
    // nondeterministic per process — acceptable here since no assertion
    // depends on the exact timestamp.
    private static Run CreateRun(string tenantId, RunState state, string? scheduleId = null)
    {
        var createdAt = new DateTimeOffset(2026, 3, 9, 10, 0, 0, TimeSpan.Zero)
            .AddMinutes(Guid.NewGuid().GetHashCode() & 15);
        return new Run(
            id: Guid.NewGuid().ToString("N"),
            tenantId: tenantId,
            trigger: RunTrigger.Manual,
            state: state,
            stats: new RunStats(candidates: 1, queued: 1),
            createdAt: createdAt,
            scheduleId: scheduleId);
    }
}

View File

@@ -2,6 +2,7 @@
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using NpgsqlTypes;
using StellaOps.Platform.WebService.Options;
using System;
using System.Threading;
@@ -185,7 +186,11 @@ public sealed class PlatformAnalyticsMaintenanceService : BackgroundService
var startedAt = timeProvider.GetUtcNow();
var executed = await executor.ExecuteNonQueryAsync(
"SELECT analytics.compute_daily_rollups(@date);",
cmd => cmd.Parameters.AddWithValue("date", snapshotDate.Date),
cmd =>
{
var parameter = cmd.Parameters.Add("date", NpgsqlDbType.Date);
parameter.Value = snapshotDate.Date;
},
cancellationToken)
.ConfigureAwait(false);
if (!executed)

View File

@@ -6,6 +6,7 @@ using System.Threading.Tasks;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Platform.WebService.Options;
using StellaOps.Platform.WebService.Services;
using StellaOps.TestKit;
@@ -60,6 +61,9 @@ public sealed class PlatformAnalyticsMaintenanceServiceTests
.ToArray();
Assert.Equal(expectedDates, actualDates);
Assert.All(
rollupCommands,
command => Assert.Equal(NpgsqlDbType.Date, command.ParameterTypes["date"]));
var refreshCommands = executor.Commands
.Where(command => command.Sql.StartsWith("REFRESH MATERIALIZED VIEW", StringComparison.Ordinal))
@@ -77,7 +81,10 @@ public sealed class PlatformAnalyticsMaintenanceServiceTests
Assert.True(lastRollupIndex < firstRefreshIndex);
}
private sealed record ExecutedCommand(string Sql, IReadOnlyDictionary<string, object?> Parameters);
private sealed record ExecutedCommand(
string Sql,
IReadOnlyDictionary<string, object?> Parameters,
IReadOnlyDictionary<string, NpgsqlDbType> ParameterTypes);
private sealed class RecordingMaintenanceExecutor : IPlatformAnalyticsMaintenanceExecutor
{
@@ -108,8 +115,14 @@ public sealed class PlatformAnalyticsMaintenanceServiceTests
parameter => parameter.ParameterName,
parameter => parameter.Value,
StringComparer.OrdinalIgnoreCase);
var parameterTypes = command.Parameters
.Cast<NpgsqlParameter>()
.ToDictionary(
parameter => parameter.ParameterName,
parameter => parameter.NpgsqlDbType,
StringComparer.OrdinalIgnoreCase);
Commands.Add(new ExecutedCommand(sql, parameters));
Commands.Add(new ExecutedCommand(sql, parameters, parameterTypes));
if (Commands.Count >= expectedCommandCount)
{
completion.TrySetResult(true);

View File

@@ -57,6 +57,12 @@ using System.Diagnostics;
using System.IO;
var builder = WebApplication.CreateSlimBuilder(args);
builder.Configuration.AddStellaOpsDefaults(options =>
{
options.BasePath = builder.Environment.ContentRootPath;
options.EnvironmentPrefix = "SCANNER_";
options.YamlFiles.Add(new YamlConfigurationFile("../etc/scanner.yaml", true));
});
builder.Services.AddOptions<ScannerWorkerOptions>()
.BindConfiguration(ScannerWorkerOptions.SectionName)

View File

@@ -0,0 +1,38 @@
using Microsoft.Extensions.Configuration;
using StellaOps.Configuration;
using StellaOps.Scanner.Cache;
using StellaOps.TestKit;
using Xunit;
namespace StellaOps.Scanner.Worker.Tests;
/// <summary>
/// Verifies that the StellaOps configuration defaults strip the worker's
/// environment prefix, so a SCANNER_-prefixed variable binds onto the
/// <c>scanner:cache</c> section as <see cref="ScannerCacheOptions"/>.
/// </summary>
public sealed class ScannerWorkerConfigurationBootstrapTests
{
    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public void AddStellaOpsDefaults_StripsScannerPrefixForCacheBinding()
    {
        const string cacheRootEnvVar = "SCANNER_SCANNER__CACHE__ROOTPATH";
        const string cacheRoot = "/var/lib/stellaops/cache/scanner";

        Environment.SetEnvironmentVariable(cacheRootEnvVar, cacheRoot);
        try
        {
            // Build a configuration exactly as the worker does, with the
            // SCANNER_ prefix declared for environment-variable sources.
            var configuration = new ConfigurationBuilder()
                .AddStellaOpsDefaults(options => options.EnvironmentPrefix = "SCANNER_")
                .Build();

            // After prefix stripping, SCANNER__CACHE__ROOTPATH must surface
            // under scanner:cache and bind to the cache options type.
            var cacheSection = configuration.GetSection("scanner:cache");
            var cacheOptions = cacheSection.Get<ScannerCacheOptions>();

            Assert.NotNull(cacheOptions);
            Assert.Equal(cacheRoot, cacheOptions.RootPath);
        }
        finally
        {
            // Environment variables are process-wide; always restore state so
            // other tests in the run are not polluted.
            Environment.SetEnvironmentVariable(cacheRootEnvVar, null);
        }
    }
}

View File

@@ -6,6 +6,7 @@ Source of truth: `docs/implplan/SPRINT_20260130_002_Tools_csproj_remediation_sol
| --- | --- | --- |
| QA-SCANNER-VERIFY-009 | DONE | `SPRINT_20260212_002_Scanner_unchecked_feature_verification_batch1.md`: added deterministic `BinaryLookupStageExecutorTests` coverage for runtime patch verification, Build-ID mapping, and unified finding publication wiring (run-002, 2026-02-12). |
| QA-SCANNER-VERIFY-008 | DONE | `SPRINT_20260212_002_Scanner_unchecked_feature_verification_batch1.md`: added worker entry-trace execution coverage for binary intelligence graph enrichment and validated run-002 pass (2026-02-12). |
| TASK-014-003 | DONE | 2026-03-09: verified `SCANNER_` env-prefix binding for `scanner:cache:rootPath`; live worker now uses `/var/lib/stellaops/cache/scanner` on the writable hardened volume. |
| REMED-05 | TODO | Remediation checklist: docs/implplan/audits/csproj-standards/remediation/checklists/src/Scanner/__Tests/StellaOps.Scanner.Worker.Tests/StellaOps.Scanner.Worker.Tests.md. |
| REMED-06 | DONE | SOLID review notes captured for SPRINT_20260130_002. |
| SPRINT-20260208-060-IDEMP-001 | DONE | Implement idempotent verdict attestation submission (idempotency key + dedupe + retry classification + tests). |