Build fixes and code structure improvements. New but essential UI functionality. CI improvements. Documentation improvements. AI module improvements.

This commit is contained in:
StellaOps Bot
2025-12-26 21:54:17 +02:00
parent 335ff7da16
commit c2b9cd8d1f
3717 changed files with 264714 additions and 48202 deletions

View File

@@ -1,3 +1 @@
namespace StellaOps.Notify.WebService;
public partial class Program;
file sealed partial class Program;

View File

@@ -25,9 +25,10 @@ using StellaOps.Auth.ServerIntegration;
using StellaOps.Configuration;
using System.Collections.Immutable;
using StellaOps.Notify.Models;
using StellaOps.Notify.Storage.Postgres;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using StellaOps.Notify.Persistence.Extensions;
using StellaOps.Notify.Persistence.Postgres;
using StellaOps.Notify.Persistence.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Repositories;
using StellaOps.Notify.WebService.Diagnostics;
using StellaOps.Notify.WebService.Extensions;
using StellaOps.Notify.WebService.Hosting;
@@ -87,7 +88,7 @@ builder.Services.AddSingleton<ServiceStatus>();
builder.Services.AddSingleton<NotifySchemaMigrationService>();
// PostgreSQL is the canonical Notify storage; enable Postgres-backed repositories.
builder.Services.AddNotifyPostgresStorage(builder.Configuration, sectionName: "Postgres:Notify");
builder.Services.AddNotifyPersistence(builder.Configuration, sectionName: "Postgres:Notify");
var pluginHostOptions = NotifyPluginHostFactory.Build(bootstrapOptions, contentRootPath);
builder.Services.AddSingleton(pluginHostOptions);

View File

@@ -0,0 +1,12 @@
{
"profiles": {
"StellaOps.Notify.WebService": {
"commandName": "Project",
"launchBrowser": true,
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development"
},
"applicationUrl": "https://localhost:62530;http://localhost:62531"
}
}
}

View File

@@ -9,21 +9,21 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Serilog.AspNetCore" Version="8.0.1" />
<PackageReference Include="Serilog.Sinks.Console" Version="5.0.1" />
<PackageReference Include="YamlDotNet" Version="13.7.1" />
<PackageReference Include="Serilog.AspNetCore" />
<PackageReference Include="Serilog.Sinks.Console" />
<PackageReference Include="YamlDotNet" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../../__Libraries/StellaOps.Configuration/StellaOps.Configuration.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.DependencyInjection/StellaOps.DependencyInjection.csproj" />
<ProjectReference Include="../__Libraries/StellaOps.Notify.Models/StellaOps.Notify.Models.csproj" />
<ProjectReference Include="../__Libraries/StellaOps.Notify.Storage.Postgres/StellaOps.Notify.Storage.Postgres.csproj" />
<ProjectReference Include="../__Libraries/StellaOps.Notify.Persistence/StellaOps.Notify.Persistence.csproj" />
<ProjectReference Include="../__Libraries/StellaOps.Notify.Engine/StellaOps.Notify.Engine.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Plugin/StellaOps.Plugin.csproj" />
<ProjectReference Include="../../Authority/StellaOps.Authority/StellaOps.Auth.Abstractions/StellaOps.Auth.Abstractions.csproj" />
<ProjectReference Include="../../Authority/StellaOps.Authority/StellaOps.Auth.Client/StellaOps.Auth.Client.csproj" />
<ProjectReference Include="../../Authority/StellaOps.Authority/StellaOps.Auth.ServerIntegration/StellaOps.Auth.ServerIntegration.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Router.AspNet/StellaOps.Router.AspNet.csproj" />
<ProjectReference Include="../../Router/__Libraries/StellaOps.Router.AspNet/StellaOps.Router.AspNet.csproj" />
</ItemGroup>
</Project>

View File

@@ -6,15 +6,15 @@
<OutputType>Exe</OutputType>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Configuration.EnvironmentVariables" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Hosting" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Console" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Options.ConfigurationExtensions" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Configuration.Json" />
<PackageReference Include="Microsoft.Extensions.Configuration.EnvironmentVariables" />
<PackageReference Include="Microsoft.Extensions.Hosting" />
<PackageReference Include="Microsoft.Extensions.Logging.Console" />
<PackageReference Include="Microsoft.Extensions.Options.ConfigurationExtensions" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Notify.Queue\StellaOps.Notify.Queue.csproj" />
<ProjectReference Include="..\StellaOps.Notify.Models\StellaOps.Notify.Models.csproj" />
<ProjectReference Include="..\__Libraries\StellaOps.Notify.Queue\StellaOps.Notify.Queue.csproj" />
<ProjectReference Include="..\__Libraries\StellaOps.Notify.Models\StellaOps.Notify.Models.csproj" />
</ItemGroup>
<ItemGroup>
<None Update="appsettings.json">

File diff suppressed because it is too large Load Diff

View File

@@ -18,4 +18,4 @@
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>
</Project>

View File

@@ -8,5 +8,6 @@
<ItemGroup>
<ProjectReference Include="..\StellaOps.Notify.Engine\StellaOps.Notify.Engine.csproj" />
<ProjectReference Include="..\StellaOps.Notify.Models\StellaOps.Notify.Models.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Plugin\StellaOps.Plugin.csproj" />
</ItemGroup>
</Project>

View File

@@ -18,4 +18,4 @@
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>
</Project>

View File

@@ -18,4 +18,4 @@
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>
</Project>

View File

@@ -18,4 +18,4 @@
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>
</Project>

View File

@@ -58,6 +58,7 @@ public enum NotifyDeliveryAttemptStatus
[JsonConverter(typeof(JsonStringEnumConverter))]
public enum NotifyTemplateRenderMode
{
None,
Markdown,
Html,
AdaptiveCard,

View File

@@ -0,0 +1,32 @@
using Microsoft.EntityFrameworkCore;
using StellaOps.Infrastructure.EfCore.Context;
namespace StellaOps.Notify.Persistence.EfCore.Context;
/// <summary>
/// EF Core DbContext for the Notify module.
/// Placeholder for future EF Core scaffolding from PostgreSQL schema.
/// </summary>
public class NotifyDbContext : StellaOpsDbContextBase
{
    /// <summary>
    /// Creates a new Notify DbContext.
    /// </summary>
    /// <param name="options">EF Core options (provider, connection) supplied by the DI container.</param>
    public NotifyDbContext(DbContextOptions<NotifyDbContext> options)
        : base(options)
    {
    }

    /// <inheritdoc />
    /// <remarks>All Notify entities are mapped into the dedicated "notify" schema.</remarks>
    protected override string SchemaName => "notify";

    /// <inheritdoc />
    protected override void OnModelCreating(ModelBuilder modelBuilder)
    {
        // Apply whatever shared conventions StellaOpsDbContextBase defines first.
        base.OnModelCreating(modelBuilder);

        // Entity configurations will be added after scaffolding
        // from the PostgreSQL database using:
        //   dotnet ef dbcontext scaffold
    }
}

View File

@@ -1,24 +1,24 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Options;
using StellaOps.Notify.Storage.Postgres.Repositories;
using StellaOps.Notify.Persistence.Postgres;
using StellaOps.Notify.Persistence.Postgres.Repositories;
namespace StellaOps.Notify.Storage.Postgres;
namespace StellaOps.Notify.Persistence.Extensions;
/// <summary>
/// Extension methods for configuring Notify PostgreSQL storage services.
/// Extension methods for configuring Notify persistence services.
/// </summary>
public static class ServiceCollectionExtensions
public static class NotifyPersistenceExtensions
{
/// <summary>
/// Adds Notify PostgreSQL storage services.
/// Adds Notify PostgreSQL persistence services using configuration section.
/// </summary>
/// <param name="services">Service collection.</param>
/// <param name="configuration">Configuration root.</param>
/// <param name="sectionName">Configuration section name for PostgreSQL options.</param>
/// <returns>Service collection for chaining.</returns>
public static IServiceCollection AddNotifyPostgresStorage(
public static IServiceCollection AddNotifyPersistence(
this IServiceCollection services,
IConfiguration configuration,
string sectionName = "Postgres:Notify")
@@ -41,8 +41,6 @@ public static class ServiceCollectionExtensions
services.AddScoped<IIncidentRepository, IncidentRepository>();
services.AddScoped<INotifyAuditRepository, NotifyAuditRepository>();
services.AddScoped<ILockRepository, LockRepository>();
// Register new repositories (SPRINT-3412: PostgreSQL durability)
services.AddScoped<IThrottleConfigRepository, ThrottleConfigRepository>();
services.AddScoped<IOperatorOverrideRepository, OperatorOverrideRepository>();
services.AddScoped<ILocalizationBundleRepository, LocalizationBundleRepository>();
@@ -51,12 +49,12 @@ public static class ServiceCollectionExtensions
}
/// <summary>
/// Adds Notify PostgreSQL storage services with explicit options.
/// Adds Notify PostgreSQL persistence services with explicit options.
/// </summary>
/// <param name="services">Service collection.</param>
/// <param name="configureOptions">Options configuration action.</param>
/// <returns>Service collection for chaining.</returns>
public static IServiceCollection AddNotifyPostgresStorage(
public static IServiceCollection AddNotifyPersistence(
this IServiceCollection services,
Action<PostgresOptions> configureOptions)
{
@@ -77,12 +75,36 @@ public static class ServiceCollectionExtensions
services.AddScoped<IInboxRepository, InboxRepository>();
services.AddScoped<IIncidentRepository, IncidentRepository>();
services.AddScoped<INotifyAuditRepository, NotifyAuditRepository>();
// Register new repositories (SPRINT-3412: PostgreSQL durability)
services.AddScoped<ILockRepository, LockRepository>();
services.AddScoped<IThrottleConfigRepository, ThrottleConfigRepository>();
services.AddScoped<IOperatorOverrideRepository, OperatorOverrideRepository>();
services.AddScoped<ILocalizationBundleRepository, LocalizationBundleRepository>();
return services;
}
/// <summary>
/// Adds Notify in-memory persistence services for testing.
/// </summary>
/// <param name="services">Service collection.</param>
/// <returns>Service collection for chaining.</returns>
/// <remarks>
/// Registers process-local adapter implementations; nothing is persisted across
/// process restarts. Intended for tests and local development only.
/// </remarks>
public static IServiceCollection AddNotifyPersistenceInMemory(
    this IServiceCollection services)
{
    // Singleton (not scoped): the adapters keep their data in instance-level
    // dictionaries, so a per-scope lifetime would hand every DI scope a fresh,
    // empty store and silently lose everything written in previous scopes.
    // The adapters use ConcurrentDictionary internally, so sharing one
    // instance across scopes/threads is safe.
    services.AddSingleton<InMemory.Repositories.INotifyChannelRepository, InMemory.Repositories.NotifyChannelRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyRuleRepository, InMemory.Repositories.NotifyRuleRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyTemplateRepository, InMemory.Repositories.NotifyTemplateRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyDeliveryRepository, InMemory.Repositories.NotifyDeliveryRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyDigestRepository, InMemory.Repositories.NotifyDigestRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyAuditRepository, InMemory.Repositories.NotifyAuditRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyLockRepository, InMemory.Repositories.NotifyLockRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyEscalationPolicyRepository, InMemory.Repositories.NotifyEscalationPolicyRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyEscalationStateRepository, InMemory.Repositories.NotifyEscalationStateRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyOnCallScheduleRepository, InMemory.Repositories.NotifyOnCallScheduleRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyQuietHoursRepository, InMemory.Repositories.NotifyQuietHoursRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyMaintenanceWindowRepository, InMemory.Repositories.NotifyMaintenanceWindowRepositoryAdapter>();
    services.AddSingleton<InMemory.Repositories.INotifyInboxRepository, InMemory.Repositories.NotifyInboxRepositoryAdapter>();
    return services;
}
}

View File

@@ -0,0 +1,270 @@
using System.Text.Json.Nodes;
namespace StellaOps.Notify.Persistence.InMemory.Documents;
/// <summary>
/// Represents a notification channel document (storage compatibility shim).
/// </summary>
public sealed class NotifyChannelDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string Name { get; set; } = string.Empty;
    // Transport discriminator; valid values are not visible here — TODO confirm.
    public string ChannelType { get; set; } = string.Empty;
    public bool Enabled { get; set; } = true;
    // Channel-specific configuration as a serialized JSON object.
    public string Config { get; set; } = "{}";
    // Optional serialized credentials; null when the channel needs none.
    public string? Credentials { get; set; }
    // Free-form metadata as a serialized JSON object.
    public string Metadata { get; set; } = "{}";
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
    public string? CreatedBy { get; set; }
}
/// <summary>
/// Represents a notification rule document (storage compatibility shim).
/// </summary>
public sealed class NotifyRuleDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string Name { get; set; } = string.Empty;
    public string? Description { get; set; }
    public bool Enabled { get; set; } = true;
    // Ordering semantics (higher vs. lower first) are not defined here — TODO confirm.
    public int Priority { get; set; }
    // Serialized JSON predicate selecting the events this rule matches.
    public string EventFilter { get; set; } = "{}";
    public string? ChannelId { get; set; }
    public string? TemplateId { get; set; }
    // Optional serialized digest settings; presumably null disables digesting — confirm.
    public string? DigestConfig { get; set; }
    public string? EscalationPolicyId { get; set; }
    // Free-form metadata as a serialized JSON object.
    public string Metadata { get; set; } = "{}";
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
    public string? CreatedBy { get; set; }
}
/// <summary>
/// Represents a notification template document (storage compatibility shim).
/// </summary>
public sealed class NotifyTemplateDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string Name { get; set; } = string.Empty;
    public string? Description { get; set; }
    public string Subject { get; set; } = string.Empty;
    public string Body { get; set; } = string.Empty;
    // Rendering format; defaults to "text". Other accepted values not visible here — TODO confirm.
    public string Format { get; set; } = "text";
    // Optional restriction to one channel type; presumably null means usable by any — confirm.
    public string? ChannelType { get; set; }
    // Free-form metadata as a serialized JSON object.
    public string Metadata { get; set; } = "{}";
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
    public string? CreatedBy { get; set; }
}
/// <summary>
/// Represents a notification delivery document (storage compatibility shim).
/// </summary>
public sealed class NotifyDeliveryDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string? RuleId { get; set; }
    public string? ChannelId { get; set; }
    public string? TemplateId { get; set; }
    // Lifecycle state; starts at "pending". Full state set not visible here — TODO confirm.
    public string Status { get; set; } = "pending";
    // Last failure message, if any.
    public string? Error { get; set; }
    // Original event payload as a serialized JSON object.
    public string Payload { get; set; } = "{}";
    public string? RenderedSubject { get; set; }
    public string? RenderedBody { get; set; }
    public int RetryCount { get; set; }
    // Null when no retry is scheduled.
    public DateTimeOffset? NextRetryAt { get; set; }
    // Null until the delivery has been sent.
    public DateTimeOffset? SentAt { get; set; }
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
}
/// <summary>
/// Represents a notification digest document (storage compatibility shim).
/// </summary>
public sealed class NotifyDigestDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string? RuleId { get; set; }
    // Grouping key; presumably identifies which events fold into the same digest — confirm.
    public string DigestKey { get; set; } = string.Empty;
    // Collection window bounds for the digest.
    public DateTimeOffset WindowStart { get; set; }
    public DateTimeOffset WindowEnd { get; set; }
    public List<string> EventIds { get; set; } = new();
    public int EventCount { get; set; }
    // Lifecycle state; starts at "collecting". Full state set not visible here — TODO confirm.
    public string Status { get; set; } = "collecting";
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
}
/// <summary>
/// Represents a notification audit document (storage compatibility shim).
/// </summary>
public sealed class NotifyAuditDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    // Null for audit entries not tied to a specific delivery.
    public string? DeliveryId { get; set; }
    public string Action { get; set; } = string.Empty;
    // Who performed the action; null for system-originated entries — TODO confirm.
    public string? Actor { get; set; }
    public string? Details { get; set; }
    public DateTimeOffset Timestamp { get; set; }
}
/// <summary>
/// Represents an audit entry for notification actions (storage compatibility shim).
/// </summary>
public sealed class NotifyAuditEntryDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    // Entity the action targeted; type/id pair, both optional.
    public string? EntityId { get; set; }
    public string? EntityType { get; set; }
    public string Action { get; set; } = string.Empty;
    public string? Actor { get; set; }
    // Structured payload (mutable JSON DOM), unlike NotifyAuditDocument's string Details.
    public JsonObject? Payload { get; set; }
    public DateTimeOffset Timestamp { get; set; }
}
/// <summary>
/// Represents an escalation policy document (storage compatibility shim).
/// </summary>
public sealed class NotifyEscalationPolicyDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string Name { get; set; } = string.Empty;
    public string? Description { get; set; }
    // Ordered escalation steps; see NotifyEscalationStep.Order.
    public List<NotifyEscalationStep> Steps { get; set; } = new();
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
}
/// <summary>
/// Represents an escalation step.
/// </summary>
public sealed class NotifyEscalationStep
{
    // Position of this step within its policy.
    public int Order { get; set; }
    // Wait time before this step fires; relative to the previous step or to the
    // start of escalation — not determinable here, TODO confirm.
    public TimeSpan Delay { get; set; }
    public string? ChannelId { get; set; }
    // Recipients for this step; target format (user ids? addresses?) not visible here.
    public List<string> Targets { get; set; } = new();
}
/// <summary>
/// Represents escalation state document (storage compatibility shim).
/// </summary>
public sealed class NotifyEscalationStateDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string? DeliveryId { get; set; }
    public string? PolicyId { get; set; }
    // Index of the step currently in effect within the policy.
    public int CurrentStep { get; set; }
    // Lifecycle state; starts at "active". Full state set not visible here — TODO confirm.
    public string Status { get; set; } = "active";
    // Both null until someone acknowledges the escalation.
    public DateTimeOffset? AcknowledgedAt { get; set; }
    public string? AcknowledgedBy { get; set; }
    // Null when no further escalation is scheduled.
    public DateTimeOffset? NextEscalationAt { get; set; }
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
}
/// <summary>
/// Represents an on-call schedule document (storage compatibility shim).
/// </summary>
public sealed class NotifyOnCallScheduleDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string Name { get; set; } = string.Empty;
    public string? Description { get; set; }
    // Time zone identifier; expected format (IANA vs. Windows) not visible here — TODO confirm.
    public string? TimeZone { get; set; }
    public List<NotifyOnCallRotation> Rotations { get; set; } = new();
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
}
/// <summary>
/// Represents an on-call rotation.
/// </summary>
public sealed class NotifyOnCallRotation
{
    public string? UserId { get; set; }
    // Interval during which this user is on call; inclusivity of End not defined here.
    public DateTimeOffset Start { get; set; }
    public DateTimeOffset End { get; set; }
}
/// <summary>
/// Represents a quiet hours configuration document (storage compatibility shim).
/// </summary>
public sealed class NotifyQuietHoursDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string Name { get; set; } = string.Empty;
    // Time zone identifier for interpreting StartTime/EndTime; format not visible here — TODO confirm.
    public string? TimeZone { get; set; }
    // Time-of-day bounds; behavior when the window crosses midnight (EndTime < StartTime)
    // is not defined here — TODO confirm.
    public TimeSpan StartTime { get; set; }
    public TimeSpan EndTime { get; set; }
    // Days on which the quiet window applies; presumably empty means none — confirm.
    public List<DayOfWeek> DaysOfWeek { get; set; } = new();
    public bool Enabled { get; set; } = true;
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
}
/// <summary>
/// Represents a maintenance window document (storage compatibility shim).
/// </summary>
public sealed class NotifyMaintenanceWindowDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string Name { get; set; } = string.Empty;
    public string? Description { get; set; }
    // Window bounds.
    public DateTimeOffset StartAt { get; set; }
    public DateTimeOffset EndAt { get; set; }
    // Null/empty meaning ("all services"?) is not defined here — TODO confirm.
    public List<string>? AffectedServices { get; set; }
    public string? CreatedBy { get; set; }
    public DateTimeOffset CreatedAt { get; set; }
    public DateTimeOffset UpdatedAt { get; set; }
}
/// <summary>
/// Represents an inbox message document (storage compatibility shim).
/// </summary>
public sealed class NotifyInboxDocument
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string Id { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string UserId { get; set; } = string.Empty;
    // Originating delivery, when the message came from one.
    public string? DeliveryId { get; set; }
    public string Subject { get; set; } = string.Empty;
    public string Body { get; set; } = string.Empty;
    // Read flag plus timestamp; ReadAt stays null while unread.
    public bool Read { get; set; }
    public DateTimeOffset? ReadAt { get; set; }
    public DateTimeOffset CreatedAt { get; set; }
}
/// <summary>
/// Inbox message representation for the storage shim (used by adapters).
/// </summary>
public sealed class NotifyInboxMessage
{
    // Defaults to a fresh GUID in "N" format (32 hex chars, no dashes).
    public string MessageId { get; set; } = Guid.NewGuid().ToString("N");
    public string TenantId { get; set; } = string.Empty;
    public string UserId { get; set; } = string.Empty;
    // NOTE(review): this type uses Title where NotifyInboxDocument uses Subject — confirm mapping.
    public string Title { get; set; } = string.Empty;
    public string Body { get; set; } = string.Empty;
    public string? Summary { get; set; }
    // Defaults to "general"; the full category set is not visible here — TODO confirm.
    public string Category { get; set; } = "general";
    // Ordering semantics (higher vs. lower first) are not defined here — TODO confirm.
    public int Priority { get; set; }
    public IReadOnlyDictionary<string, string>? Metadata { get; set; }
    public DateTimeOffset CreatedAt { get; set; }
    // Null when the message never expires — presumably; confirm against consumers.
    public DateTimeOffset? ExpiresAt { get; set; }
    // Null while unread.
    public DateTimeOffset? ReadAt { get; set; }
    public string? SourceChannel { get; set; }
    public string? DeliveryId { get; set; }
}

View File

@@ -0,0 +1,149 @@
using StellaOps.Notify.Persistence.InMemory.Documents;
namespace StellaOps.Notify.Persistence.InMemory.Repositories;
/// <summary>
/// Repository interface for notification channels (storage compatibility shim).
/// </summary>
public interface INotifyChannelRepository
{
    /// <summary>Gets a channel by id within a tenant; null when not found.</summary>
    Task<NotifyChannelDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Gets a channel by name within a tenant; null when not found.</summary>
    Task<NotifyChannelDocument?> GetByNameAsync(string tenantId, string name, CancellationToken cancellationToken = default);
    /// <summary>Lists a tenant's channels with optional enabled/type filters and limit/offset paging.</summary>
    Task<IReadOnlyList<NotifyChannelDocument>> GetAllAsync(string tenantId, bool? enabled = null, string? channelType = null, int limit = 100, int offset = 0, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates a channel and returns the stored document.</summary>
    Task<NotifyChannelDocument> UpsertAsync(NotifyChannelDocument channel, CancellationToken cancellationToken = default);
    /// <summary>Deletes a channel; the bool presumably reports whether anything was removed — confirm.</summary>
    Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Returns the enabled channels of a given type for a tenant.</summary>
    Task<IReadOnlyList<NotifyChannelDocument>> GetEnabledByTypeAsync(string tenantId, string channelType, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for notification rules (storage compatibility shim).
/// </summary>
public interface INotifyRuleRepository
{
    /// <summary>Gets a rule by id within a tenant; null when not found.</summary>
    Task<NotifyRuleDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Gets a rule by name within a tenant; null when not found.</summary>
    Task<NotifyRuleDocument?> GetByNameAsync(string tenantId, string name, CancellationToken cancellationToken = default);
    /// <summary>Lists a tenant's rules with an optional enabled filter and limit/offset paging.</summary>
    Task<IReadOnlyList<NotifyRuleDocument>> GetAllAsync(string tenantId, bool? enabled = null, int limit = 100, int offset = 0, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates a rule and returns the stored document.</summary>
    Task<NotifyRuleDocument> UpsertAsync(NotifyRuleDocument rule, CancellationToken cancellationToken = default);
    /// <summary>Deletes a rule; the bool presumably reports whether anything was removed — confirm.</summary>
    Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Returns the enabled rules for a tenant.</summary>
    Task<IReadOnlyList<NotifyRuleDocument>> GetEnabledAsync(string tenantId, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for notification templates (storage compatibility shim).
/// </summary>
public interface INotifyTemplateRepository
{
    /// <summary>Gets a template by id within a tenant; null when not found.</summary>
    Task<NotifyTemplateDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Gets a template by name within a tenant; null when not found.</summary>
    Task<NotifyTemplateDocument?> GetByNameAsync(string tenantId, string name, CancellationToken cancellationToken = default);
    /// <summary>Lists a tenant's templates with limit/offset paging.</summary>
    Task<IReadOnlyList<NotifyTemplateDocument>> GetAllAsync(string tenantId, int limit = 100, int offset = 0, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates a template and returns the stored document.</summary>
    Task<NotifyTemplateDocument> UpsertAsync(NotifyTemplateDocument template, CancellationToken cancellationToken = default);
    /// <summary>Deletes a template; the bool presumably reports whether anything was removed — confirm.</summary>
    Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for notification deliveries (storage compatibility shim).
/// </summary>
public interface INotifyDeliveryRepository
{
    /// <summary>Gets a delivery by id within a tenant; null when not found.</summary>
    Task<NotifyDeliveryDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Lists deliveries produced by a rule, with limit/offset paging.</summary>
    Task<IReadOnlyList<NotifyDeliveryDocument>> GetByRuleAsync(string tenantId, string ruleId, int limit = 100, int offset = 0, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates a delivery and returns the stored document.</summary>
    Task<NotifyDeliveryDocument> UpsertAsync(NotifyDeliveryDocument delivery, CancellationToken cancellationToken = default);
    /// <summary>Sets a delivery's status (and optional error); bool presumably reports whether it existed — confirm.</summary>
    Task<bool> UpdateStatusAsync(string tenantId, string id, string status, string? error = null, CancellationToken cancellationToken = default);
    /// <summary>Returns up to <paramref name="limit"/> deliveries still awaiting dispatch.</summary>
    Task<IReadOnlyList<NotifyDeliveryDocument>> GetPendingAsync(string tenantId, int limit = 100, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for notification digests (storage compatibility shim).
/// </summary>
public interface INotifyDigestRepository
{
    /// <summary>Gets a digest by id within a tenant; null when not found.</summary>
    Task<NotifyDigestDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates a digest and returns the stored document.</summary>
    Task<NotifyDigestDocument> UpsertAsync(NotifyDigestDocument digest, CancellationToken cancellationToken = default);
    /// <summary>Returns pending digests; "before" presumably bounds the digest window end — confirm.</summary>
    Task<IReadOnlyList<NotifyDigestDocument>> GetPendingAsync(string tenantId, DateTimeOffset before, int limit = 100, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for notification audit entries (storage compatibility shim).
/// </summary>
public interface INotifyAuditRepository
{
    /// <summary>Appends an audit entry (audit data is insert-only; no update/delete is exposed).</summary>
    Task InsertAsync(NotifyAuditDocument audit, CancellationToken cancellationToken = default);
    /// <summary>Returns audit entries recorded for a specific delivery.</summary>
    Task<IReadOnlyList<NotifyAuditDocument>> GetByDeliveryAsync(string tenantId, string deliveryId, int limit = 100, CancellationToken cancellationToken = default);
    /// <summary>Returns the most recent audit entries for a tenant.</summary>
    Task<IReadOnlyList<NotifyAuditDocument>> GetRecentAsync(string tenantId, int limit = 100, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for distributed locks (storage compatibility shim).
/// </summary>
public interface INotifyLockRepository
{
    /// <summary>Attempts to acquire the named lock for <paramref name="owner"/> with a TTL; true on success.</summary>
    Task<bool> TryAcquireAsync(string lockKey, string owner, TimeSpan ttl, CancellationToken cancellationToken = default);
    /// <summary>Releases the lock if held by <paramref name="owner"/>; true on success.</summary>
    Task<bool> ReleaseAsync(string lockKey, string owner, CancellationToken cancellationToken = default);
    /// <summary>Extends the TTL of a lock held by <paramref name="owner"/>; true on success.</summary>
    Task<bool> ExtendAsync(string lockKey, string owner, TimeSpan ttl, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for escalation policies (storage compatibility shim).
/// </summary>
public interface INotifyEscalationPolicyRepository
{
    /// <summary>Gets a policy by id within a tenant; null when not found.</summary>
    Task<NotifyEscalationPolicyDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Lists a tenant's policies, capped at <paramref name="limit"/> (no offset paging here).</summary>
    Task<IReadOnlyList<NotifyEscalationPolicyDocument>> GetAllAsync(string tenantId, int limit = 100, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates a policy and returns the stored document.</summary>
    Task<NotifyEscalationPolicyDocument> UpsertAsync(NotifyEscalationPolicyDocument policy, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for escalation state (storage compatibility shim).
/// </summary>
public interface INotifyEscalationStateRepository
{
    /// <summary>Gets escalation state by id within a tenant; null when not found.</summary>
    Task<NotifyEscalationStateDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates escalation state and returns the stored document.</summary>
    Task<NotifyEscalationStateDocument> UpsertAsync(NotifyEscalationStateDocument state, CancellationToken cancellationToken = default);
    /// <summary>Returns escalations still in progress for a tenant.</summary>
    Task<IReadOnlyList<NotifyEscalationStateDocument>> GetActiveAsync(string tenantId, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for on-call schedules (storage compatibility shim).
/// </summary>
public interface INotifyOnCallScheduleRepository
{
    /// <summary>Gets a schedule by id within a tenant; null when not found.</summary>
    Task<NotifyOnCallScheduleDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Lists a tenant's schedules, capped at <paramref name="limit"/>.</summary>
    Task<IReadOnlyList<NotifyOnCallScheduleDocument>> GetAllAsync(string tenantId, int limit = 100, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates a schedule and returns the stored document.</summary>
    Task<NotifyOnCallScheduleDocument> UpsertAsync(NotifyOnCallScheduleDocument schedule, CancellationToken cancellationToken = default);
    /// <summary>Returns the schedule in effect at <paramref name="at"/>; null when none applies.
    /// Tie-breaking when several schedules overlap is not defined here — confirm.</summary>
    Task<NotifyOnCallScheduleDocument?> GetCurrentAsync(string tenantId, DateTimeOffset at, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for quiet hours configuration (storage compatibility shim).
/// </summary>
public interface INotifyQuietHoursRepository
{
    /// <summary>Gets a quiet-hours config by id within a tenant; null when not found.</summary>
    Task<NotifyQuietHoursDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Lists all quiet-hours configs for a tenant (no paging parameters).</summary>
    Task<IReadOnlyList<NotifyQuietHoursDocument>> GetAllAsync(string tenantId, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates a quiet-hours config and returns the stored document.</summary>
    Task<NotifyQuietHoursDocument> UpsertAsync(NotifyQuietHoursDocument quietHours, CancellationToken cancellationToken = default);
    /// <summary>Deletes a quiet-hours config; the bool presumably reports whether anything was removed — confirm.</summary>
    Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for maintenance windows (storage compatibility shim).
/// </summary>
public interface INotifyMaintenanceWindowRepository
{
    /// <summary>Gets a maintenance window by id within a tenant; null when not found.</summary>
    Task<NotifyMaintenanceWindowDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Lists all maintenance windows for a tenant (no paging parameters).</summary>
    Task<IReadOnlyList<NotifyMaintenanceWindowDocument>> GetAllAsync(string tenantId, CancellationToken cancellationToken = default);
    /// <summary>Returns the windows covering the instant <paramref name="at"/>.</summary>
    Task<IReadOnlyList<NotifyMaintenanceWindowDocument>> GetActiveAsync(string tenantId, DateTimeOffset at, CancellationToken cancellationToken = default);
    /// <summary>Inserts or updates a maintenance window and returns the stored document.</summary>
    Task<NotifyMaintenanceWindowDocument> UpsertAsync(NotifyMaintenanceWindowDocument window, CancellationToken cancellationToken = default);
    /// <summary>Deletes a maintenance window; the bool presumably reports whether anything was removed — confirm.</summary>
    Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default);
}
/// <summary>
/// Repository interface for inbox messages (storage compatibility shim).
/// </summary>
public interface INotifyInboxRepository
{
    /// <summary>Gets an inbox message by id within a tenant; null when not found.</summary>
    Task<NotifyInboxDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Lists a user's inbox messages, optionally filtered by read state, capped at <paramref name="limit"/>.</summary>
    Task<IReadOnlyList<NotifyInboxDocument>> GetByUserAsync(string tenantId, string userId, bool? read = null, int limit = 100, CancellationToken cancellationToken = default);
    /// <summary>Stores a new inbox message and returns the stored document.</summary>
    Task<NotifyInboxDocument> InsertAsync(NotifyInboxDocument message, CancellationToken cancellationToken = default);
    /// <summary>Marks a message as read; the bool presumably reports whether the message existed — confirm.</summary>
    Task<bool> MarkReadAsync(string tenantId, string id, CancellationToken cancellationToken = default);
    /// <summary>Deletes a message; the bool presumably reports whether anything was removed — confirm.</summary>
    Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,516 @@
using System.Collections.Concurrent;
using StellaOps.Notify.Persistence.InMemory.Documents;
namespace StellaOps.Notify.Persistence.InMemory.Repositories;
/// <summary>
/// In-memory implementation of channel repository for development/testing.
/// State lives in a process-local dictionary keyed "{tenantId}:{id}";
/// nothing is persisted across restarts.
/// </summary>
public sealed class NotifyChannelRepositoryAdapter : INotifyChannelRepository
{
    // OrdinalIgnoreCase keys: id-based lookups match regardless of tenant/id casing.
    private readonly ConcurrentDictionary<string, NotifyChannelDocument> _channels = new(StringComparer.OrdinalIgnoreCase);

    // Tenant matching must be case-insensitive everywhere, not just in the keyed
    // lookups, otherwise GetByIdAsync and the scan-based queries would disagree
    // about which tenant a document belongs to.
    private static bool SameTenant(NotifyChannelDocument doc, string tenantId)
        => string.Equals(doc.TenantId, tenantId, StringComparison.OrdinalIgnoreCase);

    /// <summary>Gets a channel by id within a tenant; null when not found.</summary>
    public Task<NotifyChannelDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
    {
        _channels.TryGetValue($"{tenantId}:{id}", out var doc);
        return Task.FromResult(doc);
    }

    /// <summary>Gets a channel by name (ordinal, case-sensitive) within a tenant; null when not found.</summary>
    public Task<NotifyChannelDocument?> GetByNameAsync(string tenantId, string name, CancellationToken cancellationToken = default)
    {
        var doc = _channels.Values.FirstOrDefault(c => SameTenant(c, tenantId) && c.Name == name);
        return Task.FromResult(doc);
    }

    /// <summary>Lists a tenant's channels with optional filters and limit/offset paging.</summary>
    public Task<IReadOnlyList<NotifyChannelDocument>> GetAllAsync(string tenantId, bool? enabled = null, string? channelType = null, int limit = 100, int offset = 0, CancellationToken cancellationToken = default)
    {
        var query = _channels.Values.Where(c => SameTenant(c, tenantId));
        if (enabled.HasValue)
        {
            query = query.Where(c => c.Enabled == enabled.Value);
        }

        if (!string.IsNullOrEmpty(channelType))
        {
            query = query.Where(c => c.ChannelType == channelType);
        }

        // ConcurrentDictionary enumerates in no particular order; sort before
        // Skip/Take so paging is deterministic and pages do not overlap or skip.
        var result = query
            .OrderBy(c => c.Id, StringComparer.OrdinalIgnoreCase)
            .Skip(offset)
            .Take(limit)
            .ToList();
        return Task.FromResult<IReadOnlyList<NotifyChannelDocument>>(result);
    }

    /// <summary>Inserts or updates a channel, stamping UpdatedAt, and returns it.</summary>
    public Task<NotifyChannelDocument> UpsertAsync(NotifyChannelDocument channel, CancellationToken cancellationToken = default)
    {
        channel.UpdatedAt = DateTimeOffset.UtcNow;
        _channels[$"{channel.TenantId}:{channel.Id}"] = channel;
        return Task.FromResult(channel);
    }

    /// <summary>Deletes a channel; true when a document was actually removed.</summary>
    public Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default)
    {
        return Task.FromResult(_channels.TryRemove($"{tenantId}:{id}", out _));
    }

    /// <summary>Returns the enabled channels of the given type (ordinal match) for a tenant.</summary>
    public Task<IReadOnlyList<NotifyChannelDocument>> GetEnabledByTypeAsync(string tenantId, string channelType, CancellationToken cancellationToken = default)
    {
        var result = _channels.Values
            .Where(c => SameTenant(c, tenantId) && c.Enabled && c.ChannelType == channelType)
            .ToList();
        return Task.FromResult<IReadOnlyList<NotifyChannelDocument>>(result);
    }
}
/// <summary>
/// Development/testing rule repository backed by an in-memory dictionary.
/// Entries are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyRuleRepositoryAdapter : INotifyRuleRepository
{
    private readonly ConcurrentDictionary<string, NotifyRuleDocument> _rules = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key shared by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Looks up a rule by tenant and id; resolves to null when missing.</summary>
    public Task<NotifyRuleDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_rules.TryGetValue(KeyFor(tenantId, id), out var rule) ? rule : null);

    /// <summary>Finds the first rule with an exact (case-sensitive) name match, or null.</summary>
    public Task<NotifyRuleDocument?> GetByNameAsync(string tenantId, string name, CancellationToken cancellationToken = default)
        => Task.FromResult(_rules.Values.FirstOrDefault(r => r.TenantId == tenantId && r.Name == name));

    /// <summary>
    /// Pages through the tenant's rules ordered by priority, optionally
    /// filtered by the enabled flag.
    /// </summary>
    public Task<IReadOnlyList<NotifyRuleDocument>> GetAllAsync(string tenantId, bool? enabled = null, int limit = 100, int offset = 0, CancellationToken cancellationToken = default)
    {
        IEnumerable<NotifyRuleDocument> snapshot = _rules.Values.Where(r => r.TenantId == tenantId);
        if (enabled is { } wantEnabled)
        {
            snapshot = snapshot.Where(r => r.Enabled == wantEnabled);
        }

        IReadOnlyList<NotifyRuleDocument> page = snapshot
            .OrderBy(r => r.Priority)
            .Skip(offset)
            .Take(limit)
            .ToList();
        return Task.FromResult(page);
    }

    /// <summary>Inserts or replaces the rule, stamping UpdatedAt before storing.</summary>
    public Task<NotifyRuleDocument> UpsertAsync(NotifyRuleDocument rule, CancellationToken cancellationToken = default)
    {
        rule.UpdatedAt = DateTimeOffset.UtcNow;
        _rules[KeyFor(rule.TenantId, rule.Id)] = rule;
        return Task.FromResult(rule);
    }

    /// <summary>Removes the rule; resolves to false when it was not present.</summary>
    public Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_rules.TryRemove(KeyFor(tenantId, id), out _));

    /// <summary>Returns every enabled rule for the tenant, ordered by priority.</summary>
    public Task<IReadOnlyList<NotifyRuleDocument>> GetEnabledAsync(string tenantId, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyRuleDocument> enabledRules = _rules.Values
            .Where(r => r.TenantId == tenantId && r.Enabled)
            .OrderBy(r => r.Priority)
            .ToList();
        return Task.FromResult(enabledRules);
    }
}
/// <summary>
/// In-memory implementation of template repository for development/testing.
/// Documents are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyTemplateRepositoryAdapter : INotifyTemplateRepository
{
    private readonly ConcurrentDictionary<string, NotifyTemplateDocument> _templates = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key used by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Returns the template with the given id for the tenant, or null when absent.</summary>
    public Task<NotifyTemplateDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
    {
        _templates.TryGetValue(KeyFor(tenantId, id), out var doc);
        return Task.FromResult(doc);
    }

    /// <summary>Returns the first template with an exact (case-sensitive) name match, or null.</summary>
    public Task<NotifyTemplateDocument?> GetByNameAsync(string tenantId, string name, CancellationToken cancellationToken = default)
    {
        var doc = _templates.Values.FirstOrDefault(t => t.TenantId == tenantId && t.Name == name);
        return Task.FromResult(doc);
    }

    /// <summary>
    /// Pages through the tenant's templates.
    /// </summary>
    public Task<IReadOnlyList<NotifyTemplateDocument>> GetAllAsync(string tenantId, int limit = 100, int offset = 0, CancellationToken cancellationToken = default)
    {
        // Fix: ConcurrentDictionary enumeration order is unspecified, so
        // Skip/Take paging needs a stable sort to avoid repeating/dropping
        // rows between calls. Order by name for deterministic pages.
        var result = _templates.Values
            .Where(t => t.TenantId == tenantId)
            .OrderBy(t => t.Name, StringComparer.Ordinal)
            .Skip(offset)
            .Take(limit)
            .ToList();
        return Task.FromResult<IReadOnlyList<NotifyTemplateDocument>>(result);
    }

    /// <summary>Inserts or replaces the template, stamping UpdatedAt.</summary>
    public Task<NotifyTemplateDocument> UpsertAsync(NotifyTemplateDocument template, CancellationToken cancellationToken = default)
    {
        template.UpdatedAt = DateTimeOffset.UtcNow;
        _templates[KeyFor(template.TenantId, template.Id)] = template;
        return Task.FromResult(template);
    }

    /// <summary>Removes the template; returns false when it was not present.</summary>
    public Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_templates.TryRemove(KeyFor(tenantId, id), out _));
}
/// <summary>
/// Development/testing delivery repository held entirely in memory.
/// Entries are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyDeliveryRepositoryAdapter : INotifyDeliveryRepository
{
    private readonly ConcurrentDictionary<string, NotifyDeliveryDocument> _deliveries = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key shared by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Looks up a delivery by tenant and id; resolves to null when missing.</summary>
    public Task<NotifyDeliveryDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_deliveries.TryGetValue(KeyFor(tenantId, id), out var delivery) ? delivery : null);

    /// <summary>Pages through deliveries produced by a rule, newest first.</summary>
    public Task<IReadOnlyList<NotifyDeliveryDocument>> GetByRuleAsync(string tenantId, string ruleId, int limit = 100, int offset = 0, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyDeliveryDocument> page = _deliveries.Values
            .Where(d => d.TenantId == tenantId && d.RuleId == ruleId)
            .OrderByDescending(d => d.CreatedAt)
            .Skip(offset)
            .Take(limit)
            .ToList();
        return Task.FromResult(page);
    }

    /// <summary>Inserts or replaces the delivery, stamping UpdatedAt before storing.</summary>
    public Task<NotifyDeliveryDocument> UpsertAsync(NotifyDeliveryDocument delivery, CancellationToken cancellationToken = default)
    {
        delivery.UpdatedAt = DateTimeOffset.UtcNow;
        _deliveries[KeyFor(delivery.TenantId, delivery.Id)] = delivery;
        return Task.FromResult(delivery);
    }

    /// <summary>
    /// Updates status/error on an existing delivery; resolves to false when
    /// the delivery does not exist.
    /// </summary>
    public Task<bool> UpdateStatusAsync(string tenantId, string id, string status, string? error = null, CancellationToken cancellationToken = default)
    {
        if (!_deliveries.TryGetValue(KeyFor(tenantId, id), out var delivery))
        {
            return Task.FromResult(false);
        }

        // Mutates the stored document in place — acceptable for a dev shim.
        delivery.Status = status;
        delivery.Error = error;
        delivery.UpdatedAt = DateTimeOffset.UtcNow;
        return Task.FromResult(true);
    }

    /// <summary>Returns up to <paramref name="limit"/> pending deliveries, oldest first.</summary>
    public Task<IReadOnlyList<NotifyDeliveryDocument>> GetPendingAsync(string tenantId, int limit = 100, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyDeliveryDocument> pending = _deliveries.Values
            .Where(d => d.TenantId == tenantId && d.Status == "pending")
            .OrderBy(d => d.CreatedAt)
            .Take(limit)
            .ToList();
        return Task.FromResult(pending);
    }
}
/// <summary>
/// Development/testing digest repository held in memory.
/// Entries are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyDigestRepositoryAdapter : INotifyDigestRepository
{
    private readonly ConcurrentDictionary<string, NotifyDigestDocument> _digests = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key shared by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Looks up a digest by tenant and id; resolves to null when missing.</summary>
    public Task<NotifyDigestDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_digests.TryGetValue(KeyFor(tenantId, id), out var digest) ? digest : null);

    /// <summary>Inserts or replaces the digest, stamping UpdatedAt before storing.</summary>
    public Task<NotifyDigestDocument> UpsertAsync(NotifyDigestDocument digest, CancellationToken cancellationToken = default)
    {
        digest.UpdatedAt = DateTimeOffset.UtcNow;
        _digests[KeyFor(digest.TenantId, digest.Id)] = digest;
        return Task.FromResult(digest);
    }

    /// <summary>
    /// Returns digests still in the "collecting" state whose window closed at or
    /// before <paramref name="before"/>, oldest window first.
    /// </summary>
    public Task<IReadOnlyList<NotifyDigestDocument>> GetPendingAsync(string tenantId, DateTimeOffset before, int limit = 100, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyDigestDocument> due = _digests.Values
            .Where(d => d.TenantId == tenantId && d.Status == "collecting" && d.WindowEnd <= before)
            .OrderBy(d => d.WindowEnd)
            .Take(limit)
            .ToList();
        return Task.FromResult(due);
    }
}
/// <summary>
/// Development/testing audit repository held in memory.
/// Append-only; queries sort by timestamp at read time.
/// </summary>
public sealed class NotifyAuditRepositoryAdapter : INotifyAuditRepository
{
    // ConcurrentBag is fine here: inserts are append-only and unordered.
    private readonly ConcurrentBag<NotifyAuditDocument> _audits = new();

    /// <summary>Records an audit entry.</summary>
    public Task InsertAsync(NotifyAuditDocument audit, CancellationToken cancellationToken = default)
    {
        _audits.Add(audit);
        return Task.CompletedTask;
    }

    /// <summary>Returns audit entries for one delivery, newest first.</summary>
    public Task<IReadOnlyList<NotifyAuditDocument>> GetByDeliveryAsync(string tenantId, string deliveryId, int limit = 100, CancellationToken cancellationToken = default)
        => Task.FromResult<IReadOnlyList<NotifyAuditDocument>>(
            _audits
                .Where(a => a.TenantId == tenantId && a.DeliveryId == deliveryId)
                .OrderByDescending(a => a.Timestamp)
                .Take(limit)
                .ToList());

    /// <summary>Returns the tenant's most recent audit entries, newest first.</summary>
    public Task<IReadOnlyList<NotifyAuditDocument>> GetRecentAsync(string tenantId, int limit = 100, CancellationToken cancellationToken = default)
        => Task.FromResult<IReadOnlyList<NotifyAuditDocument>>(
            _audits
                .Where(a => a.TenantId == tenantId)
                .OrderByDescending(a => a.Timestamp)
                .Take(limit)
                .ToList());
}
/// <summary>
/// In-memory implementation of lock repository for development/testing.
/// Leases are (owner, expiry) pairs keyed by lock name; not shared across processes.
/// </summary>
public sealed class NotifyLockRepositoryAdapter : INotifyLockRepository
{
    private readonly ConcurrentDictionary<string, (string Owner, DateTimeOffset ExpiresAt)> _locks = new(StringComparer.OrdinalIgnoreCase);

    /// <summary>
    /// Attempts to acquire the lock for <paramref name="owner"/> with the given TTL.
    /// Expired leases are pruned first so a stale holder cannot block acquisition.
    /// Returns false when a live lease already holds the key (including re-entry
    /// by the same owner).
    /// </summary>
    public Task<bool> TryAcquireAsync(string lockKey, string owner, TimeSpan ttl, CancellationToken cancellationToken = default)
    {
        var now = DateTimeOffset.UtcNow;
        PruneExpired(now);
        return Task.FromResult(_locks.TryAdd(lockKey, (owner, now + ttl)));
    }

    /// <summary>
    /// Releases the lock if it is still held by <paramref name="owner"/>.
    /// </summary>
    public Task<bool> ReleaseAsync(string lockKey, string owner, CancellationToken cancellationToken = default)
    {
        if (_locks.TryGetValue(lockKey, out var lease) && lease.Owner == owner)
        {
            // Fix: remove key+value atomically so a lease acquired by a
            // different owner between the check and the removal is not
            // released by mistake (the original TryRemove(key) could do that).
            return Task.FromResult(_locks.TryRemove(KeyValuePair.Create(lockKey, lease)));
        }

        return Task.FromResult(false);
    }

    /// <summary>
    /// Extends the TTL of a lock currently held by <paramref name="owner"/>.
    /// </summary>
    public Task<bool> ExtendAsync(string lockKey, string owner, TimeSpan ttl, CancellationToken cancellationToken = default)
    {
        if (_locks.TryGetValue(lockKey, out var lease) && lease.Owner == owner)
        {
            // Fix: TryUpdate compares against the observed lease, so the extend
            // only succeeds if nobody replaced the lock in the meantime.
            return Task.FromResult(_locks.TryUpdate(lockKey, (owner, DateTimeOffset.UtcNow + ttl), lease));
        }

        return Task.FromResult(false);
    }

    // Drops every lease whose expiry is at or before 'now'. Uses atomic
    // key+value removal so a lease renewed concurrently survives the sweep.
    private void PruneExpired(DateTimeOffset now)
    {
        foreach (var entry in _locks)
        {
            if (entry.Value.ExpiresAt <= now)
            {
                _locks.TryRemove(entry);
            }
        }
    }
}
/// <summary>
/// Development/testing escalation-policy repository held in memory.
/// Entries are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyEscalationPolicyRepositoryAdapter : INotifyEscalationPolicyRepository
{
    private readonly ConcurrentDictionary<string, NotifyEscalationPolicyDocument> _policies = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key shared by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Looks up a policy by tenant and id; resolves to null when missing.</summary>
    public Task<NotifyEscalationPolicyDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_policies.TryGetValue(KeyFor(tenantId, id), out var policy) ? policy : null);

    /// <summary>Returns up to <paramref name="limit"/> policies for the tenant.</summary>
    public Task<IReadOnlyList<NotifyEscalationPolicyDocument>> GetAllAsync(string tenantId, int limit = 100, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyEscalationPolicyDocument> policies = _policies.Values
            .Where(p => p.TenantId == tenantId)
            .Take(limit)
            .ToList();
        return Task.FromResult(policies);
    }

    /// <summary>Inserts or replaces the policy, stamping UpdatedAt before storing.</summary>
    public Task<NotifyEscalationPolicyDocument> UpsertAsync(NotifyEscalationPolicyDocument policy, CancellationToken cancellationToken = default)
    {
        policy.UpdatedAt = DateTimeOffset.UtcNow;
        _policies[KeyFor(policy.TenantId, policy.Id)] = policy;
        return Task.FromResult(policy);
    }
}
/// <summary>
/// Development/testing escalation-state repository held in memory.
/// Entries are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyEscalationStateRepositoryAdapter : INotifyEscalationStateRepository
{
    private readonly ConcurrentDictionary<string, NotifyEscalationStateDocument> _states = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key shared by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Looks up an escalation state by tenant and id; resolves to null when missing.</summary>
    public Task<NotifyEscalationStateDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_states.TryGetValue(KeyFor(tenantId, id), out var state) ? state : null);

    /// <summary>Inserts or replaces the state, stamping UpdatedAt before storing.</summary>
    public Task<NotifyEscalationStateDocument> UpsertAsync(NotifyEscalationStateDocument state, CancellationToken cancellationToken = default)
    {
        state.UpdatedAt = DateTimeOffset.UtcNow;
        _states[KeyFor(state.TenantId, state.Id)] = state;
        return Task.FromResult(state);
    }

    /// <summary>Returns every state whose status is "active" for the tenant.</summary>
    public Task<IReadOnlyList<NotifyEscalationStateDocument>> GetActiveAsync(string tenantId, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyEscalationStateDocument> active = _states.Values
            .Where(s => s.TenantId == tenantId && s.Status == "active")
            .ToList();
        return Task.FromResult(active);
    }
}
/// <summary>
/// Development/testing on-call schedule repository held in memory.
/// Entries are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyOnCallScheduleRepositoryAdapter : INotifyOnCallScheduleRepository
{
    private readonly ConcurrentDictionary<string, NotifyOnCallScheduleDocument> _schedules = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key shared by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Looks up a schedule by tenant and id; resolves to null when missing.</summary>
    public Task<NotifyOnCallScheduleDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_schedules.TryGetValue(KeyFor(tenantId, id), out var schedule) ? schedule : null);

    /// <summary>Returns up to <paramref name="limit"/> schedules for the tenant.</summary>
    public Task<IReadOnlyList<NotifyOnCallScheduleDocument>> GetAllAsync(string tenantId, int limit = 100, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyOnCallScheduleDocument> schedules = _schedules.Values
            .Where(s => s.TenantId == tenantId)
            .Take(limit)
            .ToList();
        return Task.FromResult(schedules);
    }

    /// <summary>Inserts or replaces the schedule, stamping UpdatedAt before storing.</summary>
    public Task<NotifyOnCallScheduleDocument> UpsertAsync(NotifyOnCallScheduleDocument schedule, CancellationToken cancellationToken = default)
    {
        schedule.UpdatedAt = DateTimeOffset.UtcNow;
        _schedules[KeyFor(schedule.TenantId, schedule.Id)] = schedule;
        return Task.FromResult(schedule);
    }

    /// <summary>
    /// Returns the first schedule with a rotation covering <paramref name="at"/>
    /// (start inclusive, end exclusive), or null when none matches.
    /// </summary>
    public Task<NotifyOnCallScheduleDocument?> GetCurrentAsync(string tenantId, DateTimeOffset at, CancellationToken cancellationToken = default)
    {
        var current = _schedules.Values.FirstOrDefault(s =>
            s.TenantId == tenantId &&
            s.Rotations.Any(r => r.Start <= at && r.End > at));
        return Task.FromResult(current);
    }
}
/// <summary>
/// Development/testing quiet-hours repository held in memory.
/// Entries are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyQuietHoursRepositoryAdapter : INotifyQuietHoursRepository
{
    private readonly ConcurrentDictionary<string, NotifyQuietHoursDocument> _quietHours = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key shared by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Looks up a quiet-hours window by tenant and id; resolves to null when missing.</summary>
    public Task<NotifyQuietHoursDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_quietHours.TryGetValue(KeyFor(tenantId, id), out var window) ? window : null);

    /// <summary>Returns every quiet-hours window for the tenant.</summary>
    public Task<IReadOnlyList<NotifyQuietHoursDocument>> GetAllAsync(string tenantId, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyQuietHoursDocument> windows = _quietHours.Values
            .Where(q => q.TenantId == tenantId)
            .ToList();
        return Task.FromResult(windows);
    }

    /// <summary>Inserts or replaces the window, stamping UpdatedAt before storing.</summary>
    public Task<NotifyQuietHoursDocument> UpsertAsync(NotifyQuietHoursDocument quietHours, CancellationToken cancellationToken = default)
    {
        quietHours.UpdatedAt = DateTimeOffset.UtcNow;
        _quietHours[KeyFor(quietHours.TenantId, quietHours.Id)] = quietHours;
        return Task.FromResult(quietHours);
    }

    /// <summary>Removes the window; resolves to false when it was not present.</summary>
    public Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_quietHours.TryRemove(KeyFor(tenantId, id), out _));
}
/// <summary>
/// Development/testing maintenance-window repository held in memory.
/// Entries are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyMaintenanceWindowRepositoryAdapter : INotifyMaintenanceWindowRepository
{
    private readonly ConcurrentDictionary<string, NotifyMaintenanceWindowDocument> _windows = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key shared by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Looks up a window by tenant and id; resolves to null when missing.</summary>
    public Task<NotifyMaintenanceWindowDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_windows.TryGetValue(KeyFor(tenantId, id), out var window) ? window : null);

    /// <summary>Returns every maintenance window for the tenant.</summary>
    public Task<IReadOnlyList<NotifyMaintenanceWindowDocument>> GetAllAsync(string tenantId, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyMaintenanceWindowDocument> windows = _windows.Values
            .Where(w => w.TenantId == tenantId)
            .ToList();
        return Task.FromResult(windows);
    }

    /// <summary>
    /// Returns windows covering <paramref name="at"/> (start inclusive, end exclusive).
    /// </summary>
    public Task<IReadOnlyList<NotifyMaintenanceWindowDocument>> GetActiveAsync(string tenantId, DateTimeOffset at, CancellationToken cancellationToken = default)
    {
        IReadOnlyList<NotifyMaintenanceWindowDocument> active = _windows.Values
            .Where(w => w.TenantId == tenantId && w.StartAt <= at && w.EndAt > at)
            .ToList();
        return Task.FromResult(active);
    }

    /// <summary>Inserts or replaces the window, stamping UpdatedAt before storing.</summary>
    public Task<NotifyMaintenanceWindowDocument> UpsertAsync(NotifyMaintenanceWindowDocument window, CancellationToken cancellationToken = default)
    {
        window.UpdatedAt = DateTimeOffset.UtcNow;
        _windows[KeyFor(window.TenantId, window.Id)] = window;
        return Task.FromResult(window);
    }

    /// <summary>Removes the window; resolves to false when it was not present.</summary>
    public Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_windows.TryRemove(KeyFor(tenantId, id), out _));
}
/// <summary>
/// Development/testing inbox repository held in memory.
/// Entries are keyed by "tenantId:id" (case-insensitive); nothing is persisted.
/// </summary>
public sealed class NotifyInboxRepositoryAdapter : INotifyInboxRepository
{
    private readonly ConcurrentDictionary<string, NotifyInboxDocument> _inbox = new(StringComparer.OrdinalIgnoreCase);

    // Composite dictionary key shared by all keyed operations.
    private static string KeyFor(string tenantId, string id) => $"{tenantId}:{id}";

    /// <summary>Looks up a message by tenant and id; resolves to null when missing.</summary>
    public Task<NotifyInboxDocument?> GetByIdAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_inbox.TryGetValue(KeyFor(tenantId, id), out var message) ? message : null);

    /// <summary>
    /// Returns a user's messages newest first, optionally filtered by read
    /// state, capped at <paramref name="limit"/>.
    /// </summary>
    public Task<IReadOnlyList<NotifyInboxDocument>> GetByUserAsync(string tenantId, string userId, bool? read = null, int limit = 100, CancellationToken cancellationToken = default)
    {
        IEnumerable<NotifyInboxDocument> messages = _inbox.Values.Where(i => i.TenantId == tenantId && i.UserId == userId);
        if (read is { } wantRead)
        {
            messages = messages.Where(i => i.Read == wantRead);
        }

        IReadOnlyList<NotifyInboxDocument> page = messages
            .OrderByDescending(i => i.CreatedAt)
            .Take(limit)
            .ToList();
        return Task.FromResult(page);
    }

    /// <summary>Stores a new message (replacing any message with the same id).</summary>
    public Task<NotifyInboxDocument> InsertAsync(NotifyInboxDocument message, CancellationToken cancellationToken = default)
    {
        _inbox[KeyFor(message.TenantId, message.Id)] = message;
        return Task.FromResult(message);
    }

    /// <summary>
    /// Marks the message read and stamps ReadAt; resolves to false when the
    /// message does not exist.
    /// </summary>
    public Task<bool> MarkReadAsync(string tenantId, string id, CancellationToken cancellationToken = default)
    {
        if (!_inbox.TryGetValue(KeyFor(tenantId, id), out var message))
        {
            return Task.FromResult(false);
        }

        // Mutates the stored document in place — acceptable for a dev shim.
        message.Read = true;
        message.ReadAt = DateTimeOffset.UtcNow;
        return Task.FromResult(true);
    }

    /// <summary>Removes the message; resolves to false when it was not present.</summary>
    public Task<bool> DeleteAsync(string tenantId, string id, CancellationToken cancellationToken = default)
        => Task.FromResult(_inbox.TryRemove(KeyFor(tenantId, id), out _));
}

View File

@@ -0,0 +1,578 @@
-- Notify Schema Migration 001: Initial Schema (Compacted)
-- Consolidated from migrations 001, 010, 011, 011b for 1.0.0 release
-- Creates the notify schema for notifications, channels, delivery tracking,
-- incidents, escalation, and on-call management
-- ============================================================================
-- Schema Creation
-- ============================================================================
-- notify: tables; notify_app: session-context helper functions.
CREATE SCHEMA IF NOT EXISTS notify;
CREATE SCHEMA IF NOT EXISTS notify_app;
-- ============================================================================
-- Enum Types
-- ============================================================================
-- CREATE TYPE has no IF NOT EXISTS; the DO blocks swallow duplicate_object
-- so re-running this migration is harmless.
DO $$ BEGIN
CREATE TYPE notify.channel_type AS ENUM (
'email', 'slack', 'teams', 'webhook', 'pagerduty', 'opsgenie'
);
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
DO $$ BEGIN
CREATE TYPE notify.delivery_status AS ENUM (
'pending', 'queued', 'sending', 'sent', 'delivered', 'failed', 'bounced'
);
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
-- ============================================================================
-- Tenant Context Helper Function
-- ============================================================================
-- Returns the tenant id bound to the current session via the app.tenant_id
-- setting; raises P0001 when the setting is missing or empty.
CREATE OR REPLACE FUNCTION notify_app.require_current_tenant()
RETURNS TEXT
LANGUAGE plpgsql STABLE SECURITY DEFINER
AS $$
DECLARE
v_tenant TEXT;
BEGIN
v_tenant := current_setting('app.tenant_id', true);
IF v_tenant IS NULL OR v_tenant = '' THEN
RAISE EXCEPTION 'app.tenant_id session variable not set'
USING HINT = 'Set via: SELECT set_config(''app.tenant_id'', ''<tenant>'', false)',
ERRCODE = 'P0001';
END IF;
RETURN v_tenant;
END;
$$;
-- SECURITY DEFINER function: lock down who may execute it.
REVOKE ALL ON FUNCTION notify_app.require_current_tenant() FROM PUBLIC;
-- ============================================================================
-- Update Timestamp Function
-- ============================================================================
-- Shared BEFORE UPDATE trigger function: stamps updated_at on every write.
CREATE OR REPLACE FUNCTION notify.update_updated_at()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- ============================================================================
-- Channels Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.channels (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    channel_type notify.channel_type NOT NULL,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    config JSONB NOT NULL DEFAULT '{}',
    credentials JSONB,
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    created_by TEXT,
    UNIQUE(tenant_id, name)
);

-- Fix: index/trigger creation is now idempotent, matching the guarded
-- CREATE TABLE / CREATE TYPE statements above. Without the guards, a
-- re-run (or a resume after partial failure) aborted on the first index.
CREATE INDEX IF NOT EXISTS idx_channels_tenant ON notify.channels(tenant_id);
CREATE INDEX IF NOT EXISTS idx_channels_type ON notify.channels(tenant_id, channel_type);

-- CREATE TRIGGER has no IF NOT EXISTS (and CREATE OR REPLACE TRIGGER needs
-- PG 14+), so drop-then-create for idempotency.
DROP TRIGGER IF EXISTS trg_channels_updated_at ON notify.channels;
CREATE TRIGGER trg_channels_updated_at
    BEFORE UPDATE ON notify.channels
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();

-- ============================================================================
-- Rules Table (Notification Routing Rules)
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.rules (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    priority INT NOT NULL DEFAULT 0,
    event_types TEXT[] NOT NULL DEFAULT '{}',
    filter JSONB NOT NULL DEFAULT '{}',
    channel_ids UUID[] NOT NULL DEFAULT '{}',
    template_id UUID,
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, name)
);

CREATE INDEX IF NOT EXISTS idx_rules_tenant ON notify.rules(tenant_id);
CREATE INDEX IF NOT EXISTS idx_rules_enabled ON notify.rules(tenant_id, enabled, priority DESC);

DROP TRIGGER IF EXISTS trg_rules_updated_at ON notify.rules;
CREATE TRIGGER trg_rules_updated_at
    BEFORE UPDATE ON notify.rules
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();

-- ============================================================================
-- Templates Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.templates (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    channel_type notify.channel_type NOT NULL,
    subject_template TEXT,
    body_template TEXT NOT NULL,
    locale TEXT NOT NULL DEFAULT 'en',
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, name, channel_type, locale)
);

CREATE INDEX IF NOT EXISTS idx_templates_tenant ON notify.templates(tenant_id);

DROP TRIGGER IF EXISTS trg_templates_updated_at ON notify.templates;
CREATE TRIGGER trg_templates_updated_at
    BEFORE UPDATE ON notify.templates
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();
-- ============================================================================
-- Deliveries Table (PARTITIONED by created_at)
-- ============================================================================
-- Note: Foreign key constraints not supported on partitioned tables;
-- application-level integrity checks are used instead.
CREATE TABLE IF NOT EXISTS notify.deliveries (
    id UUID NOT NULL DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    channel_id UUID NOT NULL,
    rule_id UUID,
    template_id UUID,
    status notify.delivery_status NOT NULL DEFAULT 'pending',
    recipient TEXT NOT NULL,
    subject TEXT,
    body TEXT,
    event_type TEXT NOT NULL,
    event_payload JSONB NOT NULL DEFAULT '{}',
    attempt INT NOT NULL DEFAULT 0,
    max_attempts INT NOT NULL DEFAULT 3,
    next_retry_at TIMESTAMPTZ,
    error_message TEXT,
    external_id TEXT,
    correlation_id TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    queued_at TIMESTAMPTZ,
    sent_at TIMESTAMPTZ,
    delivered_at TIMESTAMPTZ,
    failed_at TIMESTAMPTZ,
    -- Partition key must be part of the primary key on partitioned tables.
    PRIMARY KEY (id, created_at)
) PARTITION BY RANGE (created_at);

-- Create default partition to catch any rows outside defined ranges
CREATE TABLE IF NOT EXISTS notify.deliveries_default
    PARTITION OF notify.deliveries DEFAULT;

-- Indexes on partitioned deliveries table.
-- Fix: IF NOT EXISTS keeps re-runs idempotent, consistent with the guarded
-- CREATE TABLE statements above.
CREATE INDEX IF NOT EXISTS ix_deliveries_part_tenant ON notify.deliveries (tenant_id);
CREATE INDEX IF NOT EXISTS ix_deliveries_part_status ON notify.deliveries (tenant_id, status);
CREATE INDEX IF NOT EXISTS ix_deliveries_part_pending ON notify.deliveries (status, next_retry_at)
    WHERE status IN ('pending', 'queued');
CREATE INDEX IF NOT EXISTS ix_deliveries_part_channel ON notify.deliveries (channel_id);
CREATE INDEX IF NOT EXISTS ix_deliveries_part_correlation ON notify.deliveries (correlation_id)
    WHERE correlation_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_deliveries_part_created ON notify.deliveries (tenant_id, created_at DESC);
CREATE INDEX IF NOT EXISTS ix_deliveries_part_created_brin ON notify.deliveries USING BRIN (created_at)
    WITH (pages_per_range = 32);
CREATE INDEX IF NOT EXISTS ix_deliveries_part_external_id ON notify.deliveries (external_id)
    WHERE external_id IS NOT NULL;

COMMENT ON TABLE notify.deliveries IS
    'Notification deliveries. Partitioned monthly by created_at.';

-- ============================================================================
-- Partition Management Function
-- ============================================================================
-- Creates monthly partitions covering 3 months back through 4 months ahead.
-- Safe to call repeatedly: existing partitions are detected via pg_class.
CREATE OR REPLACE FUNCTION notify.ensure_delivery_partitions()
RETURNS void
LANGUAGE plpgsql
AS $$
DECLARE
    v_start_date DATE;
    v_end_date DATE;
    v_partition_name TEXT;
    v_from_date DATE;
    v_to_date DATE;
BEGIN
    -- Create partitions for 3 months back and 4 months ahead
    v_start_date := date_trunc('month', NOW() - INTERVAL '3 months')::DATE;
    v_end_date := date_trunc('month', NOW() + INTERVAL '4 months')::DATE;
    v_from_date := v_start_date;
    WHILE v_from_date < v_end_date LOOP
        v_to_date := v_from_date + INTERVAL '1 month';
        v_partition_name := 'deliveries_' || to_char(v_from_date, 'YYYY_MM');
        -- Check if partition exists
        IF NOT EXISTS (
            SELECT 1 FROM pg_class c
            JOIN pg_namespace n ON n.oid = c.relnamespace
            WHERE n.nspname = 'notify'
              AND c.relname = v_partition_name
        ) THEN
            EXECUTE format(
                'CREATE TABLE notify.%I PARTITION OF notify.deliveries FOR VALUES FROM (%L) TO (%L)',
                v_partition_name,
                v_from_date,
                v_to_date
            );
            RAISE NOTICE 'Created partition: notify.%', v_partition_name;
        END IF;
        v_from_date := v_to_date;
    END LOOP;
END;
$$;

-- Create initial partitions
SELECT notify.ensure_delivery_partitions();
-- ============================================================================
-- Digests Table (Aggregated Notifications)
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.digests (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    channel_id UUID NOT NULL REFERENCES notify.channels(id),
    recipient TEXT NOT NULL,
    digest_key TEXT NOT NULL,
    event_count INT NOT NULL DEFAULT 0,
    events JSONB NOT NULL DEFAULT '[]',
    status TEXT NOT NULL DEFAULT 'collecting' CHECK (status IN ('collecting', 'sending', 'sent')),
    collect_until TIMESTAMPTZ NOT NULL,
    sent_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, channel_id, recipient, digest_key)
);

-- Fix: guards make index/trigger creation idempotent, matching the
-- guarded CREATE TABLE statements (previously re-runs failed here).
CREATE INDEX IF NOT EXISTS idx_digests_tenant ON notify.digests(tenant_id);
CREATE INDEX IF NOT EXISTS idx_digests_collect ON notify.digests(status, collect_until)
    WHERE status = 'collecting';

DROP TRIGGER IF EXISTS trg_digests_updated_at ON notify.digests;
CREATE TRIGGER trg_digests_updated_at
    BEFORE UPDATE ON notify.digests
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();

-- ============================================================================
-- Quiet Hours Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.quiet_hours (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    user_id UUID,
    channel_id UUID REFERENCES notify.channels(id),
    start_time TIME NOT NULL,
    end_time TIME NOT NULL,
    timezone TEXT NOT NULL DEFAULT 'UTC',
    days_of_week INT[] NOT NULL DEFAULT '{0,1,2,3,4,5,6}',
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_quiet_hours_tenant ON notify.quiet_hours(tenant_id);

-- Fix: quiet_hours has an updated_at column but was the only such table
-- without the shared updated_at trigger; add it for consistency.
DROP TRIGGER IF EXISTS trg_quiet_hours_updated_at ON notify.quiet_hours;
CREATE TRIGGER trg_quiet_hours_updated_at
    BEFORE UPDATE ON notify.quiet_hours
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();

-- ============================================================================
-- Maintenance Windows Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.maintenance_windows (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    start_at TIMESTAMPTZ NOT NULL,
    end_at TIMESTAMPTZ NOT NULL,
    suppress_channels UUID[],
    suppress_event_types TEXT[],
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    created_by TEXT,
    UNIQUE(tenant_id, name)
);

CREATE INDEX IF NOT EXISTS idx_maintenance_windows_tenant ON notify.maintenance_windows(tenant_id);
CREATE INDEX IF NOT EXISTS idx_maintenance_windows_active ON notify.maintenance_windows(start_at, end_at);

-- ============================================================================
-- Escalation Policies Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.escalation_policies (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    steps JSONB NOT NULL DEFAULT '[]',
    repeat_count INT NOT NULL DEFAULT 0,
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, name)
);

CREATE INDEX IF NOT EXISTS idx_escalation_policies_tenant ON notify.escalation_policies(tenant_id);

DROP TRIGGER IF EXISTS trg_escalation_policies_updated_at ON notify.escalation_policies;
CREATE TRIGGER trg_escalation_policies_updated_at
    BEFORE UPDATE ON notify.escalation_policies
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();
-- ============================================================================
-- Escalation States Table
-- Runtime state of an escalation keyed by correlation_id: the active step,
-- the repeat iteration, and when the next escalation is due to fire.
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.escalation_states (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    policy_id UUID NOT NULL REFERENCES notify.escalation_policies(id),
    incident_id UUID,
    correlation_id TEXT NOT NULL,
    current_step INT NOT NULL DEFAULT 0,
    repeat_iteration INT NOT NULL DEFAULT 0,
    status TEXT NOT NULL DEFAULT 'active' CHECK (status IN ('active', 'acknowledged', 'resolved', 'expired')),
    started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    next_escalation_at TIMESTAMPTZ,
    acknowledged_at TIMESTAMPTZ,
    acknowledged_by TEXT,
    resolved_at TIMESTAMPTZ,
    resolved_by TEXT,
    metadata JSONB NOT NULL DEFAULT '{}'
);
-- IF NOT EXISTS keeps index creation idempotent, matching the table guard.
CREATE INDEX IF NOT EXISTS idx_escalation_states_tenant ON notify.escalation_states(tenant_id);
-- Partial index: only active states are polled for due escalations.
CREATE INDEX IF NOT EXISTS idx_escalation_states_active ON notify.escalation_states(status, next_escalation_at)
    WHERE status = 'active';
CREATE INDEX IF NOT EXISTS idx_escalation_states_correlation ON notify.escalation_states(correlation_id);
-- ============================================================================
-- On-Call Schedules Table
-- Rotation definitions per tenant; participants and one-off overrides are
-- stored as JSONB arrays, evaluated in the schedule's timezone.
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.on_call_schedules (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    timezone TEXT NOT NULL DEFAULT 'UTC',
    rotation_type TEXT NOT NULL DEFAULT 'weekly' CHECK (rotation_type IN ('daily', 'weekly', 'custom')),
    participants JSONB NOT NULL DEFAULT '[]',
    overrides JSONB NOT NULL DEFAULT '[]',
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, name)
);
-- IF NOT EXISTS keeps index creation idempotent, matching the table guard.
CREATE INDEX IF NOT EXISTS idx_on_call_schedules_tenant ON notify.on_call_schedules(tenant_id);
-- PostgreSQL has no CREATE TRIGGER IF NOT EXISTS; drop-then-create keeps
-- the trigger definition idempotent on re-run.
DROP TRIGGER IF EXISTS trg_on_call_schedules_updated_at ON notify.on_call_schedules;
CREATE TRIGGER trg_on_call_schedules_updated_at
    BEFORE UPDATE ON notify.on_call_schedules
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();
-- ============================================================================
-- Inbox Table (In-App Notifications)
-- Per-user in-app notification items with read/archived flags and the
-- timestamps at which those transitions happened.
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.inbox (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    user_id UUID NOT NULL,
    title TEXT NOT NULL,
    body TEXT,
    event_type TEXT NOT NULL,
    event_payload JSONB NOT NULL DEFAULT '{}',
    read BOOLEAN NOT NULL DEFAULT FALSE,
    archived BOOLEAN NOT NULL DEFAULT FALSE,
    action_url TEXT,
    correlation_id TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    read_at TIMESTAMPTZ,
    archived_at TIMESTAMPTZ
);
-- IF NOT EXISTS keeps index creation idempotent, matching the table guard.
CREATE INDEX IF NOT EXISTS idx_inbox_tenant_user ON notify.inbox(tenant_id, user_id);
-- Partial index serves the common "unread, not archived" badge/list query.
CREATE INDEX IF NOT EXISTS idx_inbox_unread ON notify.inbox(tenant_id, user_id, read, created_at DESC)
    WHERE read = FALSE AND archived = FALSE;
-- ============================================================================
-- Incidents Table
-- Incident lifecycle (open -> acknowledged -> resolved -> closed) with
-- severity, optional assignee, and an optional escalation policy link.
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.incidents (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    title TEXT NOT NULL,
    description TEXT,
    severity TEXT NOT NULL DEFAULT 'medium' CHECK (severity IN ('critical', 'high', 'medium', 'low')),
    status TEXT NOT NULL DEFAULT 'open' CHECK (status IN ('open', 'acknowledged', 'resolved', 'closed')),
    source TEXT,
    correlation_id TEXT,
    assigned_to UUID,
    escalation_policy_id UUID REFERENCES notify.escalation_policies(id),
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    acknowledged_at TIMESTAMPTZ,
    resolved_at TIMESTAMPTZ,
    closed_at TIMESTAMPTZ,
    created_by TEXT
);
-- IF NOT EXISTS keeps index creation idempotent, matching the table guard.
CREATE INDEX IF NOT EXISTS idx_incidents_tenant ON notify.incidents(tenant_id);
CREATE INDEX IF NOT EXISTS idx_incidents_status ON notify.incidents(tenant_id, status);
CREATE INDEX IF NOT EXISTS idx_incidents_severity ON notify.incidents(tenant_id, severity);
CREATE INDEX IF NOT EXISTS idx_incidents_correlation ON notify.incidents(correlation_id);
-- ============================================================================
-- Audit Log Table
-- Append-only audit trail for notify-module actions; BIGSERIAL id preserves
-- insertion order, correlation_id links entries to triggering events.
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.audit (
    id BIGSERIAL PRIMARY KEY,
    tenant_id TEXT NOT NULL,
    user_id UUID,
    action TEXT NOT NULL,
    resource_type TEXT NOT NULL,
    resource_id TEXT,
    details JSONB,
    correlation_id TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- IF NOT EXISTS keeps index creation idempotent, matching the table guard.
CREATE INDEX IF NOT EXISTS idx_audit_tenant ON notify.audit(tenant_id);
CREATE INDEX IF NOT EXISTS idx_audit_created ON notify.audit(tenant_id, created_at);
-- ============================================================================
-- Locks Table (Lightweight Distributed Locks)
-- One row per (tenant, resource); the UNIQUE constraint provides mutual
-- exclusion and expires_at lets stale locks be reaped.
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.locks (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    resource TEXT NOT NULL,
    owner TEXT NOT NULL,
    expires_at TIMESTAMPTZ NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, resource)
);
-- IF NOT EXISTS keeps index creation idempotent, matching the table guard.
CREATE INDEX IF NOT EXISTS idx_locks_tenant ON notify.locks(tenant_id);
-- Supports the expired-lock sweep query.
CREATE INDEX IF NOT EXISTS idx_locks_expiry ON notify.locks(expires_at);
-- ============================================================================
-- Row-Level Security
-- ============================================================================
-- ENABLE turns RLS on; FORCE additionally applies the policies to the table
-- owner, so no role short of BYPASSRLS can bypass tenant isolation.
-- These ALTER TABLE statements are safe to re-run.
ALTER TABLE notify.channels ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.channels FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.rules ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.rules FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.templates ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.templates FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.deliveries ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.deliveries FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.digests ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.digests FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.quiet_hours ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.quiet_hours FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.maintenance_windows ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.maintenance_windows FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.escalation_policies ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.escalation_policies FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.escalation_states ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.escalation_states FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.on_call_schedules ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.on_call_schedules FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.inbox ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.inbox FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.incidents ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.incidents FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.audit ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.audit FORCE ROW LEVEL SECURITY;
ALTER TABLE notify.locks ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.locks FORCE ROW LEVEL SECURITY;
-- RLS Policies
-- One FOR ALL policy per table: visibility (USING) and writes (WITH CHECK)
-- are both restricted to the tenant bound by notify_app.require_current_tenant(),
-- which is expected to raise if no tenant is set on the session.
-- NOTE(review): CREATE POLICY has no IF NOT EXISTS form, so re-running this
-- script fails here unless the migration runner tracks applied migrations —
-- confirm against the runner's behaviour.
CREATE POLICY channels_tenant_isolation ON notify.channels
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY rules_tenant_isolation ON notify.rules
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY templates_tenant_isolation ON notify.templates
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY deliveries_tenant_isolation ON notify.deliveries
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY digests_tenant_isolation ON notify.digests
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY quiet_hours_tenant_isolation ON notify.quiet_hours
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY maintenance_windows_tenant_isolation ON notify.maintenance_windows
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY escalation_policies_tenant_isolation ON notify.escalation_policies
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY escalation_states_tenant_isolation ON notify.escalation_states
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY on_call_schedules_tenant_isolation ON notify.on_call_schedules
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY inbox_tenant_isolation ON notify.inbox
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY incidents_tenant_isolation ON notify.incidents
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY audit_tenant_isolation ON notify.audit
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
CREATE POLICY locks_tenant_isolation ON notify.locks
FOR ALL USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- Admin Bypass Role
-- Guarded DO block keeps role creation idempotent (roles are cluster-wide,
-- so an unconditional CREATE ROLE would fail on a second database/run).
-- BYPASSRLS lets maintenance/admin jobs see all tenants; NOLOGIN prevents
-- direct connections as this role.
DO $$
BEGIN
IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'notify_admin') THEN
CREATE ROLE notify_admin WITH NOLOGIN BYPASSRLS;
END IF;
END
$$;

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Channel types for notifications.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Delivery status values.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Digest status values.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents an escalation policy.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents an in-app notification inbox item.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Incident severity values.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents a localization bundle containing translated strings for a specific locale.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents a lightweight distributed lock entry.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents a maintenance window for suppressing notifications.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents an audit log entry for the notify module.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Rotation type values.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents an operator override for bypassing quiet hours, throttling, or maintenance windows.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents quiet hours configuration.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents a notification routing rule.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents a notification template.

View File

@@ -1,4 +1,4 @@
namespace StellaOps.Notify.Storage.Postgres.Models;
namespace StellaOps.Notify.Persistence.Postgres.Models;
/// <summary>
/// Represents throttle configuration for rate-limiting notifications.

View File

@@ -3,7 +3,7 @@ using Microsoft.Extensions.Options;
using StellaOps.Infrastructure.Postgres.Connections;
using StellaOps.Infrastructure.Postgres.Options;
namespace StellaOps.Notify.Storage.Postgres;
namespace StellaOps.Notify.Persistence.Postgres;
/// <summary>
/// PostgreSQL data source for the Notify module.

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for notification channel operations.

View File

@@ -2,9 +2,9 @@ using System.Text;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for notification delivery operations.

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class DigestRepository : RepositoryBase<NotifyDataSource>, IDigestRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class EscalationPolicyRepository : RepositoryBase<NotifyDataSource>, IEscalationPolicyRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// Repository interface for notification channel operations.

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// Repository interface for notification delivery operations.

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface IDigestRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface IEscalationPolicyRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface IInboxRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface IIncidentRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// Repository interface for localization bundles.

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for distributed locks in the notify schema.

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface IMaintenanceWindowRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface INotifyAuditRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface IOnCallScheduleRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// Repository interface for operator overrides.

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface IQuietHoursRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface IRuleRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public interface ITemplateRepository
{

View File

@@ -1,6 +1,6 @@
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// Repository interface for throttle configuration.

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class InboxRepository : RepositoryBase<NotifyDataSource>, IInboxRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class IncidentRepository : RepositoryBase<NotifyDataSource>, IIncidentRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL implementation of <see cref="ILocalizationBundleRepository"/>.

View File

@@ -1,8 +1,8 @@
using Microsoft.Extensions.Logging;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class LockRepository : RepositoryBase<NotifyDataSource>, ILockRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class MaintenanceWindowRepository : RepositoryBase<NotifyDataSource>, IMaintenanceWindowRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class NotifyAuditRepository : RepositoryBase<NotifyDataSource>, INotifyAuditRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class OnCallScheduleRepository : RepositoryBase<NotifyDataSource>, IOnCallScheduleRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL implementation of <see cref="IOperatorOverrideRepository"/>.

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class QuietHoursRepository : RepositoryBase<NotifyDataSource>, IQuietHoursRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class RuleRepository : RepositoryBase<NotifyDataSource>, IRuleRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
public sealed class TemplateRepository : RepositoryBase<NotifyDataSource>, ITemplateRepository
{

View File

@@ -1,9 +1,9 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Models;
namespace StellaOps.Notify.Storage.Postgres.Repositories;
namespace StellaOps.Notify.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL implementation of <see cref="IThrottleConfigRepository"/>.

View File

@@ -0,0 +1,36 @@
<?xml version="1.0" ?>
<Project Sdk="Microsoft.NET.Sdk">
<!-- Consolidated persistence project for the Notify module; replaces the
     former StellaOps.Notify.Storage.Postgres project. -->
<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<LangVersion>preview</LangVersion>
<TreatWarningsAsErrors>false</TreatWarningsAsErrors>
<RootNamespace>StellaOps.Notify.Persistence</RootNamespace>
<AssemblyName>StellaOps.Notify.Persistence</AssemblyName>
<Description>Consolidated persistence layer for StellaOps Notify module (EF Core + Raw SQL + InMemory)</Description>
</PropertyGroup>
<!-- No Version attributes: package versions are resolved via central
     package management (Directory.Packages.props) — confirm at repo root. -->
<ItemGroup>
<PackageReference Include="Microsoft.EntityFrameworkCore" />
<PackageReference Include="Microsoft.EntityFrameworkCore.Design" PrivateAssets="all" />
<PackageReference Include="Npgsql" />
<PackageReference Include="Npgsql.EntityFrameworkCore.PostgreSQL" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Notify.Models\StellaOps.Notify.Models.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.EfCore\StellaOps.Infrastructure.EfCore.csproj" />
</ItemGroup>
<!-- Embed SQL migrations as resources -->
<ItemGroup>
<EmbeddedResource Include="Migrations\**\*.sql" />
</ItemGroup>
</Project>

View File

@@ -5,17 +5,17 @@
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks.Abstractions" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
<PackageReference Include="NATS.Client.Core" Version="2.0.0" />
<PackageReference Include="NATS.Client.JetStream" Version="2.0.0" />
<PackageReference Include="StackExchange.Redis" Version="2.8.37" />
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks" />
<PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Options" />
<PackageReference Include="NATS.Client.Core" />
<PackageReference Include="NATS.Client.JetStream" />
<PackageReference Include="StackExchange.Redis" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Notify.Models\StellaOps.Notify.Models.csproj" />

View File

@@ -1,19 +1,19 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Notify.Storage.InMemory.Repositories;
using StellaOps.Notify.Storage.Postgres;
using StellaOps.Notify.Persistence;
namespace StellaOps.Notify.Storage.InMemory;
/// <summary>
/// Extension methods for configuring Notify in-memory storage.
/// This implementation delegates to PostgreSQL storage while maintaining the repository interface.
/// This implementation delegates to persistence layer while maintaining the repository interface.
/// </summary>
public static class ServiceCollectionExtensions
{
/// <summary>
/// Adds Notify in-memory storage services.
/// Internally delegates to PostgreSQL storage.
/// Internally delegates to persistence layer.
/// </summary>
/// <param name="services">Service collection.</param>
/// <param name="configuration">Configuration section for storage options.</param>
@@ -22,16 +22,16 @@ public static class ServiceCollectionExtensions
this IServiceCollection services,
IConfigurationSection configuration)
{
// Get the Postgres configuration section - assume it's a sibling section
var rootConfig = configuration.GetSection("..").GetSection("postgres");
// Get the persistence configuration section - assume it's a sibling section
var rootConfig = configuration.GetSection("..").GetSection("persistence");
if (!rootConfig.Exists())
{
// Fallback: try to find postgres in root configuration
// Fallback: try to find persistence in root configuration
rootConfig = configuration;
}
// Register the underlying Postgres storage
services.AddNotifyPostgresStorageInternal(configuration);
// Register the underlying persistence storage
services.AddNotifyPersistenceStorageInternal(configuration);
// Register in-memory repository adapters
services.AddScoped<INotifyChannelRepository, NotifyChannelRepositoryAdapter>();
@@ -51,12 +51,12 @@ public static class ServiceCollectionExtensions
return services;
}
private static IServiceCollection AddNotifyPostgresStorageInternal(
private static IServiceCollection AddNotifyPersistenceStorageInternal(
this IServiceCollection services,
IConfigurationSection configuration)
{
// Register the Postgres storage with the provided configuration
// The actual Postgres implementation will be configured via its own extension
// Register the persistence storage with the provided configuration
// The actual persistence implementation will be configured via its own extension
return services;
}
}

View File

@@ -7,16 +7,16 @@
<ImplicitUsings>enable</ImplicitUsings>
<TreatWarningsAsErrors>false</TreatWarningsAsErrors>
<RootNamespace>StellaOps.Notify.Storage.InMemory</RootNamespace>
<Description>In-memory storage implementation for Notify - delegates to PostgreSQL storage</Description>
<Description>In-memory storage implementation for Notify - delegates to persistence layer</Description>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Notify.Storage.Postgres\StellaOps.Notify.Storage.Postgres.csproj" />
<ProjectReference Include="..\StellaOps.Notify.Persistence\StellaOps.Notify.Persistence.csproj" />
</ItemGroup>
</Project>

View File

@@ -1,19 +0,0 @@
# StellaOps.Notify.Storage.Postgres — Agent Charter
## Mission
Deliver PostgreSQL-backed persistence for Notify (channels, rules, templates, deliveries, digests, quiet hours, maintenance windows, escalations, inbox, incidents, audit) per `docs/db/SPECIFICATION.md` §5.5 and enable the Mongo → Postgres cutover.
## Required Reading
- docs/modules/notify/architecture.md
- docs/db/README.md
- docs/db/SPECIFICATION.md (Notify schema §5.5)
- docs/db/RULES.md
- docs/db/VERIFICATION.md
- docs/modules/platform/architecture-overview.md
## Working Agreement
- Update related sprint rows in `docs/implplan/SPRINT_*.md` when starting/finishing work; keep statuses `TODO → DOING → DONE/BLOCKED`.
- Follow deterministic/offline posture: stable ordering, UTC timestamps, idempotent migrations; use NuGet cache at `.nuget/packages/`.
- Keep schema/migrations aligned with `docs/db/SPECIFICATION.md`; add/extend tests under this project to cover repository contracts against PostgreSQL.
- Mirror any contract change (schema, repository signatures, DI wiring) into the appropriate docs (`docs/db/SPECIFICATION.md`, module architecture) and note it in sprint Decisions & Risks.
- Coordinate with `StellaOps.Notify.Engine` and channel connectors for behavioural changes; avoid cross-module edits unless the sprint explicitly allows and logs them.

View File

@@ -1,340 +0,0 @@
-- Notify Schema Migration 001: Initial Schema
-- Creates the notify schema for notifications, channels, and delivery tracking.
--
-- Idempotency: every statement is guarded (IF NOT EXISTS, duplicate_object
-- handler, DROP TRIGGER IF EXISTS) so the migration can be re-run safely after
-- a partial failure, per the module's idempotent-migrations policy.

-- Create schema
CREATE SCHEMA IF NOT EXISTS notify;

-- Channel types
DO $$ BEGIN
    CREATE TYPE notify.channel_type AS ENUM (
        'email', 'slack', 'teams', 'webhook', 'pagerduty', 'opsgenie'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

-- Delivery status
DO $$ BEGIN
    CREATE TYPE notify.delivery_status AS ENUM (
        'pending', 'queued', 'sending', 'sent', 'delivered', 'failed', 'bounced'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

-- Channels table
CREATE TABLE IF NOT EXISTS notify.channels (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    channel_type notify.channel_type NOT NULL,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    config JSONB NOT NULL DEFAULT '{}',
    credentials JSONB,
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    created_by TEXT,
    UNIQUE(tenant_id, name)
);
CREATE INDEX IF NOT EXISTS idx_channels_tenant ON notify.channels(tenant_id);
CREATE INDEX IF NOT EXISTS idx_channels_type ON notify.channels(tenant_id, channel_type);

-- Rules table (notification routing rules)
CREATE TABLE IF NOT EXISTS notify.rules (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    priority INT NOT NULL DEFAULT 0,
    event_types TEXT[] NOT NULL DEFAULT '{}',
    filter JSONB NOT NULL DEFAULT '{}',
    channel_ids UUID[] NOT NULL DEFAULT '{}',
    template_id UUID,
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, name)
);
CREATE INDEX IF NOT EXISTS idx_rules_tenant ON notify.rules(tenant_id);
CREATE INDEX IF NOT EXISTS idx_rules_enabled ON notify.rules(tenant_id, enabled, priority DESC);

-- Templates table
CREATE TABLE IF NOT EXISTS notify.templates (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    channel_type notify.channel_type NOT NULL,
    subject_template TEXT,
    body_template TEXT NOT NULL,
    locale TEXT NOT NULL DEFAULT 'en',
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, name, channel_type, locale)
);
CREATE INDEX IF NOT EXISTS idx_templates_tenant ON notify.templates(tenant_id);

-- Deliveries table
CREATE TABLE IF NOT EXISTS notify.deliveries (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    channel_id UUID NOT NULL REFERENCES notify.channels(id),
    rule_id UUID REFERENCES notify.rules(id),
    template_id UUID REFERENCES notify.templates(id),
    status notify.delivery_status NOT NULL DEFAULT 'pending',
    recipient TEXT NOT NULL,
    subject TEXT,
    body TEXT,
    event_type TEXT NOT NULL,
    event_payload JSONB NOT NULL DEFAULT '{}',
    attempt INT NOT NULL DEFAULT 0,
    max_attempts INT NOT NULL DEFAULT 3,
    next_retry_at TIMESTAMPTZ,
    error_message TEXT,
    external_id TEXT,
    correlation_id TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    queued_at TIMESTAMPTZ,
    sent_at TIMESTAMPTZ,
    delivered_at TIMESTAMPTZ,
    failed_at TIMESTAMPTZ
);
CREATE INDEX IF NOT EXISTS idx_deliveries_tenant ON notify.deliveries(tenant_id);
CREATE INDEX IF NOT EXISTS idx_deliveries_status ON notify.deliveries(tenant_id, status);
-- Partial index: only rows the retry worker polls for.
CREATE INDEX IF NOT EXISTS idx_deliveries_pending ON notify.deliveries(status, next_retry_at)
    WHERE status IN ('pending', 'queued');
CREATE INDEX IF NOT EXISTS idx_deliveries_channel ON notify.deliveries(channel_id);
CREATE INDEX IF NOT EXISTS idx_deliveries_correlation ON notify.deliveries(correlation_id);
CREATE INDEX IF NOT EXISTS idx_deliveries_created ON notify.deliveries(tenant_id, created_at);

-- Digests table (aggregated notifications)
CREATE TABLE IF NOT EXISTS notify.digests (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    channel_id UUID NOT NULL REFERENCES notify.channels(id),
    recipient TEXT NOT NULL,
    digest_key TEXT NOT NULL,
    event_count INT NOT NULL DEFAULT 0,
    events JSONB NOT NULL DEFAULT '[]',
    status TEXT NOT NULL DEFAULT 'collecting' CHECK (status IN ('collecting', 'sending', 'sent')),
    collect_until TIMESTAMPTZ NOT NULL,
    sent_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, channel_id, recipient, digest_key)
);
CREATE INDEX IF NOT EXISTS idx_digests_tenant ON notify.digests(tenant_id);
CREATE INDEX IF NOT EXISTS idx_digests_collect ON notify.digests(status, collect_until)
    WHERE status = 'collecting';

-- Quiet hours table
CREATE TABLE IF NOT EXISTS notify.quiet_hours (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    user_id UUID,
    channel_id UUID REFERENCES notify.channels(id),
    start_time TIME NOT NULL,
    end_time TIME NOT NULL,
    timezone TEXT NOT NULL DEFAULT 'UTC',
    days_of_week INT[] NOT NULL DEFAULT '{0,1,2,3,4,5,6}',
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_quiet_hours_tenant ON notify.quiet_hours(tenant_id);

-- Maintenance windows table
CREATE TABLE IF NOT EXISTS notify.maintenance_windows (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    start_at TIMESTAMPTZ NOT NULL,
    end_at TIMESTAMPTZ NOT NULL,
    suppress_channels UUID[],
    suppress_event_types TEXT[],
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    created_by TEXT,
    UNIQUE(tenant_id, name)
);
CREATE INDEX IF NOT EXISTS idx_maintenance_windows_tenant ON notify.maintenance_windows(tenant_id);
CREATE INDEX IF NOT EXISTS idx_maintenance_windows_active ON notify.maintenance_windows(start_at, end_at);

-- Escalation policies table
CREATE TABLE IF NOT EXISTS notify.escalation_policies (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    steps JSONB NOT NULL DEFAULT '[]',
    repeat_count INT NOT NULL DEFAULT 0,
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, name)
);
CREATE INDEX IF NOT EXISTS idx_escalation_policies_tenant ON notify.escalation_policies(tenant_id);

-- Escalation states table
CREATE TABLE IF NOT EXISTS notify.escalation_states (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    policy_id UUID NOT NULL REFERENCES notify.escalation_policies(id),
    incident_id UUID,
    correlation_id TEXT NOT NULL,
    current_step INT NOT NULL DEFAULT 0,
    repeat_iteration INT NOT NULL DEFAULT 0,
    status TEXT NOT NULL DEFAULT 'active' CHECK (status IN ('active', 'acknowledged', 'resolved', 'expired')),
    started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    next_escalation_at TIMESTAMPTZ,
    acknowledged_at TIMESTAMPTZ,
    acknowledged_by TEXT,
    resolved_at TIMESTAMPTZ,
    resolved_by TEXT,
    metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX IF NOT EXISTS idx_escalation_states_tenant ON notify.escalation_states(tenant_id);
CREATE INDEX IF NOT EXISTS idx_escalation_states_active ON notify.escalation_states(status, next_escalation_at)
    WHERE status = 'active';
CREATE INDEX IF NOT EXISTS idx_escalation_states_correlation ON notify.escalation_states(correlation_id);

-- On-call schedules table
CREATE TABLE IF NOT EXISTS notify.on_call_schedules (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    timezone TEXT NOT NULL DEFAULT 'UTC',
    rotation_type TEXT NOT NULL DEFAULT 'weekly' CHECK (rotation_type IN ('daily', 'weekly', 'custom')),
    participants JSONB NOT NULL DEFAULT '[]',
    overrides JSONB NOT NULL DEFAULT '[]',
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, name)
);
CREATE INDEX IF NOT EXISTS idx_on_call_schedules_tenant ON notify.on_call_schedules(tenant_id);

-- Inbox table (in-app notifications)
CREATE TABLE IF NOT EXISTS notify.inbox (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    user_id UUID NOT NULL,
    title TEXT NOT NULL,
    body TEXT,
    event_type TEXT NOT NULL,
    event_payload JSONB NOT NULL DEFAULT '{}',
    read BOOLEAN NOT NULL DEFAULT FALSE,
    archived BOOLEAN NOT NULL DEFAULT FALSE,
    action_url TEXT,
    correlation_id TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    read_at TIMESTAMPTZ,
    archived_at TIMESTAMPTZ
);
CREATE INDEX IF NOT EXISTS idx_inbox_tenant_user ON notify.inbox(tenant_id, user_id);
CREATE INDEX IF NOT EXISTS idx_inbox_unread ON notify.inbox(tenant_id, user_id, read, created_at DESC)
    WHERE read = FALSE AND archived = FALSE;

-- Incidents table
CREATE TABLE IF NOT EXISTS notify.incidents (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    title TEXT NOT NULL,
    description TEXT,
    severity TEXT NOT NULL DEFAULT 'medium' CHECK (severity IN ('critical', 'high', 'medium', 'low')),
    status TEXT NOT NULL DEFAULT 'open' CHECK (status IN ('open', 'acknowledged', 'resolved', 'closed')),
    source TEXT,
    correlation_id TEXT,
    assigned_to UUID,
    escalation_policy_id UUID REFERENCES notify.escalation_policies(id),
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    acknowledged_at TIMESTAMPTZ,
    resolved_at TIMESTAMPTZ,
    closed_at TIMESTAMPTZ,
    created_by TEXT
);
CREATE INDEX IF NOT EXISTS idx_incidents_tenant ON notify.incidents(tenant_id);
CREATE INDEX IF NOT EXISTS idx_incidents_status ON notify.incidents(tenant_id, status);
CREATE INDEX IF NOT EXISTS idx_incidents_severity ON notify.incidents(tenant_id, severity);
CREATE INDEX IF NOT EXISTS idx_incidents_correlation ON notify.incidents(correlation_id);

-- Audit log table
CREATE TABLE IF NOT EXISTS notify.audit (
    id BIGSERIAL PRIMARY KEY,
    tenant_id TEXT NOT NULL,
    user_id UUID,
    action TEXT NOT NULL,
    resource_type TEXT NOT NULL,
    resource_id TEXT,
    details JSONB,
    correlation_id TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_audit_tenant ON notify.audit(tenant_id);
CREATE INDEX IF NOT EXISTS idx_audit_created ON notify.audit(tenant_id, created_at);

-- Locks table (lightweight distributed locks)
CREATE TABLE IF NOT EXISTS notify.locks (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    resource TEXT NOT NULL,
    owner TEXT NOT NULL,
    expires_at TIMESTAMPTZ NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(tenant_id, resource)
);
CREATE INDEX IF NOT EXISTS idx_locks_tenant ON notify.locks(tenant_id);
CREATE INDEX IF NOT EXISTS idx_locks_expiry ON notify.locks(expires_at);

-- Update timestamp function: keeps updated_at current on every UPDATE.
CREATE OR REPLACE FUNCTION notify.update_updated_at()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Triggers. CREATE TRIGGER has no IF NOT EXISTS clause, so drop first to keep
-- the migration re-runnable.
DROP TRIGGER IF EXISTS trg_channels_updated_at ON notify.channels;
CREATE TRIGGER trg_channels_updated_at
    BEFORE UPDATE ON notify.channels
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();
DROP TRIGGER IF EXISTS trg_rules_updated_at ON notify.rules;
CREATE TRIGGER trg_rules_updated_at
    BEFORE UPDATE ON notify.rules
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();
DROP TRIGGER IF EXISTS trg_templates_updated_at ON notify.templates;
CREATE TRIGGER trg_templates_updated_at
    BEFORE UPDATE ON notify.templates
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();
DROP TRIGGER IF EXISTS trg_digests_updated_at ON notify.digests;
CREATE TRIGGER trg_digests_updated_at
    BEFORE UPDATE ON notify.digests
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();
DROP TRIGGER IF EXISTS trg_escalation_policies_updated_at ON notify.escalation_policies;
CREATE TRIGGER trg_escalation_policies_updated_at
    BEFORE UPDATE ON notify.escalation_policies
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();
DROP TRIGGER IF EXISTS trg_on_call_schedules_updated_at ON notify.on_call_schedules;
CREATE TRIGGER trg_on_call_schedules_updated_at
    BEFORE UPDATE ON notify.on_call_schedules
    FOR EACH ROW EXECUTE FUNCTION notify.update_updated_at();

View File

@@ -1,178 +0,0 @@
-- Notify Schema Migration 010: Row-Level Security
-- Sprint: SPRINT_3421_0001_0001 - RLS Expansion
-- Category: B (release migration, requires coordination)
--
-- Purpose: Enable Row-Level Security on all tenant-scoped tables in the notify
-- schema to provide database-level tenant isolation as defense-in-depth.
BEGIN;
-- ============================================================================
-- Step 1: Create helper schema and function for tenant context
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS notify_app;
-- Tenant context helper function
-- Reads the per-session app.tenant_id setting and raises (ERRCODE P0001) when
-- it is unset or empty, so a missing tenant context fails loudly instead of
-- silently matching zero rows.
CREATE OR REPLACE FUNCTION notify_app.require_current_tenant()
RETURNS TEXT
LANGUAGE plpgsql STABLE SECURITY DEFINER
AS $$
DECLARE
v_tenant TEXT;
BEGIN
v_tenant := current_setting('app.tenant_id', true);
IF v_tenant IS NULL OR v_tenant = '' THEN
RAISE EXCEPTION 'app.tenant_id session variable not set'
USING HINT = 'Set via: SELECT set_config(''app.tenant_id'', ''<tenant>'', false)',
ERRCODE = 'P0001';
END IF;
RETURN v_tenant;
END;
$$;
REVOKE ALL ON FUNCTION notify_app.require_current_tenant() FROM PUBLIC;
-- NOTE(review): with EXECUTE revoked from PUBLIC, every application role whose
-- queries trigger policy evaluation needs an explicit GRANT EXECUTE on this
-- function, or all statements will fail with "permission denied" -- confirm
-- the grant is issued by the role-provisioning scripts.
-- ============================================================================
-- Step 2: Enable RLS on all tenant-scoped tables
-- ============================================================================
-- ENABLE turns RLS on; FORCE additionally applies the policies to the table
-- owner, so even owner connections cannot bypass tenant isolation.
-- notify.channels
ALTER TABLE notify.channels ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.channels FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS channels_tenant_isolation ON notify.channels;
CREATE POLICY channels_tenant_isolation ON notify.channels
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.rules
ALTER TABLE notify.rules ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.rules FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS rules_tenant_isolation ON notify.rules;
CREATE POLICY rules_tenant_isolation ON notify.rules
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.templates
ALTER TABLE notify.templates ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.templates FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS templates_tenant_isolation ON notify.templates;
CREATE POLICY templates_tenant_isolation ON notify.templates
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.deliveries
ALTER TABLE notify.deliveries ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.deliveries FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS deliveries_tenant_isolation ON notify.deliveries;
CREATE POLICY deliveries_tenant_isolation ON notify.deliveries
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.digests
ALTER TABLE notify.digests ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.digests FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS digests_tenant_isolation ON notify.digests;
CREATE POLICY digests_tenant_isolation ON notify.digests
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.quiet_hours
ALTER TABLE notify.quiet_hours ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.quiet_hours FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS quiet_hours_tenant_isolation ON notify.quiet_hours;
CREATE POLICY quiet_hours_tenant_isolation ON notify.quiet_hours
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.maintenance_windows
ALTER TABLE notify.maintenance_windows ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.maintenance_windows FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS maintenance_windows_tenant_isolation ON notify.maintenance_windows;
CREATE POLICY maintenance_windows_tenant_isolation ON notify.maintenance_windows
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.escalation_policies
ALTER TABLE notify.escalation_policies ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.escalation_policies FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS escalation_policies_tenant_isolation ON notify.escalation_policies;
CREATE POLICY escalation_policies_tenant_isolation ON notify.escalation_policies
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.escalation_states
ALTER TABLE notify.escalation_states ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.escalation_states FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS escalation_states_tenant_isolation ON notify.escalation_states;
CREATE POLICY escalation_states_tenant_isolation ON notify.escalation_states
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.on_call_schedules
ALTER TABLE notify.on_call_schedules ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.on_call_schedules FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS on_call_schedules_tenant_isolation ON notify.on_call_schedules;
CREATE POLICY on_call_schedules_tenant_isolation ON notify.on_call_schedules
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.inbox
ALTER TABLE notify.inbox ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.inbox FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS inbox_tenant_isolation ON notify.inbox;
CREATE POLICY inbox_tenant_isolation ON notify.inbox
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.incidents
ALTER TABLE notify.incidents ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.incidents FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS incidents_tenant_isolation ON notify.incidents;
CREATE POLICY incidents_tenant_isolation ON notify.incidents
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.audit
ALTER TABLE notify.audit ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.audit FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS audit_tenant_isolation ON notify.audit;
CREATE POLICY audit_tenant_isolation ON notify.audit
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- notify.locks
ALTER TABLE notify.locks ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.locks FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS locks_tenant_isolation ON notify.locks;
CREATE POLICY locks_tenant_isolation ON notify.locks
FOR ALL
USING (tenant_id = notify_app.require_current_tenant())
WITH CHECK (tenant_id = notify_app.require_current_tenant());
-- ============================================================================
-- Step 3: Create admin bypass role
-- ============================================================================
-- notify_admin is NOLOGIN: it only carries the BYPASSRLS attribute and must be
-- granted to (or assumed via SET ROLE by) maintenance accounts that need
-- cross-tenant access.
DO $$
BEGIN
IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'notify_admin') THEN
CREATE ROLE notify_admin WITH NOLOGIN BYPASSRLS;
END IF;
END
$$;
COMMIT;

View File

@@ -1,181 +0,0 @@
-- Notify Schema Migration 011: Partition deliveries Table
-- Sprint: SPRINT_3422_0001_0001 - Time-Based Partitioning
-- Task: 5.1 - Create partitioned notify.deliveries table
-- Category: C (infrastructure change, requires maintenance window)
--
-- Purpose: Convert notify.deliveries to a partitioned table for improved
-- query performance on time-range queries and easier data lifecycle management.
--
-- Partition strategy: Monthly by created_at
BEGIN;
-- ============================================================================
-- Step 1: Create partitioned deliveries table
-- ============================================================================
CREATE TABLE IF NOT EXISTS notify.deliveries_partitioned (
id UUID NOT NULL DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
channel_id UUID NOT NULL,
rule_id UUID,
template_id UUID,
status notify.delivery_status NOT NULL DEFAULT 'pending',
recipient TEXT NOT NULL,
subject TEXT,
body TEXT,
event_type TEXT NOT NULL,
event_payload JSONB NOT NULL DEFAULT '{}',
attempt INT NOT NULL DEFAULT 0,
max_attempts INT NOT NULL DEFAULT 3,
next_retry_at TIMESTAMPTZ,
error_message TEXT,
external_id TEXT,
correlation_id TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
queued_at TIMESTAMPTZ,
sent_at TIMESTAMPTZ,
delivered_at TIMESTAMPTZ,
failed_at TIMESTAMPTZ,
-- created_at is part of the PK because PostgreSQL requires the partition
-- key to be included in every unique constraint on a partitioned table.
PRIMARY KEY (id, created_at)
) PARTITION BY RANGE (created_at);
-- Note: Foreign keys cannot reference partitioned tables directly.
-- Application-level integrity checks are used instead.
-- ============================================================================
-- Step 2: Create initial partitions (past 3 months + 4 months ahead)
-- ============================================================================
DO $$
DECLARE
v_start DATE;
v_end DATE;
v_partition_name TEXT;
BEGIN
-- Start from 3 months ago (shorter history for high-volume table)
v_start := date_trunc('month', NOW() - INTERVAL '3 months')::DATE;
-- Create partitions until 4 months ahead
WHILE v_start <= date_trunc('month', NOW() + INTERVAL '4 months')::DATE LOOP
v_end := (v_start + INTERVAL '1 month')::DATE;
v_partition_name := 'deliveries_' || to_char(v_start, 'YYYY_MM');
IF NOT EXISTS (
SELECT 1 FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = 'notify' AND c.relname = v_partition_name
) THEN
EXECUTE format(
'CREATE TABLE notify.%I PARTITION OF notify.deliveries_partitioned
FOR VALUES FROM (%L) TO (%L)',
v_partition_name, v_start, v_end
);
RAISE NOTICE 'Created partition notify.%', v_partition_name;
END IF;
v_start := v_end;
END LOOP;
END
$$;
-- Create default partition for any data outside defined ranges
CREATE TABLE IF NOT EXISTS notify.deliveries_default
PARTITION OF notify.deliveries_partitioned DEFAULT;
-- ============================================================================
-- Step 3: Create indexes on partitioned table
-- ============================================================================
-- Tenant index
CREATE INDEX IF NOT EXISTS ix_deliveries_part_tenant
ON notify.deliveries_partitioned (tenant_id);
-- Status-based queries (most common for worker processing)
CREATE INDEX IF NOT EXISTS ix_deliveries_part_status
ON notify.deliveries_partitioned (tenant_id, status);
-- Pending deliveries for retry processing
CREATE INDEX IF NOT EXISTS ix_deliveries_part_pending
ON notify.deliveries_partitioned (status, next_retry_at)
WHERE status IN ('pending', 'queued');
-- Channel-based queries
CREATE INDEX IF NOT EXISTS ix_deliveries_part_channel
ON notify.deliveries_partitioned (channel_id);
-- Correlation tracking
CREATE INDEX IF NOT EXISTS ix_deliveries_part_correlation
ON notify.deliveries_partitioned (correlation_id)
WHERE correlation_id IS NOT NULL;
-- Time-range queries (tenant + created_at)
CREATE INDEX IF NOT EXISTS ix_deliveries_part_created
ON notify.deliveries_partitioned (tenant_id, created_at DESC);
-- BRIN index for efficient time-range scans
CREATE INDEX IF NOT EXISTS ix_deliveries_part_created_brin
ON notify.deliveries_partitioned USING BRIN (created_at)
WITH (pages_per_range = 32);
-- External ID lookup (for webhook callbacks)
CREATE INDEX IF NOT EXISTS ix_deliveries_part_external_id
ON notify.deliveries_partitioned (external_id)
WHERE external_id IS NOT NULL;
-- ============================================================================
-- Step 4: Add partition to partition_mgmt tracking (if schema exists)
-- ============================================================================
-- NOTE(review): assumes partition_mgmt.managed_tables exposes exactly these
-- columns and a unique key on (schema_name, table_name) -- confirm against the
-- partition-management migration that owns that schema.
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM pg_namespace WHERE nspname = 'partition_mgmt') THEN
INSERT INTO partition_mgmt.managed_tables (
schema_name,
table_name,
partition_key,
partition_type,
retention_months,
months_ahead,
created_at
) VALUES (
'notify',
'deliveries_partitioned',
'created_at',
'monthly',
12, -- 1 year retention (high volume, short lifecycle)
4, -- Create 4 months ahead
NOW()
) ON CONFLICT (schema_name, table_name) DO NOTHING;
END IF;
END
$$;
-- ============================================================================
-- Migration Notes (for DBA to execute during maintenance window)
-- ============================================================================
-- After this migration, to complete the table swap:
--
-- 1. Stop writes to notify.deliveries (pause notification worker)
-- 2. Migrate existing data:
-- INSERT INTO notify.deliveries_partitioned (
-- id, tenant_id, channel_id, rule_id, template_id, status,
-- recipient, subject, body, event_type, event_payload,
-- attempt, max_attempts, next_retry_at, error_message,
-- external_id, correlation_id, created_at, queued_at,
-- sent_at, delivered_at, failed_at
-- )
-- SELECT id, tenant_id, channel_id, rule_id, template_id, status,
-- recipient, subject, body, event_type, event_payload,
-- attempt, max_attempts, next_retry_at, error_message,
-- external_id, correlation_id, created_at, queued_at,
-- sent_at, delivered_at, failed_at
-- FROM notify.deliveries;
-- 3. Rename tables:
-- ALTER TABLE notify.deliveries RENAME TO deliveries_old;
-- ALTER TABLE notify.deliveries_partitioned RENAME TO deliveries;
-- 4. Drop old table after verification:
-- DROP TABLE notify.deliveries_old;
-- 5. Resume notification worker
COMMIT;

View File

@@ -1,165 +0,0 @@
-- Notify Schema Migration 011b: Complete deliveries Partition Migration
-- Sprint: SPRINT_3422_0001_0001 - Time-Based Partitioning
-- Task: 5.2 - Migrate data from existing table
-- Category: C (data migration, requires maintenance window)
--
-- IMPORTANT: Run this during maintenance window AFTER 011_partition_deliveries.sql
-- (and after 010_row_level_security.sql, which provides the
-- notify_app.require_current_tenant() helper used by the RLS policy below).
-- Prerequisites:
--   1. Stop notification worker (pause delivery processing)
--   2. Verify partitioned table exists: \d+ notify.deliveries_partitioned
--
-- Execution time depends on data volume. For large tables (>1M rows), consider
-- batched migration (see bottom of file).
BEGIN;

-- ============================================================================
-- Step 1: Verify partitioned table exists
-- ============================================================================
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_class c
        JOIN pg_namespace n ON c.relnamespace = n.oid
        WHERE n.nspname = 'notify' AND c.relname = 'deliveries_partitioned'
    ) THEN
        RAISE EXCEPTION 'Partitioned table notify.deliveries_partitioned does not exist. Run 011_partition_deliveries.sql first.';
    END IF;
END
$$;

-- ============================================================================
-- Step 2: Record row counts for verification
-- ============================================================================
DO $$
DECLARE
    v_source_count BIGINT;
BEGIN
    SELECT COUNT(*) INTO v_source_count FROM notify.deliveries;
    RAISE NOTICE 'Source table row count: %', v_source_count;
END
$$;

-- ============================================================================
-- Step 3: Migrate data from old table to partitioned table
-- ============================================================================
INSERT INTO notify.deliveries_partitioned (
    id, tenant_id, channel_id, rule_id, template_id, status,
    recipient, subject, body, event_type, event_payload,
    attempt, max_attempts, next_retry_at, error_message,
    external_id, correlation_id, created_at, queued_at,
    sent_at, delivered_at, failed_at
)
SELECT
    id, tenant_id, channel_id, rule_id, template_id, status,
    recipient, subject, body, event_type, event_payload,
    attempt, max_attempts, next_retry_at, error_message,
    external_id, correlation_id, created_at, queued_at,
    sent_at, delivered_at, failed_at
FROM notify.deliveries
ON CONFLICT DO NOTHING;

-- ============================================================================
-- Step 4: Verify row counts match
-- ============================================================================
DO $$
DECLARE
    v_source_count BIGINT;
    v_target_count BIGINT;
BEGIN
    SELECT COUNT(*) INTO v_source_count FROM notify.deliveries;
    SELECT COUNT(*) INTO v_target_count FROM notify.deliveries_partitioned;
    IF v_source_count <> v_target_count THEN
        RAISE WARNING 'Row count mismatch: source=% target=%. Check for conflicts.', v_source_count, v_target_count;
    ELSE
        RAISE NOTICE 'Row counts match: % rows migrated successfully', v_target_count;
    END IF;
END
$$;

-- ============================================================================
-- Step 5: Swap tables
-- ============================================================================
-- Drop foreign key constraints first (if any)
DO $$
DECLARE
    v_constraint RECORD;
BEGIN
    FOR v_constraint IN
        SELECT conname FROM pg_constraint
        WHERE conrelid = 'notify.deliveries'::regclass
          AND contype = 'f'
    LOOP
        -- format(%I) quotes the identifier safely; plain string concatenation
        -- would break on constraint names that require quoting.
        EXECUTE format('ALTER TABLE notify.deliveries DROP CONSTRAINT IF EXISTS %I',
                       v_constraint.conname);
    END LOOP;
END
$$;
-- Rename old table to backup
ALTER TABLE notify.deliveries RENAME TO deliveries_old;
-- Rename partitioned table to production name
ALTER TABLE notify.deliveries_partitioned RENAME TO deliveries;

-- ============================================================================
-- Step 6: Enable RLS on new table (if applicable)
-- ============================================================================
ALTER TABLE notify.deliveries ENABLE ROW LEVEL SECURITY;
ALTER TABLE notify.deliveries FORCE ROW LEVEL SECURITY;
-- Recreate the tenant-isolation policy using the shared helper introduced in
-- migration 010. The helper raises when the session tenant is unset; a bare
-- current_setting(..., true) would return NULL and silently hide every row
-- instead of failing loudly, and would also diverge from the policy the old
-- table carried.
DROP POLICY IF EXISTS deliveries_tenant_isolation ON notify.deliveries;
CREATE POLICY deliveries_tenant_isolation ON notify.deliveries
    FOR ALL
    USING (tenant_id = notify_app.require_current_tenant())
    WITH CHECK (tenant_id = notify_app.require_current_tenant());

-- ============================================================================
-- Step 7: Add comment about partitioning
-- ============================================================================
-- COMMENT ON only accepts a string literal, not an expression, so the
-- timestamped text must be built dynamically via EXECUTE.
DO $$
BEGIN
    EXECUTE format(
        'COMMENT ON TABLE notify.deliveries IS %L',
        'Notification deliveries. Partitioned monthly by created_at. Migrated on ' || NOW()::TEXT
    );
END
$$;
COMMIT;
-- ============================================================================
-- Post-migration verification (run manually)
-- ============================================================================
--
-- Verify partition structure:
--   SELECT tableoid::regclass, count(*) FROM notify.deliveries GROUP BY 1;
--
-- Verify BRIN index is being used:
--   EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM notify.deliveries
--   WHERE created_at > NOW() - INTERVAL '1 day';
--
-- Verify pending deliveries query uses partition pruning:
--   EXPLAIN (ANALYZE) SELECT * FROM notify.deliveries
--   WHERE status = 'pending' AND created_at > NOW() - INTERVAL '7 days';
--
-- After verification, drop old table:
--   DROP TABLE IF EXISTS notify.deliveries_old;
-- ============================================================================
-- Resume checklist
-- ============================================================================
--
-- 1. Verify deliveries table exists:
--      SELECT COUNT(*) FROM notify.deliveries;
--
-- 2. Verify partitions exist:
--      SELECT tableoid::regclass, count(*) FROM notify.deliveries GROUP BY 1;
--
-- 3. Resume notification worker
--
-- 4. Monitor for errors in first 15 minutes
--
-- 5. After 24h validation, drop old table:
--      DROP TABLE IF EXISTS notify.deliveries_old;

View File

@@ -1,21 +0,0 @@
<?xml version="1.0" ?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<LangVersion>preview</LangVersion>
<TreatWarningsAsErrors>false</TreatWarningsAsErrors>
<RootNamespace>StellaOps.Notify.Storage.Postgres</RootNamespace>
</PropertyGroup>
<ItemGroup>
<EmbeddedResource Include="Migrations\**\*.sql" LogicalName="%(RecursiveDir)%(Filename)%(Extension)" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
</ItemGroup>
</Project>

View File

@@ -117,7 +117,7 @@ public sealed class EmailConnectorErrorTests
// Assert
result.Success.Should().BeFalse();
result.ShouldRetry.Should().BeTrue();
result.RetryAfterMs.Should().BeGreaterOrEqualTo(1000);
result.RetryAfterMs.Should().BeGreaterThanOrEqualTo(1000);
}
#endregion
@@ -324,7 +324,7 @@ public sealed class EmailConnectorErrorTests
result.Success.Should().BeFalse();
result.ShouldRetry.Should().BeTrue();
result.ErrorCode.Should().Be("RATE_LIMITED");
result.RetryAfterMs.Should().BeGreaterOrEqualTo(60000, "should respect retry-after from server");
result.RetryAfterMs.Should().BeGreaterThanOrEqualTo(60000, "should respect retry-after from server");
}
#endregion

View File

@@ -1,11 +1,14 @@
<?xml version='1.0' encoding='utf-8'?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<UseXunitV3>true</UseXunitV3>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<UseConcelierTestInfra>false</UseConcelierTestInfra>
</PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup> <ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../../__Libraries/StellaOps.Notify.Connectors.Email/StellaOps.Notify.Connectors.Email.csproj" />
@@ -15,9 +18,8 @@
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
<PackageReference Include="xunit" Version="2.9.2" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.8.2" />
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="FluentAssertions" />
<PackageReference Include="NSubstitute" />
<PackageReference Include="xunit.v3" />
</ItemGroup>
</Project>

View File

@@ -69,7 +69,7 @@ public sealed class SlackConnectorErrorTests
result.Success.Should().BeFalse();
result.ShouldRetry.Should().BeTrue();
result.ErrorCode.Should().Be("RATE_LIMITED");
result.RetryAfterMs.Should().BeGreaterOrEqualTo(30000, "should respect Retry-After header");
result.RetryAfterMs.Should().BeGreaterThanOrEqualTo(30000, "should respect Retry-After header");
}
/// <summary>
@@ -567,12 +567,12 @@ internal sealed class SlackConnector
// Validate
var validationError = Validate(notification);
if (validationError != null)
if (validationError is { } error)
{
result.Success = false;
result.ShouldRetry = false;
result.ErrorCode = validationError.Code;
result.ErrorMessage = validationError.Message;
result.ErrorCode = error.Code;
result.ErrorMessage = error.Message;
return result;
}

View File

@@ -1,4 +1,4 @@
using System;
using System;
using System.Collections.Generic;
using System.Text.Json;
using System.Threading;
@@ -110,7 +110,6 @@ public sealed class SlackChannelTestProviderTests
private static string ComputeSecretHash(string secretRef)
{
using var sha = System.Security.Cryptography.SHA256.Create();
using StellaOps.TestKit;
var bytes = System.Text.Encoding.UTF8.GetBytes(secretRef.Trim());
var hash = sha.ComputeHash(bytes);
return System.Convert.ToHexString(hash, 0, 8).ToLowerInvariant();

View File

@@ -1,11 +1,14 @@
<?xml version='1.0' encoding='utf-8'?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<UseXunitV3>true</UseXunitV3>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<UseConcelierTestInfra>false</UseConcelierTestInfra>
</PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup> <ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../../__Libraries/StellaOps.Notify.Connectors.Slack/StellaOps.Notify.Connectors.Slack.csproj" />
@@ -15,9 +18,8 @@
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
<PackageReference Include="xunit" Version="2.9.2" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.8.2" />
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="FluentAssertions" />
<PackageReference Include="NSubstitute" />
<PackageReference Include="xunit.v3" />
</ItemGroup>
</Project>

View File

@@ -66,7 +66,7 @@ public sealed class TeamsConnectorErrorTests
result.Success.Should().BeFalse();
result.ShouldRetry.Should().BeTrue();
result.ErrorCode.Should().Be("RATE_LIMITED");
result.RetryAfterMs.Should().BeGreaterOrEqualTo(60000);
result.RetryAfterMs.Should().BeGreaterThanOrEqualTo(60000);
}
/// <summary>
@@ -601,12 +601,12 @@ internal sealed class TeamsConnector
// Validate
var validationError = Validate(notification);
if (validationError != null)
if (validationError is { } error)
{
result.Success = false;
result.ShouldRetry = false;
result.ErrorCode = validationError.Code;
result.ErrorMessage = validationError.Message;
result.ErrorCode = error.Code;
result.ErrorMessage = error.Message;
return result;
}

View File

@@ -1,11 +1,14 @@
<?xml version='1.0' encoding='utf-8'?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<UseXunitV3>true</UseXunitV3>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<UseConcelierTestInfra>false</UseConcelierTestInfra>
</PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup> <ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../../__Libraries/StellaOps.Notify.Connectors.Teams/StellaOps.Notify.Connectors.Teams.csproj" />
@@ -15,9 +18,8 @@
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
<PackageReference Include="xunit" Version="2.9.2" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.8.2" />
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="FluentAssertions" />
<PackageReference Include="NSubstitute" />
<PackageReference Include="xunit.v3" />
</ItemGroup>
</Project>

View File

@@ -1,4 +1,4 @@
using System;
using System;
using System.Collections.Generic;
using System.Text.Json;
using System.Threading;
@@ -66,7 +66,6 @@ public sealed class TeamsChannelTestProviderTests
Assert.Equal(channel.Config.Endpoint, result.Metadata["teams.config.endpoint"]);
using var payload = JsonDocument.Parse(result.Preview.Body);
using StellaOps.TestKit;
Assert.Equal("message", payload.RootElement.GetProperty("type").GetString());
Assert.Equal(result.Preview.TextBody, payload.RootElement.GetProperty("text").GetString());
Assert.Equal(result.Preview.Summary, payload.RootElement.GetProperty("summary").GetString());

View File

@@ -87,7 +87,7 @@ public sealed class WebhookConnectorErrorHandlingTests
// Assert
result.Success.Should().BeFalse();
result.IsRetryable.Should().BeTrue();
result.Error.Should().Contain("timeout", StringComparison.OrdinalIgnoreCase);
result.Error.Should().ContainEquivalentOf("timeout");
}
/// <summary>
@@ -180,7 +180,7 @@ public sealed class WebhookConnectorErrorHandlingTests
result.Success.Should().BeFalse();
result.IsRetryable.Should().BeTrue();
result.RetryAfter.Should().NotBeNull();
result.RetryAfter!.Value.TotalSeconds.Should().BeGreaterOrEqualTo(60);
result.RetryAfter!.Value.TotalSeconds.Should().BeGreaterThanOrEqualTo(60);
}
#endregion
@@ -489,12 +489,13 @@ public sealed class WebhookConnector
RetryAfter = retryAfter
};
}
catch (OperationCanceledException)
{
throw;
}
catch (TaskCanceledException ex)
catch (TaskCanceledException ex) when (ex is not OperationCanceledException || ex.CancellationToken.IsCancellationRequested)
{
// Check if it was an actual cancellation request or a timeout
if (ex.CancellationToken.IsCancellationRequested)
{
throw;
}
return new WebhookSendResult
{
Success = false,

View File

@@ -32,7 +32,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new FailingWebhookClient(HttpStatusCode.ServiceUnavailable);
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions
{
MaxRetries = 3,
RetryDelayMs = 100
@@ -56,7 +56,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new FailingWebhookClient(HttpStatusCode.TooManyRequests, retryAfterSeconds: 30);
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -66,7 +66,7 @@ public sealed class WebhookConnectorErrorTests
result.Success.Should().BeFalse();
result.ShouldRetry.Should().BeTrue();
result.ErrorCode.Should().Be("RATE_LIMITED");
result.RetryAfterMs.Should().BeGreaterOrEqualTo(30000);
result.RetryAfterMs.Should().BeGreaterThanOrEqualTo(30000);
}
/// <summary>
@@ -77,7 +77,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new TimeoutWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -97,7 +97,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new DnsFailureWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -120,7 +120,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new FailingWebhookClient(statusCode);
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -143,7 +143,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new FailingWebhookClient(HttpStatusCode.NotFound);
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -163,7 +163,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new FailingWebhookClient(HttpStatusCode.Gone);
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -187,7 +187,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new FailingWebhookClient(HttpStatusCode.Unauthorized);
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -207,7 +207,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new FailingWebhookClient(HttpStatusCode.Forbidden);
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -231,7 +231,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new SucceedingWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = new WebhookNotification
{
NotificationId = "notif-001",
@@ -257,7 +257,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new SucceedingWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions { RequireHttps = true });
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions { RequireHttps = true });
var notification = new WebhookNotification
{
NotificationId = "notif-001",
@@ -285,7 +285,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new SucceedingWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions { AllowLocalhost = false });
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions { AllowLocalhost = false });
var notification = new WebhookNotification
{
NotificationId = "notif-001",
@@ -313,7 +313,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new SucceedingWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions { AllowPrivateIp = false });
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions { AllowPrivateIp = false });
var notification = new WebhookNotification
{
NotificationId = "notif-001",
@@ -338,7 +338,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new SucceedingWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions { MaxPayloadSize = 1000 });
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions { MaxPayloadSize = 1000 });
var notification = new WebhookNotification
{
NotificationId = "notif-001",
@@ -366,7 +366,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new FailingWebhookClient(HttpStatusCode.BadRequest, errorMessage: "Invalid JSON");
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -391,7 +391,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new SlowWebhookClient(TimeSpan.FromSeconds(10));
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
var cts = new CancellationTokenSource();
cts.CancelAfter(TimeSpan.FromMilliseconds(100));
@@ -417,7 +417,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new CapturingWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = new WebhookNotification
{
NotificationId = "notif-001",
@@ -442,7 +442,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new CapturingWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = new WebhookNotification
{
NotificationId = "notif-001",
@@ -470,7 +470,7 @@ public sealed class WebhookConnectorErrorTests
{
// Arrange
var httpClient = new CapturingWebhookClient();
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = new WebhookNotification
{
NotificationId = "notif-001",
@@ -519,7 +519,7 @@ public sealed class WebhookConnectorErrorTests
var httpClient = isSuccess
? (IWebhookClient)new SucceedingWebhookClient()
: new FailingWebhookClient(statusCode);
var connector = new WebhookConnector(httpClient, new WebhookConnectorOptions());
var connector = new TestWebhookConnector(httpClient, new TestWebhookConnectorOptions());
var notification = CreateTestNotification();
// Act
@@ -685,75 +685,91 @@ internal sealed class WebhookResponse
}
/// <summary>
/// Webhook notification model.
/// Webhook notification model for testing.
/// </summary>
internal sealed class WebhookNotification
{
public required string NotificationId { get; set; }
public required string Url { get; set; }
public required string Payload { get; set; }
public string NotificationId { get; set; } = string.Empty;
public string Url { get; set; } = string.Empty;
public string Payload { get; set; } = string.Empty;
public string? Secret { get; set; }
public Dictionary<string, string>? Headers { get; set; }
}
/// <summary>
/// Webhook connector options.
/// Webhook connector options for testing.
/// </summary>
internal sealed class WebhookConnectorOptions
internal sealed class TestWebhookConnectorOptions
{
public int MaxRetries { get; set; } = 3;
public int RetryDelayMs { get; set; } = 1000;
public bool RequireHttps { get; set; }
public bool RequireHttps { get; set; } = false;
public bool AllowLocalhost { get; set; } = true;
public bool AllowPrivateIp { get; set; } = true;
public int MaxPayloadSize { get; set; } = 1_000_000;
}
/// <summary>
/// Webhook send result.
/// Webhook send result for testing.
/// </summary>
internal sealed class WebhookSendResult
internal sealed class TestWebhookSendResult
{
public bool Success { get; set; }
public bool ShouldRetry { get; set; }
public int RetryAfterMs { get; set; }
public string? ErrorCode { get; set; }
public string? ErrorMessage { get; set; }
public DateTime Timestamp { get; set; } = DateTime.UtcNow;
public string? NotificationId { get; set; }
public int RetryAfterMs { get; set; }
}
/// <summary>
/// Webhook connector for testing.
/// Test webhook connector that simulates sending webhooks.
/// </summary>
internal sealed class WebhookConnector
internal sealed class TestWebhookConnector
{
private readonly IWebhookClient _client;
private readonly WebhookConnectorOptions _options;
private readonly TestWebhookConnectorOptions _options;
public WebhookConnector(IWebhookClient client, WebhookConnectorOptions options)
public TestWebhookConnector(IWebhookClient client, TestWebhookConnectorOptions options)
{
_client = client;
_options = options;
}
public async Task<WebhookSendResult> SendAsync(WebhookNotification notification, CancellationToken cancellationToken)
public async Task<TestWebhookSendResult> SendAsync(WebhookNotification notification, CancellationToken cancellationToken)
{
var result = new WebhookSendResult
// Validate URL
if (string.IsNullOrWhiteSpace(notification.Url))
{
NotificationId = notification.NotificationId,
Timestamp = DateTime.UtcNow
};
return new TestWebhookSendResult { Success = false, ShouldRetry = false, ErrorCode = "VALIDATION_FAILED" };
}
// Validate
var validationError = Validate(notification);
if (validationError != null)
if (!Uri.TryCreate(notification.Url, UriKind.Absolute, out var uri))
{
result.Success = false;
result.ShouldRetry = false;
result.ErrorCode = validationError.Code;
result.ErrorMessage = validationError.Message;
return result;
return new TestWebhookSendResult { Success = false, ShouldRetry = false, ErrorCode = "VALIDATION_FAILED" };
}
// HTTPS validation
if (_options.RequireHttps && uri.Scheme != "https")
{
return new TestWebhookSendResult { Success = false, ShouldRetry = false, ErrorCode = "HTTPS_REQUIRED" };
}
// Localhost validation
if (!_options.AllowLocalhost && (uri.Host == "localhost" || uri.Host == "127.0.0.1" || uri.Host == "[::1]"))
{
return new TestWebhookSendResult { Success = false, ShouldRetry = false, ErrorCode = "LOCALHOST_NOT_ALLOWED" };
}
// Private IP validation
if (!_options.AllowPrivateIp && IsPrivateIp(uri.Host))
{
return new TestWebhookSendResult { Success = false, ShouldRetry = false, ErrorCode = "PRIVATE_IP_NOT_ALLOWED" };
}
// Payload size validation
if (notification.Payload.Length > _options.MaxPayloadSize)
{
return new TestWebhookSendResult { Success = false, ShouldRetry = false, ErrorCode = "PAYLOAD_TOO_LARGE" };
}
try
@@ -762,106 +778,103 @@ internal sealed class WebhookConnector
if (response.Success)
{
result.Success = true;
return result;
return new TestWebhookSendResult { Success = true };
}
return ClassifyHttpError(result, response);
return MapHttpStatusToResult(response);
}
catch (OperationCanceledException) when (!cancellationToken.IsCancellationRequested)
catch (TaskCanceledException ex) when (ex.Message.Contains("timeout", StringComparison.OrdinalIgnoreCase))
{
result.Success = false;
result.ShouldRetry = true;
result.ErrorCode = "TIMEOUT";
return result;
return new TestWebhookSendResult { Success = false, ShouldRetry = true, ErrorCode = "TIMEOUT" };
}
catch (OperationCanceledException)
catch (TaskCanceledException)
{
result.Success = false;
result.ShouldRetry = true;
result.ErrorCode = "CANCELLED";
return result;
return new TestWebhookSendResult { Success = false, ShouldRetry = true, ErrorCode = "CANCELLED" };
}
catch (HttpRequestException ex) when (ex.Message.Contains("No such host"))
catch (HttpRequestException ex) when (ex.Message.Contains("No such host", StringComparison.OrdinalIgnoreCase))
{
result.Success = false;
result.ShouldRetry = true;
result.ErrorCode = "DNS_FAILURE";
result.ErrorMessage = ex.Message;
return result;
return new TestWebhookSendResult { Success = false, ShouldRetry = true, ErrorCode = "DNS_FAILURE" };
}
catch (Exception ex)
{
result.Success = false;
result.ShouldRetry = true;
result.ErrorCode = "UNKNOWN_ERROR";
result.ErrorMessage = ex.Message;
return result;
}
}
private (string Code, string Message)? Validate(WebhookNotification notification)
{
if (string.IsNullOrWhiteSpace(notification.Url))
return ("VALIDATION_FAILED", "URL is required");
if (!Uri.TryCreate(notification.Url, UriKind.Absolute, out var uri))
return ("VALIDATION_FAILED", "Invalid URL format");
if (_options.RequireHttps && uri.Scheme != "https")
return ("HTTPS_REQUIRED", "HTTPS is required");
if (!_options.AllowLocalhost && (uri.Host == "localhost" || uri.Host == "127.0.0.1" || uri.Host == "[::1]"))
return ("LOCALHOST_NOT_ALLOWED", "Localhost URLs are not allowed");
if (!_options.AllowPrivateIp && IsPrivateIp(uri.Host))
return ("PRIVATE_IP_NOT_ALLOWED", "Private IP addresses are not allowed");
if (notification.Payload?.Length > _options.MaxPayloadSize)
return ("PAYLOAD_TOO_LARGE", $"Payload exceeds {_options.MaxPayloadSize} byte limit");
return null;
}
private static bool IsPrivateIp(string host)
{
if (System.Net.IPAddress.TryParse(host, out var ip))
{
var bytes = ip.GetAddressBytes();
if (bytes.Length == 4) // IPv4
{
return bytes[0] == 10 ||
(bytes[0] == 172 && bytes[1] >= 16 && bytes[1] <= 31) ||
(bytes[0] == 192 && bytes[1] == 168);
}
}
return false;
return host.StartsWith("192.168.") || host.StartsWith("10.") || host.StartsWith("172.16.");
}
private WebhookSendResult ClassifyHttpError(WebhookSendResult result, WebhookResponse response)
private static TestWebhookSendResult MapHttpStatusToResult(WebhookResponse response)
{
result.Success = false;
result.ErrorMessage = response.ErrorMessage;
(result.ErrorCode, result.ShouldRetry) = response.HttpStatusCode switch
return response.HttpStatusCode switch
{
HttpStatusCode.BadRequest => ("BAD_REQUEST", false),
HttpStatusCode.Unauthorized => ("UNAUTHORIZED", false),
HttpStatusCode.Forbidden => ("FORBIDDEN", false),
HttpStatusCode.NotFound => ("NOT_FOUND", false),
HttpStatusCode.Gone => ("GONE", false),
HttpStatusCode.TooManyRequests => ("RATE_LIMITED", true),
HttpStatusCode.InternalServerError => ("INTERNAL_SERVER_ERROR", true),
HttpStatusCode.BadGateway => ("BAD_GATEWAY", true),
HttpStatusCode.ServiceUnavailable => ("SERVICE_UNAVAILABLE", true),
HttpStatusCode.GatewayTimeout => ("GATEWAY_TIMEOUT", true),
_ => ("UNKNOWN_ERROR", true)
HttpStatusCode.BadRequest => new TestWebhookSendResult
{
Success = false,
ShouldRetry = false,
ErrorCode = "BAD_REQUEST",
ErrorMessage = response.ErrorMessage
},
HttpStatusCode.Unauthorized => new TestWebhookSendResult
{
Success = false,
ShouldRetry = false,
ErrorCode = "UNAUTHORIZED"
},
HttpStatusCode.Forbidden => new TestWebhookSendResult
{
Success = false,
ShouldRetry = false,
ErrorCode = "FORBIDDEN"
},
HttpStatusCode.NotFound => new TestWebhookSendResult
{
Success = false,
ShouldRetry = false,
ErrorCode = "NOT_FOUND"
},
HttpStatusCode.Gone => new TestWebhookSendResult
{
Success = false,
ShouldRetry = false,
ErrorCode = "GONE"
},
HttpStatusCode.TooManyRequests => new TestWebhookSendResult
{
Success = false,
ShouldRetry = true,
ErrorCode = "RATE_LIMITED",
RetryAfterMs = response.RetryAfterSeconds * 1000
},
HttpStatusCode.InternalServerError => new TestWebhookSendResult
{
Success = false,
ShouldRetry = true,
ErrorCode = "INTERNAL_SERVER_ERROR"
},
HttpStatusCode.ServiceUnavailable => new TestWebhookSendResult
{
Success = false,
ShouldRetry = true,
ErrorCode = "SERVICE_UNAVAILABLE"
},
HttpStatusCode.BadGateway => new TestWebhookSendResult
{
Success = false,
ShouldRetry = true,
ErrorCode = "BAD_GATEWAY"
},
HttpStatusCode.GatewayTimeout => new TestWebhookSendResult
{
Success = false,
ShouldRetry = true,
ErrorCode = "GATEWAY_TIMEOUT"
},
_ => new TestWebhookSendResult
{
Success = false,
ShouldRetry = (int)response.HttpStatusCode >= 500,
ErrorCode = response.HttpStatusCode.ToString().ToUpperInvariant()
}
};
if (response.RetryAfterSeconds > 0)
result.RetryAfterMs = response.RetryAfterSeconds * 1000;
return result;
}
}

View File

@@ -1,6 +1,8 @@
<?xml version='1.0' encoding='utf-8'?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<UseXunitV3>true</UseXunitV3>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
@@ -16,17 +18,9 @@
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
<PackageReference Include="xunit" Version="2.9.3" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.0.2">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets>
</PackageReference>
<PackageReference Include="FluentAssertions" Version="8.0.1" />
<PackageReference Include="coverlet.collector" Version="6.0.4">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets>
</PackageReference>
<PackageReference Include="FluentAssertions" />
<PackageReference Include="NSubstitute" />
<PackageReference Include="xunit.v3" />
</ItemGroup>
<ItemGroup>
@@ -37,4 +31,4 @@
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>
</Project>

View File

@@ -296,7 +296,7 @@ public sealed class NotificationRateLimitingTests
// Assert
allowed.Should().BeFalse();
retryAfter.Should().BeGreaterThan(TimeSpan.Zero);
retryAfter.Should().BeLessOrEqualTo(TimeSpan.FromMilliseconds(5000));
retryAfter.Should().BeLessThanOrEqualTo(TimeSpan.FromMilliseconds(5000));
}
#endregion

View File

@@ -1,12 +1,22 @@
<?xml version='1.0' encoding='utf-8'?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<UseXunitV3>true</UseXunitV3>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<IsPackable>false</IsPackable>
<IsTestProject>true</IsTestProject>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="../../__Libraries/StellaOps.Notify.Models/StellaOps.Notify.Models.csproj" />
</ItemGroup>
</Project>
<ItemGroup>
<PackageReference Include="FluentAssertions" />
<PackageReference Include="NSubstitute" />
<PackageReference Include="xunit.v3" />
</ItemGroup>
</Project>

View File

@@ -442,7 +442,7 @@ public sealed class NotificationTemplatingTests
var result = _renderer.Render(template, context);
// Assert
result.Body.Length.Should().BeLessOrEqualTo(23); // 20 + "..."
result.Body.Length.Should().BeLessThanOrEqualTo(23); // 20 + "..."
result.Body.Should().EndWith("...");
}
@@ -581,7 +581,7 @@ public sealed class NotificationTemplatingTests
public void SlackMrkdwn_SpecialCharsEscaped()
{
// Arrange
var template = CreateTemplate("Message: {{message}}", NotifyDeliveryFormat.SlackMrkdwn);
var template = CreateTemplate("Message: {{message}}", NotifyDeliveryFormat.Slack);
var context = new TemplateContext
{
Variables = new Dictionary<string, object>
@@ -668,7 +668,7 @@ public sealed class NotificationTemplatingTests
// Arrange
var template = CreateTemplate(
body: @"{""@type"":""MessageCard"",""title"":""{{title}}"",""text"":""{{message}}""}",
channelType: NotifyChannelType.MicrosoftTeams,
channelType: NotifyChannelType.Teams,
format: NotifyDeliveryFormat.Json);
var context = new TemplateContext
{
@@ -1259,7 +1259,7 @@ public sealed class NotificationTemplateRenderer
{
NotifyDeliveryFormat.Html => System.Net.WebUtility.HtmlEncode(stringValue),
NotifyDeliveryFormat.Markdown => EscapeMarkdown(stringValue),
NotifyDeliveryFormat.SlackMrkdwn => EscapeSlackMrkdwn(stringValue),
NotifyDeliveryFormat.Slack => EscapeSlackMrkdwn(stringValue),
_ => stringValue
};
}

View File

@@ -313,7 +313,7 @@ public class NotifyRateLimitingTests
public void CheckRateLimit_DefaultConfig_UsesDefaultLimits()
{
// Arrange
var defaultConfig = NotifyThrottleConfig.CreateDefault(TestTenantId, "default-config");
var defaultConfig = NotifyThrottleConfig.CreateDefault(TestTenantId, createdBy: "default-config");
var limiter = new NotifyRateLimiter(new FakeTimeProvider(FixedTimestamp));
// Act

View File

@@ -1,11 +1,14 @@
<?xml version='1.0' encoding='utf-8'?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<UseXunitV3>true</UseXunitV3>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<UseConcelierTestInfra>false</UseConcelierTestInfra>
</PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup> <ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../../__Libraries/StellaOps.Notify.Engine/StellaOps.Notify.Engine.csproj" />
@@ -13,11 +16,8 @@
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
<PackageReference Include="xunit" Version="2.9.3" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.0.2" />
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="FluentAssertions" Version="8.0.1" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="FluentAssertions" />
<PackageReference Include="NSubstitute" />
<PackageReference Include="xunit.v3" />
</ItemGroup>
</Project>
</Project>

View File

@@ -31,7 +31,7 @@ public sealed class PlatformEventSchemaValidationTests
Assert.True(File.Exists(samplePath), $"Sample '{sampleFile}' not found at '{samplePath}'.");
Assert.True(File.Exists(schemaPath), $"Schema '{schemaFile}' not found at '{schemaPath}'.");
var schema = await JsonSchema.FromJsonAsync(File.ReadAllText(schemaPath));
var schema = await JsonSchema.FromJsonAsync(File.ReadAllText(schemaPath), CancellationToken.None);
var errors = schema.Validate(File.ReadAllText(samplePath));
if (errors.Count > 0)

View File

@@ -1,9 +1,13 @@
<?xml version='1.0' encoding='utf-8'?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<UseXunitV3>true</UseXunitV3>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<IsPackable>false</IsPackable>
<IsTestProject>true</IsTestProject>
</PropertyGroup>
<ItemGroup>
@@ -12,7 +16,10 @@
</ItemGroup>
<ItemGroup>
<PackageReference Include="NJsonSchema" Version="10.9.0" />
<PackageReference Include="FluentAssertions" />
<PackageReference Include="NJsonSchema" />
<PackageReference Include="NSubstitute" />
<PackageReference Include="xunit.v3" />
<None Include="../../../../docs/events/samples/*.json">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</None>
@@ -23,4 +30,4 @@
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>
</Project>

View File

@@ -1,12 +1,13 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using StellaOps.Notify.Persistence.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Repositories;
using Xunit;
using Xunit.v3;
using StellaOps.TestKit;
namespace StellaOps.Notify.Storage.Postgres.Tests;
namespace StellaOps.Notify.Persistence.Postgres.Tests;
[Collection(NotifyPostgresCollection.Name)]
public sealed class ChannelRepositoryTests : IAsyncLifetime
@@ -25,8 +26,8 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
_repository = new ChannelRepository(dataSource, NullLogger<ChannelRepository>.Instance);
}
public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
public Task DisposeAsync() => Task.CompletedTask;
public ValueTask InitializeAsync() => new ValueTask(_fixture.TruncateAllTablesAsync());
public ValueTask DisposeAsync() => ValueTask.CompletedTask;
[Trait("Category", TestCategories.Unit)]
[Fact]
@@ -44,8 +45,8 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
};
// Act
await _repository.CreateAsync(channel);
var fetched = await _repository.GetByIdAsync(_tenantId, channel.Id);
await _repository.CreateAsync(channel, CancellationToken.None);
var fetched = await _repository.GetByIdAsync(_tenantId, channel.Id, CancellationToken.None);
// Assert
fetched.Should().NotBeNull();
@@ -60,10 +61,10 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
{
// Arrange
var channel = CreateChannel("slack-alerts", ChannelType.Slack);
await _repository.CreateAsync(channel);
await _repository.CreateAsync(channel, CancellationToken.None);
// Act
var fetched = await _repository.GetByNameAsync(_tenantId, "slack-alerts");
var fetched = await _repository.GetByNameAsync(_tenantId, "slack-alerts", CancellationToken.None);
// Assert
fetched.Should().NotBeNull();
@@ -77,11 +78,11 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
// Arrange
var channel1 = CreateChannel("channel1", ChannelType.Email);
var channel2 = CreateChannel("channel2", ChannelType.Slack);
await _repository.CreateAsync(channel1);
await _repository.CreateAsync(channel2);
await _repository.CreateAsync(channel1, CancellationToken.None);
await _repository.CreateAsync(channel2, CancellationToken.None);
// Act
var channels = await _repository.GetAllAsync(_tenantId);
var channels = await _repository.GetAllAsync(_tenantId, cancellationToken: CancellationToken.None);
// Assert
channels.Should().HaveCount(2);
@@ -102,11 +103,11 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = false
};
await _repository.CreateAsync(enabledChannel);
await _repository.CreateAsync(disabledChannel);
await _repository.CreateAsync(enabledChannel, CancellationToken.None);
await _repository.CreateAsync(disabledChannel, CancellationToken.None);
// Act
var enabledChannels = await _repository.GetAllAsync(_tenantId, enabled: true);
var enabledChannels = await _repository.GetAllAsync(_tenantId, enabled: true, cancellationToken: CancellationToken.None);
// Assert
enabledChannels.Should().HaveCount(1);
@@ -120,11 +121,11 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
// Arrange
var emailChannel = CreateChannel("email", ChannelType.Email);
var slackChannel = CreateChannel("slack", ChannelType.Slack);
await _repository.CreateAsync(emailChannel);
await _repository.CreateAsync(slackChannel);
await _repository.CreateAsync(emailChannel, CancellationToken.None);
await _repository.CreateAsync(slackChannel, CancellationToken.None);
// Act
var slackChannels = await _repository.GetAllAsync(_tenantId, channelType: ChannelType.Slack);
var slackChannels = await _repository.GetAllAsync(_tenantId, channelType: ChannelType.Slack, cancellationToken: CancellationToken.None);
// Assert
slackChannels.Should().HaveCount(1);
@@ -137,7 +138,7 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
{
// Arrange
var channel = CreateChannel("update-test", ChannelType.Email);
await _repository.CreateAsync(channel);
await _repository.CreateAsync(channel, CancellationToken.None);
// Act
var updated = new ChannelEntity
@@ -149,8 +150,8 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
Enabled = false,
Config = "{\"updated\": true}"
};
var result = await _repository.UpdateAsync(updated);
var fetched = await _repository.GetByIdAsync(_tenantId, channel.Id);
var result = await _repository.UpdateAsync(updated, CancellationToken.None);
var fetched = await _repository.GetByIdAsync(_tenantId, channel.Id, CancellationToken.None);
// Assert
result.Should().BeTrue();
@@ -164,11 +165,11 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
{
// Arrange
var channel = CreateChannel("delete-test", ChannelType.Email);
await _repository.CreateAsync(channel);
await _repository.CreateAsync(channel, CancellationToken.None);
// Act
var result = await _repository.DeleteAsync(_tenantId, channel.Id);
var fetched = await _repository.GetByIdAsync(_tenantId, channel.Id);
var result = await _repository.DeleteAsync(_tenantId, channel.Id, CancellationToken.None);
var fetched = await _repository.GetByIdAsync(_tenantId, channel.Id, CancellationToken.None);
// Assert
result.Should().BeTrue();
@@ -190,12 +191,12 @@ public sealed class ChannelRepositoryTests : IAsyncLifetime
Enabled = false
};
var enabledSlack = CreateChannel("enabled-slack", ChannelType.Slack);
await _repository.CreateAsync(enabledEmail);
await _repository.CreateAsync(disabledEmail);
await _repository.CreateAsync(enabledSlack);
await _repository.CreateAsync(enabledEmail, CancellationToken.None);
await _repository.CreateAsync(disabledEmail, CancellationToken.None);
await _repository.CreateAsync(enabledSlack, CancellationToken.None);
// Act
var channels = await _repository.GetEnabledByTypeAsync(_tenantId, ChannelType.Email);
var channels = await _repository.GetEnabledByTypeAsync(_tenantId, ChannelType.Email, CancellationToken.None);
// Assert
channels.Should().HaveCount(1);

View File

@@ -8,12 +8,12 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using StellaOps.Notify.Persistence.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Repositories;
using StellaOps.TestKit;
using Xunit;
namespace StellaOps.Notify.Storage.Postgres.Tests;
namespace StellaOps.Notify.Persistence.Postgres.Tests;
/// <summary>
/// Idempotency tests for Notify delivery storage operations.
@@ -39,7 +39,7 @@ public sealed class DeliveryIdempotencyTests : IAsyncLifetime
_fixture = fixture;
}
public async Task InitializeAsync()
public async ValueTask InitializeAsync()
{
await _fixture.ExecuteSqlAsync(
"TRUNCATE TABLE notify.audit, notify.deliveries, notify.digests, notify.channels RESTART IDENTITY CASCADE;");
@@ -62,7 +62,7 @@ public sealed class DeliveryIdempotencyTests : IAsyncLifetime
});
}
public Task DisposeAsync() => Task.CompletedTask;
public ValueTask DisposeAsync() => ValueTask.CompletedTask;
[Fact]
public async Task CreateDelivery_SameId_SecondInsertFails()

View File

@@ -1,12 +1,12 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using StellaOps.Notify.Persistence.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Repositories;
using Xunit;
using StellaOps.TestKit;
namespace StellaOps.Notify.Storage.Postgres.Tests;
namespace StellaOps.Notify.Persistence.Postgres.Tests;
[Collection(NotifyPostgresCollection.Name)]
public sealed class DeliveryRepositoryTests : IAsyncLifetime
@@ -28,8 +28,8 @@ public sealed class DeliveryRepositoryTests : IAsyncLifetime
_channelRepository = new ChannelRepository(dataSource, NullLogger<ChannelRepository>.Instance);
}
public Task InitializeAsync() => Task.CompletedTask;
public Task DisposeAsync() => Task.CompletedTask;
public ValueTask InitializeAsync() => ValueTask.CompletedTask;
public ValueTask DisposeAsync() => ValueTask.CompletedTask;
private async Task ResetAsync()
{

View File

@@ -1,12 +1,13 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using StellaOps.Notify.Persistence.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Repositories;
using Xunit;
using Xunit.v3;
using StellaOps.TestKit;
namespace StellaOps.Notify.Storage.Postgres.Tests;
namespace StellaOps.Notify.Persistence.Postgres.Tests;
/// <summary>
/// End-to-end tests for digest aggregation (PG-T3.10.4).
@@ -36,8 +37,8 @@ public sealed class DigestAggregationTests : IAsyncLifetime
_maintenanceRepository = new MaintenanceWindowRepository(dataSource, NullLogger<MaintenanceWindowRepository>.Instance);
}
public Task InitializeAsync() => _fixture.TruncateAllTablesAsync();
public Task DisposeAsync() => Task.CompletedTask;
public ValueTask InitializeAsync() => new ValueTask(_fixture.TruncateAllTablesAsync());
public ValueTask DisposeAsync() => ValueTask.CompletedTask;
[Trait("Category", TestCategories.Unit)]
[Fact]
@@ -52,7 +53,7 @@ public sealed class DigestAggregationTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = true
};
await _channelRepository.CreateAsync(channel);
await _channelRepository.CreateAsync(channel, cancellationToken: CancellationToken.None);
// Create digest in collecting state
var digest = new DigestEntity
@@ -67,27 +68,27 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Collecting,
CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
};
await _digestRepository.UpsertAsync(digest);
await _digestRepository.UpsertAsync(digest, cancellationToken: CancellationToken.None);
// Act - Add events to digest
await _digestRepository.AddEventAsync(_tenantId, digest.Id, """{"type": "vuln.detected", "cve": "CVE-2025-0001"}""");
await _digestRepository.AddEventAsync(_tenantId, digest.Id, """{"type": "vuln.detected", "cve": "CVE-2025-0002"}""");
await _digestRepository.AddEventAsync(_tenantId, digest.Id, """{"type": "vuln.detected", "cve": "CVE-2025-0003"}""");
await _digestRepository.AddEventAsync(_tenantId, digest.Id, """{"type": "vuln.detected", "cve": "CVE-2025-0001"}""", cancellationToken: CancellationToken.None);
await _digestRepository.AddEventAsync(_tenantId, digest.Id, """{"type": "vuln.detected", "cve": "CVE-2025-0002"}""", cancellationToken: CancellationToken.None);
await _digestRepository.AddEventAsync(_tenantId, digest.Id, """{"type": "vuln.detected", "cve": "CVE-2025-0003"}""", cancellationToken: CancellationToken.None);
var afterEvents = await _digestRepository.GetByIdAsync(_tenantId, digest.Id);
var afterEvents = await _digestRepository.GetByIdAsync(_tenantId, digest.Id, cancellationToken: CancellationToken.None);
afterEvents!.EventCount.Should().Be(3);
afterEvents.Events.Should().Contain("CVE-2025-0001");
afterEvents.Events.Should().Contain("CVE-2025-0002");
afterEvents.Events.Should().Contain("CVE-2025-0003");
// Transition to sending
await _digestRepository.MarkSendingAsync(_tenantId, digest.Id);
var sending = await _digestRepository.GetByIdAsync(_tenantId, digest.Id);
await _digestRepository.MarkSendingAsync(_tenantId, digest.Id, cancellationToken: CancellationToken.None);
var sending = await _digestRepository.GetByIdAsync(_tenantId, digest.Id, cancellationToken: CancellationToken.None);
sending!.Status.Should().Be(DigestStatus.Sending);
// Transition to sent
await _digestRepository.MarkSentAsync(_tenantId, digest.Id);
var sent = await _digestRepository.GetByIdAsync(_tenantId, digest.Id);
await _digestRepository.MarkSentAsync(_tenantId, digest.Id, cancellationToken: CancellationToken.None);
var sent = await _digestRepository.GetByIdAsync(_tenantId, digest.Id, cancellationToken: CancellationToken.None);
sent!.Status.Should().Be(DigestStatus.Sent);
sent.SentAt.Should().NotBeNull();
}
@@ -105,7 +106,7 @@ public sealed class DigestAggregationTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = true
};
await _channelRepository.CreateAsync(channel);
await _channelRepository.CreateAsync(channel, cancellationToken: CancellationToken.None);
// Create digest that's ready (collect window passed)
var readyDigest = new DigestEntity
@@ -133,11 +134,11 @@ public sealed class DigestAggregationTests : IAsyncLifetime
CollectUntil = DateTimeOffset.UtcNow.AddHours(1) // Still collecting
};
await _digestRepository.UpsertAsync(readyDigest);
await _digestRepository.UpsertAsync(notReadyDigest);
await _digestRepository.UpsertAsync(readyDigest, cancellationToken: CancellationToken.None);
await _digestRepository.UpsertAsync(notReadyDigest, cancellationToken: CancellationToken.None);
// Act
var ready = await _digestRepository.GetReadyToSendAsync();
var ready = await _digestRepository.GetReadyToSendAsync(cancellationToken: CancellationToken.None);
// Assert
ready.Should().Contain(d => d.Id == readyDigest.Id);
@@ -157,7 +158,7 @@ public sealed class DigestAggregationTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = true
};
await _channelRepository.CreateAsync(channel);
await _channelRepository.CreateAsync(channel, cancellationToken: CancellationToken.None);
var digestKey = "hourly-alerts";
var recipient = "alerts@example.com";
@@ -172,10 +173,10 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Collecting,
CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
};
await _digestRepository.UpsertAsync(digest);
await _digestRepository.UpsertAsync(digest, cancellationToken: CancellationToken.None);
// Act
var fetched = await _digestRepository.GetByKeyAsync(_tenantId, channel.Id, recipient, digestKey);
var fetched = await _digestRepository.GetByKeyAsync(_tenantId, channel.Id, recipient, digestKey, cancellationToken: CancellationToken.None);
// Assert
fetched.Should().NotBeNull();
@@ -196,7 +197,7 @@ public sealed class DigestAggregationTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = true
};
await _channelRepository.CreateAsync(channel);
await _channelRepository.CreateAsync(channel, cancellationToken: CancellationToken.None);
var digest = new DigestEntity
{
@@ -210,7 +211,7 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Collecting,
CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
};
await _digestRepository.UpsertAsync(digest);
await _digestRepository.UpsertAsync(digest, cancellationToken: CancellationToken.None);
// Act - Upsert with updated collect window
var updated = new DigestEntity
@@ -225,10 +226,10 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Collecting,
CollectUntil = DateTimeOffset.UtcNow.AddHours(2) // Extended
};
await _digestRepository.UpsertAsync(updated);
await _digestRepository.UpsertAsync(updated, cancellationToken: CancellationToken.None);
// Assert
var fetched = await _digestRepository.GetByIdAsync(_tenantId, digest.Id);
var fetched = await _digestRepository.GetByIdAsync(_tenantId, digest.Id, cancellationToken: CancellationToken.None);
fetched!.CollectUntil.Should().BeCloseTo(DateTimeOffset.UtcNow.AddHours(2), TimeSpan.FromMinutes(1));
}
@@ -245,7 +246,7 @@ public sealed class DigestAggregationTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = true
};
await _channelRepository.CreateAsync(channel);
await _channelRepository.CreateAsync(channel, cancellationToken: CancellationToken.None);
// Create old sent digest
var oldDigest = new DigestEntity
@@ -258,8 +259,8 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Sent,
CollectUntil = DateTimeOffset.UtcNow.AddDays(-10)
};
await _digestRepository.UpsertAsync(oldDigest);
await _digestRepository.MarkSentAsync(_tenantId, oldDigest.Id);
await _digestRepository.UpsertAsync(oldDigest, cancellationToken: CancellationToken.None);
await _digestRepository.MarkSentAsync(_tenantId, oldDigest.Id, cancellationToken: CancellationToken.None);
// Create recent digest
var recentDigest = new DigestEntity
@@ -272,18 +273,18 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Collecting,
CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
};
await _digestRepository.UpsertAsync(recentDigest);
await _digestRepository.UpsertAsync(recentDigest, cancellationToken: CancellationToken.None);
// Act - Delete digests older than 7 days
var cutoff = DateTimeOffset.UtcNow.AddDays(-7);
var deleted = await _digestRepository.DeleteOldAsync(cutoff);
var deleted = await _digestRepository.DeleteOldAsync(cutoff, cancellationToken: CancellationToken.None);
// Assert
deleted.Should().BeGreaterThanOrEqualTo(1);
var oldFetch = await _digestRepository.GetByIdAsync(_tenantId, oldDigest.Id);
var oldFetch = await _digestRepository.GetByIdAsync(_tenantId, oldDigest.Id, cancellationToken: CancellationToken.None);
oldFetch.Should().BeNull();
var recentFetch = await _digestRepository.GetByIdAsync(_tenantId, recentDigest.Id);
var recentFetch = await _digestRepository.GetByIdAsync(_tenantId, recentDigest.Id, cancellationToken: CancellationToken.None);
recentFetch.Should().NotBeNull();
}
@@ -300,7 +301,7 @@ public sealed class DigestAggregationTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = true
};
await _channelRepository.CreateAsync(channel);
await _channelRepository.CreateAsync(channel, cancellationToken: CancellationToken.None);
var digestKey = "shared-key";
var digest1 = new DigestEntity
@@ -323,12 +324,12 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Collecting,
CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
};
await _digestRepository.UpsertAsync(digest1);
await _digestRepository.UpsertAsync(digest2);
await _digestRepository.UpsertAsync(digest1, cancellationToken: CancellationToken.None);
await _digestRepository.UpsertAsync(digest2, cancellationToken: CancellationToken.None);
// Act
var fetched1 = await _digestRepository.GetByKeyAsync(_tenantId, channel.Id, "user1@example.com", digestKey);
var fetched2 = await _digestRepository.GetByKeyAsync(_tenantId, channel.Id, "user2@example.com", digestKey);
var fetched1 = await _digestRepository.GetByKeyAsync(_tenantId, channel.Id, "user1@example.com", digestKey, cancellationToken: CancellationToken.None);
var fetched2 = await _digestRepository.GetByKeyAsync(_tenantId, channel.Id, "user2@example.com", digestKey, cancellationToken: CancellationToken.None);
// Assert
fetched1.Should().NotBeNull();
@@ -349,7 +350,7 @@ public sealed class DigestAggregationTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = true
};
await _channelRepository.CreateAsync(channel);
await _channelRepository.CreateAsync(channel, cancellationToken: CancellationToken.None);
var digest = new DigestEntity
{
@@ -363,16 +364,16 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Collecting,
CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
};
await _digestRepository.UpsertAsync(digest);
await _digestRepository.UpsertAsync(digest, cancellationToken: CancellationToken.None);
// Act - Add 10 events
for (int i = 1; i <= 10; i++)
{
await _digestRepository.AddEventAsync(_tenantId, digest.Id, $$$"""{"id": {{{i}}}, "type": "scan.finding"}""");
await _digestRepository.AddEventAsync(_tenantId, digest.Id, $$$"""{"id": {{{i}}}, "type": "scan.finding"}""", cancellationToken: CancellationToken.None);
}
// Assert
var fetched = await _digestRepository.GetByIdAsync(_tenantId, digest.Id);
var fetched = await _digestRepository.GetByIdAsync(_tenantId, digest.Id, cancellationToken: CancellationToken.None);
fetched!.EventCount.Should().Be(10);
// Parse events JSON to verify all events are there
@@ -399,10 +400,10 @@ public sealed class DigestAggregationTests : IAsyncLifetime
DaysOfWeek = [1, 2, 3, 4, 5], // Weekdays
Enabled = true
};
await _quietHoursRepository.CreateAsync(quietHours);
await _quietHoursRepository.CreateAsync(quietHours, cancellationToken: CancellationToken.None);
// Act
var fetched = await _quietHoursRepository.GetForUserAsync(_tenantId, userId);
var fetched = await _quietHoursRepository.GetForUserAsync(_tenantId, userId, cancellationToken: CancellationToken.None);
// Assert
fetched.Should().ContainSingle();
@@ -428,15 +429,15 @@ public sealed class DigestAggregationTests : IAsyncLifetime
SuppressChannels = [suppressChannel],
SuppressEventTypes = ["scan.completed", "vulnerability.detected"]
};
await _maintenanceRepository.CreateAsync(window);
await _maintenanceRepository.CreateAsync(window, cancellationToken: CancellationToken.None);
// Act
var active = await _maintenanceRepository.GetActiveAsync(_tenantId);
var active = await _maintenanceRepository.GetActiveAsync(_tenantId, cancellationToken: CancellationToken.None);
// Assert - No active windows right now since it's scheduled for tomorrow
active.Should().BeEmpty();
var all = await _maintenanceRepository.ListAsync(_tenantId);
var all = await _maintenanceRepository.ListAsync(_tenantId, cancellationToken: CancellationToken.None);
all.Should().ContainSingle(w => w.Id == window.Id);
}
@@ -453,7 +454,7 @@ public sealed class DigestAggregationTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = true
};
await _channelRepository.CreateAsync(channel);
await _channelRepository.CreateAsync(channel, cancellationToken: CancellationToken.None);
// Create multiple digests
for (int i = 0; i < 10; i++)
@@ -468,13 +469,13 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Collecting,
CollectUntil = DateTimeOffset.UtcNow.AddMinutes(-i) // All ready
};
await _digestRepository.UpsertAsync(digest);
await _digestRepository.UpsertAsync(digest, cancellationToken: CancellationToken.None);
}
// Act
var results1 = await _digestRepository.GetReadyToSendAsync(limit: 100);
var results2 = await _digestRepository.GetReadyToSendAsync(limit: 100);
var results3 = await _digestRepository.GetReadyToSendAsync(limit: 100);
var results1 = await _digestRepository.GetReadyToSendAsync(limit: 100, cancellationToken: CancellationToken.None);
var results2 = await _digestRepository.GetReadyToSendAsync(limit: 100, cancellationToken: CancellationToken.None);
var results3 = await _digestRepository.GetReadyToSendAsync(limit: 100, cancellationToken: CancellationToken.None);
// Assert
var ids1 = results1.Select(d => d.Id).ToList();
@@ -509,8 +510,8 @@ public sealed class DigestAggregationTests : IAsyncLifetime
ChannelType = ChannelType.Email,
Enabled = true
};
await _channelRepository.CreateAsync(channel1);
await _channelRepository.CreateAsync(channel2);
await _channelRepository.CreateAsync(channel1, cancellationToken: CancellationToken.None);
await _channelRepository.CreateAsync(channel2, cancellationToken: CancellationToken.None);
var digest1 = new DigestEntity
{
@@ -532,16 +533,16 @@ public sealed class DigestAggregationTests : IAsyncLifetime
Status = DigestStatus.Collecting,
CollectUntil = DateTimeOffset.UtcNow.AddHours(1)
};
await _digestRepository.UpsertAsync(digest1);
await _digestRepository.UpsertAsync(digest2);
await _digestRepository.UpsertAsync(digest1, cancellationToken: CancellationToken.None);
await _digestRepository.UpsertAsync(digest2, cancellationToken: CancellationToken.None);
// Act
var tenant1Fetch = await _digestRepository.GetByKeyAsync(tenant1, channel1.Id, "user@tenant1.com", "shared-key");
var tenant2Fetch = await _digestRepository.GetByKeyAsync(tenant2, channel2.Id, "user@tenant2.com", "shared-key");
var tenant1Fetch = await _digestRepository.GetByKeyAsync(tenant1, channel1.Id, "user@tenant1.com", "shared-key", cancellationToken: CancellationToken.None);
var tenant2Fetch = await _digestRepository.GetByKeyAsync(tenant2, channel2.Id, "user@tenant2.com", "shared-key", cancellationToken: CancellationToken.None);
// Cross-tenant attempts should fail
var crossFetch1 = await _digestRepository.GetByIdAsync(tenant1, digest2.Id);
var crossFetch2 = await _digestRepository.GetByIdAsync(tenant2, digest1.Id);
var crossFetch1 = await _digestRepository.GetByIdAsync(tenant1, digest2.Id, cancellationToken: CancellationToken.None);
var crossFetch2 = await _digestRepository.GetByIdAsync(tenant2, digest1.Id, cancellationToken: CancellationToken.None);
// Assert
tenant1Fetch.Should().NotBeNull();

View File

@@ -1,12 +1,12 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Notify.Storage.Postgres.Models;
using StellaOps.Notify.Storage.Postgres.Repositories;
using StellaOps.Notify.Persistence.Postgres.Models;
using StellaOps.Notify.Persistence.Postgres.Repositories;
using Xunit;
using StellaOps.TestKit;
namespace StellaOps.Notify.Storage.Postgres.Tests;
namespace StellaOps.Notify.Persistence.Postgres.Tests;
[Collection(NotifyPostgresCollection.Name)]
public sealed class DigestRepositoryTests : IAsyncLifetime
@@ -28,8 +28,8 @@ public sealed class DigestRepositoryTests : IAsyncLifetime
_channelRepository = new ChannelRepository(dataSource, NullLogger<ChannelRepository>.Instance);
}
public Task InitializeAsync() => Task.CompletedTask;
public Task DisposeAsync() => Task.CompletedTask;
public ValueTask InitializeAsync() => ValueTask.CompletedTask;
public ValueTask DisposeAsync() => ValueTask.CompletedTask;
private async Task ResetAsync()
{

Some files were not shown because too many files have changed in this diff Show More