feat: Add VEX Status Chip component and integration tests for reachability drift detection

- Introduced `VexStatusChipComponent` to display VEX status with color coding and tooltips.
- Implemented integration tests for reachability drift detection, covering various scenarios including drift detection, determinism, and error handling.
- Enhanced `ScannerToSignalsReachabilityTests` with a null implementation of `ICallGraphSyncService` for better test isolation.
- Updated project references to include the new Reachability Drift library.
This commit is contained in:
StellaOps Bot
2025-12-20 01:26:42 +02:00
parent edc91ea96f
commit 5fc469ad98
159 changed files with 41116 additions and 2305 deletions

View File

@@ -0,0 +1,165 @@
-- Notify Schema Migration 011b: Complete deliveries Partition Migration
-- Sprint: SPRINT_3422_0001_0001 - Time-Based Partitioning
-- Task: 5.2 - Migrate data from existing table
-- Category: C (data migration, requires maintenance window)
--
-- IMPORTANT: Run this during maintenance window AFTER 011_partition_deliveries.sql
-- Prerequisites:
-- 1. Stop notification worker (pause delivery processing)
-- 2. Verify partitioned table exists: \d+ notify.deliveries_partitioned
--
-- Execution time depends on data volume. For large tables (>1M rows), consider
-- batched migration (see bottom of file).
BEGIN;
-- ============================================================================
-- Step 1: Verify partitioned table exists
-- ============================================================================
-- Abort the whole migration up front if 011_partition_deliveries.sql has not
-- been run; every step below assumes notify.deliveries_partitioned is present.
DO $$
BEGIN
IF to_regclass('notify.deliveries_partitioned') IS NULL THEN
RAISE EXCEPTION 'Partitioned table notify.deliveries_partitioned does not exist. Run 011_partition_deliveries.sql first.';
END IF;
END
$$;
-- ============================================================================
-- Step 2: Record row counts for verification
-- ============================================================================
-- Log the pre-migration row count so the Step 4 comparison has a reference
-- point in the migration output.
DO $$
DECLARE
v_rows BIGINT;
BEGIN
SELECT COUNT(*)
INTO v_rows
FROM notify.deliveries;
RAISE NOTICE 'Source table row count: %', v_rows;
END
$$;
-- ============================================================================
-- Step 3: Migrate data from old table to partitioned table
-- ============================================================================
-- Single set-based INSERT ... SELECT with an explicit column list on both
-- sides, so a schema drift between the two tables fails loudly rather than
-- silently shifting columns.
-- ON CONFLICT DO NOTHING (no conflict target) makes this step idempotent:
-- re-running after a partial failure skips rows already copied instead of
-- aborting the transaction.
-- NOTE(review): for very large tables (>1M rows per the file header), prefer
-- the batched migration approach mentioned there to bound lock duration.
INSERT INTO notify.deliveries_partitioned (
id, tenant_id, channel_id, rule_id, template_id, status,
recipient, subject, body, event_type, event_payload,
attempt, max_attempts, next_retry_at, error_message,
external_id, correlation_id, created_at, queued_at,
sent_at, delivered_at, failed_at
)
SELECT
id, tenant_id, channel_id, rule_id, template_id, status,
recipient, subject, body, event_type, event_payload,
attempt, max_attempts, next_retry_at, error_message,
external_id, correlation_id, created_at, queued_at,
sent_at, delivered_at, failed_at
FROM notify.deliveries
ON CONFLICT DO NOTHING;
-- ============================================================================
-- Step 4: Verify row counts match
-- ============================================================================
-- Compare source vs. target counts. A mismatch is surfaced as a WARNING (not
-- an exception) so the operator can inspect and decide whether to roll back.
DO $$
DECLARE
v_src BIGINT;
v_dst BIGINT;
BEGIN
SELECT COUNT(*) INTO v_src FROM notify.deliveries;
SELECT COUNT(*) INTO v_dst FROM notify.deliveries_partitioned;
IF v_src = v_dst THEN
RAISE NOTICE 'Row counts match: % rows migrated successfully', v_dst;
ELSE
RAISE WARNING 'Row count mismatch: source=% target=%. Check for conflicts.', v_src, v_dst;
END IF;
END
$$;
-- ============================================================================
-- Step 5: Swap tables
-- ============================================================================
-- Drop foreign key constraints first (if any)
-- Dynamic SQL uses format('%I', ...) so constraint names that require quoting
-- (mixed case, reserved words, special characters) are handled correctly;
-- raw string concatenation of the identifier would break on such names.
-- NOTE(review): this only drops FKs declared ON notify.deliveries (conrelid);
-- FKs in OTHER tables that reference deliveries (confrelid) follow the table
-- OID through the rename below and would end up pointing at deliveries_old —
-- confirm none exist before the maintenance window.
DO $$
DECLARE
v_constraint RECORD;
BEGIN
FOR v_constraint IN
SELECT conname FROM pg_constraint
WHERE conrelid = 'notify.deliveries'::regclass
AND contype = 'f'
LOOP
EXECUTE format('ALTER TABLE notify.deliveries DROP CONSTRAINT IF EXISTS %I', v_constraint.conname);
END LOOP;
END
$$;
-- Rename old table to backup
-- (ALTER TABLE ... RENAME changes only the table name; indexes keep their own
-- names and any objects referencing the table by OID — views, FKs elsewhere —
-- follow it to the new name. NOTE(review): verify no external objects depend
-- on notify.deliveries before the swap.)
ALTER TABLE notify.deliveries RENAME TO deliveries_old;
-- Rename partitioned table to production name
ALTER TABLE notify.deliveries_partitioned RENAME TO deliveries;
-- ============================================================================
-- Step 6: Enable RLS on new table (if applicable)
-- ============================================================================
ALTER TABLE notify.deliveries ENABLE ROW LEVEL SECURITY;
-- Create RLS policy for tenant isolation
-- current_setting(..., true) returns NULL when notify.current_tenant is not
-- set in the session, so the USING/WITH CHECK comparisons evaluate to NULL
-- and such sessions can neither see nor write any rows (table owner and
-- BYPASSRLS roles excepted).
-- NOTE(review): assumes tenant_id compares directly against the text value of
-- the setting; if tenant_id is a UUID column an explicit cast is needed here
-- — confirm against the table schema.
DROP POLICY IF EXISTS deliveries_tenant_isolation ON notify.deliveries;
CREATE POLICY deliveries_tenant_isolation ON notify.deliveries
FOR ALL
USING (tenant_id = current_setting('notify.current_tenant', true))
WITH CHECK (tenant_id = current_setting('notify.current_tenant', true));
-- ============================================================================
-- Step 7: Add comment about partitioning
-- ============================================================================
-- BUG FIX: COMMENT ON ... IS requires a string *constant*; a concatenation
-- expression such as '...' || NOW()::TEXT is a syntax error in PostgreSQL and
-- would abort the transaction at this final step. Build the statement
-- dynamically instead, with format(%L) quoting the computed text safely.
DO $$
BEGIN
EXECUTE format(
'COMMENT ON TABLE notify.deliveries IS %L',
'Notification deliveries. Partitioned monthly by created_at. Migrated on ' || NOW()::TEXT
);
END
$$;
COMMIT;
-- ============================================================================
-- Post-migration verification (run manually)
-- ============================================================================
--
-- Verify partition structure:
-- SELECT tableoid::regclass, count(*) FROM notify.deliveries GROUP BY 1;
--
-- Verify BRIN index is being used:
-- EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM notify.deliveries
-- WHERE created_at > NOW() - INTERVAL '1 day';
--
-- Verify pending deliveries query uses partition pruning:
-- EXPLAIN (ANALYZE) SELECT * FROM notify.deliveries
-- WHERE status = 'pending' AND created_at > NOW() - INTERVAL '7 days';
--
-- After verification, drop old table:
-- DROP TABLE IF EXISTS notify.deliveries_old;
-- ============================================================================
-- Resume checklist
-- ============================================================================
--
-- 1. Verify deliveries table exists:
-- SELECT COUNT(*) FROM notify.deliveries;
--
-- 2. Verify partitions exist:
-- SELECT tableoid::regclass, count(*) FROM notify.deliveries GROUP BY 1;
--
-- 3. Resume notification worker
--
-- 4. Monitor for errors in first 15 minutes
--
-- 5. After 24h validation, drop old table:
-- DROP TABLE IF EXISTS notify.deliveries_old;

View File

@@ -65,6 +65,9 @@ public sealed class DeliveryRepository : RepositoryBase<NotifyDataSource>, IDeli
public async Task<DeliveryEntity> UpsertAsync(DeliveryEntity delivery, CancellationToken cancellationToken = default)
{
// Note: With partitioned tables, ON CONFLICT requires partition key in unique constraint.
// Using INSERT ... ON CONFLICT (id, created_at) for partition-safe upsert.
// For existing records, we fall back to UPDATE if insert conflicts.
const string sql = """
INSERT INTO notify.deliveries (
id, tenant_id, channel_id, rule_id, template_id, status, recipient, subject, body,
@@ -75,7 +78,7 @@ public sealed class DeliveryRepository : RepositoryBase<NotifyDataSource>, IDeli
@event_type, @event_payload::jsonb, @attempt, @max_attempts, @next_retry_at, @error_message,
@external_id, @correlation_id, @created_at, @queued_at, @sent_at, @delivered_at, @failed_at
)
ON CONFLICT (id) DO UPDATE SET
ON CONFLICT (id, created_at) DO UPDATE SET
status = EXCLUDED.status,
recipient = EXCLUDED.recipient,
subject = EXCLUDED.subject,
@@ -432,6 +435,16 @@ public sealed class DeliveryRepository : RepositoryBase<NotifyDataSource>, IDeli
AddJsonbParameter(command, "event_payload", delivery.EventPayload);
AddParameter(command, "max_attempts", delivery.MaxAttempts);
AddParameter(command, "correlation_id", delivery.CorrelationId);
// Partition-aware parameters (required for partitioned table upsert)
AddParameter(command, "attempt", delivery.Attempt);
AddParameter(command, "next_retry_at", delivery.NextRetryAt);
AddParameter(command, "error_message", delivery.ErrorMessage);
AddParameter(command, "external_id", delivery.ExternalId);
AddParameter(command, "created_at", delivery.CreatedAt);
AddParameter(command, "queued_at", delivery.QueuedAt);
AddParameter(command, "sent_at", delivery.SentAt);
AddParameter(command, "delivered_at", delivery.DeliveredAt);
AddParameter(command, "failed_at", delivery.FailedAt);
}
private static DeliveryEntity MapDelivery(NpgsqlDataReader reader) => new()