- Introduced `sink-detect.js` with various security sink detection patterns categorized by type (e.g., command injection, SQL injection, file operations). - Implemented functions to build a lookup map for fast sink detection and to match sink calls against known patterns. - Added `package-lock.json` for dependency management.
236 lines
8.2 KiB
C#
236 lines
8.2 KiB
C#
// -----------------------------------------------------------------------------
// BackpressureVerificationTests.cs
// Sprint: SPRINT_5100_0005_0001_router_chaos_suite
// Task: T2 - Backpressure Verification Tests
// Description: Verify router emits correct 429/503 responses with Retry-After.
// -----------------------------------------------------------------------------

|
using System.Globalization;
using System.Net;

using FluentAssertions;

using StellaOps.Chaos.Router.Tests.Fixtures;

|
namespace StellaOps.Chaos.Router.Tests;
|
|
|
|
/// <summary>
/// Chaos-suite backpressure tests: verify the router emits correct 429/503
/// responses under load, that throttled responses carry a usable
/// <c>Retry-After</c> header, and that clients honoring it eventually succeed.
/// </summary>
[Trait("Category", "Chaos")]
[Trait("Category", "Router")]
public class BackpressureVerificationTests : IClassFixture<RouterTestFixture>
{
    private readonly RouterTestFixture _fixture;

    public BackpressureVerificationTests(RouterTestFixture fixture)
    {
        _fixture = fixture;
    }

    [Fact]
    public async Task Router_UnderLoad_Returns429WithRetryAfter()
    {
        // Arrange
        var client = _fixture.CreateClient();
        var tasks = new List<Task<HttpResponseMessage>>();

        // Act - send a burst of requests to provoke throttling.
        for (var i = 0; i < 1000; i++)
        {
            tasks.Add(client.PostAsync("/api/v1/scan", RouterTestFixture.CreateScanRequest()));
        }

        var responses = await Task.WhenAll(tasks);

        // Assert - any throttled response must carry a usable Retry-After.
        var throttled = responses.Where(r => r.StatusCode == HttpStatusCode.TooManyRequests).ToList();

        // Note: this burst may not trigger throttling if the router is not
        // under significant load; in production chaos testing we expect it to.
        // (foreach over an empty list is a no-op, so no count guard is needed.)
        foreach (var response in throttled)
        {
            response.Headers.Should().Contain(
                h => h.Key.Equals("Retry-After", StringComparison.OrdinalIgnoreCase),
                "429 response should have Retry-After header");

            // TryGetValues avoids an InvalidOperationException on a missing
            // header; the assertion above already reports that failure mode.
            response.Headers.TryGetValues("Retry-After", out var values).Should().BeTrue();
            var retryAfter = values!.FirstOrDefault();
            retryAfter.Should().NotBeNull();

            // Retry-After is a wire value: parse with the invariant culture.
            int.TryParse(retryAfter, NumberStyles.Integer, CultureInfo.InvariantCulture, out var seconds)
                .Should().BeTrue("Retry-After should be a valid integer");

            seconds.Should().BeInRange(1, 300,
                "Retry-After should be reasonable (1-300 seconds)");
        }
    }

    [Fact]
    public async Task Router_UnderLoad_Returns503WhenOverloaded()
    {
        // Arrange - shrink the router's limits so overload is reachable.
        await _fixture.ConfigureLowLimitsAsync();
        var client = _fixture.CreateClient();

        // Act - massive burst. Materialize the Select so all requests are
        // started eagerly rather than lazily during WhenAll's enumeration.
        var tasks = Enumerable.Range(0, 5000)
            .Select(_ => client.PostAsync("/api/v1/scan", RouterTestFixture.CreateScanRequest()))
            .ToArray();

        var responses = await Task.WhenAll(tasks);

        // Assert - should see 503s when completely overloaded.
        var overloaded = responses.Where(r =>
            r.StatusCode == HttpStatusCode.ServiceUnavailable).ToList();

        // If we get 503s, they should have Retry-After headers.
        foreach (var response in overloaded)
        {
            response.Headers.Should().Contain(
                h => h.Key.Equals("Retry-After", StringComparison.OrdinalIgnoreCase),
                "503 response should have Retry-After header");
        }
    }

    [Fact]
    public async Task Router_RetryAfterHonored_EventuallySucceeds()
    {
        // Arrange
        var client = _fixture.CreateClient();
        const int maxRetries = 5;
        var retryCount = 0;
        HttpResponseMessage? response = null;

        // Act - keep trying until success or max retries, honoring
        // (a capped) Retry-After between attempts.
        while (retryCount < maxRetries)
        {
            response = await client.PostAsync("/api/v1/scan", RouterTestFixture.CreateScanRequest());

            if (response.StatusCode != HttpStatusCode.TooManyRequests)
            {
                break;
            }

            // Fix: GetValues throws InvalidOperationException when the header
            // is absent; TryGetValues lets a header-less 429 fall through to a
            // plain retry instead of crashing the test.
            if (response.Headers.TryGetValues("Retry-After", out var values)
                && int.TryParse(values.FirstOrDefault(), NumberStyles.Integer,
                    CultureInfo.InvariantCulture, out var retryAfter))
            {
                // Wait for the Retry-After duration, capped for test speed.
                var waitTime = Math.Min(retryAfter, 5);
                await Task.Delay(TimeSpan.FromSeconds(waitTime + 1));
            }

            retryCount++;
        }

        // Assert - eventually should succeed.
        response.Should().NotBeNull();

        if (retryCount > 0)
        {
            // Fix: the original passed the because-string into the
            // params BeOneOf overload; use the collection + because overload.
            response!.StatusCode.Should().BeOneOf(
                new[] { HttpStatusCode.OK, HttpStatusCode.Accepted },
                "Request should eventually succeed after honoring Retry-After");
        }
    }

    [Fact]
    public async Task Router_ThrottleMetrics_AreExposed()
    {
        // Arrange
        var client = _fixture.CreateClient();

        // Trigger some requests (may or may not cause throttling).
        var tasks = Enumerable.Range(0, 100)
            .Select(_ => client.PostAsync("/api/v1/scan", RouterTestFixture.CreateScanRequest()))
            .ToArray();
        await Task.WhenAll(tasks);

        // Act - check metrics endpoint.
        var metricsResponse = await client.GetAsync("/metrics");

        // Assert - only inspect the body when the endpoint is exposed;
        // some router configurations do not publish /metrics.
        if (metricsResponse.IsSuccessStatusCode)
        {
            var metrics = await metricsResponse.Content.ReadAsStringAsync();

            // Common Prometheus-style metric names; exact names depend on
            // the router implementation.
            var expectedMetrics = new[]
            {
                "http_requests_total",
                "http_request_duration",
            };

            // At least some metrics should be present.
            expectedMetrics.Any(m => metrics.Contains(m, StringComparison.Ordinal)).Should().BeTrue(
                "Metrics endpoint should expose request metrics");
        }
    }

    [Fact]
    public async Task Router_ResponseHeaders_IncludeRateLimitInfo()
    {
        // Arrange
        var client = _fixture.CreateClient();

        // Act
        var response = await client.PostAsync("/api/v1/scan", RouterTestFixture.CreateScanRequest());

        // Assert - check for rate limit headers (both the legacy X- prefixed
        // names and the IETF draft RateLimit-* names). These headers are
        // optional but recommended for rate-limited APIs.
        var rateLimitHeaders = new[]
        {
            "X-RateLimit-Limit",
            "X-RateLimit-Remaining",
            "X-RateLimit-Reset",
            "RateLimit-Limit",
            "RateLimit-Remaining",
            "RateLimit-Reset"
        };

        // Informational only - not all routers include these headers.
        var presentHeaders = rateLimitHeaders
            .Where(h => response.Headers.Contains(h))
            .ToList();

        Console.WriteLine($"Rate limit headers present: {string.Join(", ", presentHeaders)}");
    }

    [Theory]
    [InlineData(10)]
    [InlineData(50)]
    [InlineData(100)]
    public async Task Router_ConcurrentRequests_HandledGracefully(int concurrency)
    {
        // Arrange
        var client = _fixture.CreateClient();

        // Act - send concurrent requests.
        var tasks = Enumerable.Range(0, concurrency)
            .Select(_ => client.PostAsync("/api/v1/scan", RouterTestFixture.CreateScanRequest()))
            .ToArray();

        var responses = await Task.WhenAll(tasks);

        // Assert - every response must be one of the statuses a
        // well-behaved, possibly-throttling router is allowed to return.
        var validStatuses = new[]
        {
            HttpStatusCode.OK,
            HttpStatusCode.Accepted,
            HttpStatusCode.TooManyRequests,
            HttpStatusCode.ServiceUnavailable
        };

        foreach (var response in responses)
        {
            response.StatusCode.Should().BeOneOf(validStatuses,
                $"Response should be a valid status code for concurrency level {concurrency}");
        }

        // Informational: report the success rate for this concurrency level.
        var successCount = responses.Count(r =>
            r.StatusCode == HttpStatusCode.OK || r.StatusCode == HttpStatusCode.Accepted);

        var successRate = (double)successCount / responses.Length;
        Console.WriteLine($"Concurrency {concurrency}: Success rate = {successRate:P2}");
    }
}
|