consolidate the tests locations

StellaOps Bot
2025-12-26 01:48:24 +02:00
parent 17613acf57
commit 39359da171
2031 changed files with 2607 additions and 476 deletions

src/__Tests/load/README.md Normal file
View File

@@ -0,0 +1,118 @@
# Load Tests
This directory contains k6 load test suites for StellaOps performance testing.
## Prerequisites
- [k6](https://k6.io/docs/getting-started/installation/) installed (or use the Docker image shown below)
- Target environment accessible
- (Optional) Grafana k6 Cloud for distributed testing
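If installing k6 locally is not an option, the suites can also be run through the official `grafana/k6` container image. A sketch (the volume path and `host.docker.internal` are assumptions; adjust to your setup):

```bash
docker run --rm -i \
  -v "$PWD/src/__Tests/load:/scripts" \
  -e BASE_URL=http://host.docker.internal:5000 \
  grafana/k6 run /scripts/ttfs-load-test.js
```

Note that JSON summaries are written inside the container unless a `results/` directory is mounted as well.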
## Test Suites
### TTFS Load Test (`ttfs-load-test.js`)
Tests the Time to First Signal endpoint under various load conditions.
**Scenarios:**
- **Sustained**: 50 RPS for 5 minutes (normal operation)
- **Spike**: Ramp from 50 to 200 RPS, hold, ramp down (CI burst simulation)
- **Soak**: 25 RPS for 15 minutes (stability test)
**Thresholds (per Advisory §12.4):**
- Cache-hit P95 ≤ 250ms
- Cold-path P95 ≤ 500ms
- Error rate < 0.1%
**Run locally:**
```bash
k6 run src/__Tests/load/ttfs-load-test.js
```
**Run against staging:**
```bash
k6 run --env BASE_URL=https://staging.stellaops.local \
--env AUTH_TOKEN=$STAGING_TOKEN \
  src/__Tests/load/ttfs-load-test.js
```
**Run with custom run IDs:**
```bash
k6 run --env BASE_URL=http://localhost:5000 \
--env RUN_IDS='["run-1","run-2","run-3"]' \
  src/__Tests/load/ttfs-load-test.js
```
### Router Rate Limiting Load Test (`router-rate-limiting-load-test.js`)
Exercises Router rate limiting behavior under load (instance/environment limits, mixed routes) and validates that throttled requests return `429` with a `Retry-After` header.
**Scenarios:**
- **below_limit (A)**: sustained load below expected limits
- **above_limit (B)**: ramp above expected limits (expect some `429`)
- **route_mix (C)**: mixed-path traffic to exercise route matching/overrides
- **activation_gate (F)**: low traffic then spike (activation gate exercise)
**Run locally:**
```bash
mkdir -p results
k6 run --env BASE_URL=http://localhost:5000 \
  --env PATH=/api/test \
  src/__Tests/load/router-rate-limiting-load-test.js
```
**Run with multiple paths (route mix):**
```bash
mkdir -p results
k6 run --env BASE_URL=http://localhost:5000 \
  --env PATHS_JSON='["/api/a","/api/b","/api/c"]' \
  src/__Tests/load/router-rate-limiting-load-test.js
```
## CI Integration
Load tests can be integrated into CI pipelines. See `.gitea/workflows/load-test.yml` for an example.
```yaml
load-test-ttfs:
  runs-on: ubuntu-latest
  needs: [deploy-staging]
  steps:
    - uses: grafana/k6-action@v0.3.1
      with:
        filename: src/__Tests/load/ttfs-load-test.js
      env:
        BASE_URL: ${{ secrets.STAGING_URL }}
        AUTH_TOKEN: ${{ secrets.STAGING_TOKEN }}
```
## Results
Test results are written under `results/` as timestamped files plus a `-latest.json` copy (for example `results/ttfs-load-test-latest.json`, `results/router-rate-limiting-load-test-latest.json`, and `results/spike-test-summary.json`). Create the `results/` directory before running so the JSON artifacts can be written.
Use Grafana Cloud or a local Prometheus + Grafana stack to visualize results; raw metrics can also be exported as JSON:
```bash
k6 run --out json=results/metrics.json src/__Tests/load/ttfs-load-test.js
```
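For a local Prometheus + Grafana stack, recent k6 releases also ship an experimental Prometheus remote-write output (the flag and variable names below assume a reasonably current k6 version):

```bash
K6_PROMETHEUS_RW_SERVER_URL=http://localhost:9090/api/v1/write \
  k6 run --out experimental-prometheus-rw src/__Tests/load/ttfs-load-test.js
```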
## Writing New Load Tests
1. Create a new `.js` file in this directory
2. Define scenarios, thresholds, and the default function
3. Use custom metrics for domain-specific measurements
4. Add handleSummary for result export
5. Update this README (a minimal skeleton follows below)
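A minimal skeleton following these steps, shown against a hypothetical `/api/v1/example` endpoint (replace the endpoint, metric names, and thresholds with values for the feature under test):

```javascript
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Trend } from 'k6/metrics';

// Step 3: custom metric for a domain-specific measurement.
const exampleLatency = new Trend('example_latency_ms');

export const options = {
  // Step 2: scenarios and thresholds.
  scenarios: {
    sustained: {
      executor: 'constant-arrival-rate',
      rate: 10,
      timeUnit: '1s',
      duration: '1m',
      preAllocatedVUs: 10,
      maxVUs: 20,
    },
  },
  thresholds: {
    'http_req_failed': ['rate<0.01'],
    'example_latency_ms': ['p(95)<500'],
  },
};

const BASE_URL = __ENV.BASE_URL || 'http://localhost:5000';

export default function () {
  // '/api/v1/example' is a placeholder endpoint, not an actual StellaOps route.
  const res = http.get(`${BASE_URL}/api/v1/example`, {
    headers: {
      'Accept': 'application/json',
      'X-Tenant-Id': __ENV.TENANT_ID || 'load-test-tenant',
    },
  });
  exampleLatency.add(res.timings.duration);
  check(res, { 'status is 2xx': (r) => r.status >= 200 && r.status < 300 });
  sleep(0.1);
}

// Step 4: export results as a CI artifact.
export function handleSummary(data) {
  return {
    'results/example-load-test-latest.json': JSON.stringify(data, null, 2),
  };
}
```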
## Environment Variables
| Variable | Description | Default |
|----------|-------------|---------|
| `BASE_URL` | Target API base URL | `http://localhost:5000` |
| `RUN_IDS` | JSON array of run IDs to test | `["run-load-1",...,"run-load-5"]` |
| `TENANT_ID` | Tenant ID header value | `load-test-tenant` |
| `AUTH_TOKEN` | Bearer token for authentication | (none) |
| `METHOD` | HTTP method for router rate limiting test | `GET` |
| `PATH` | Single path for router rate limiting test | `/api/test` |
| `PATHS_JSON` | JSON array of paths for route mix | (none) |
| `RESULTS_DIR` | Output directory for JSON artifacts | `results` |
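The router rate-limiting script also reads scenario-level knobs directly from the environment (for example `BELOW_RPS`, `ABOVE_RPS`, and the per-scenario duration/VU variables defined at the top of the script). A combined staging run might look like:

```bash
mkdir -p results
k6 run --env BASE_URL=https://staging.stellaops.local \
  --env AUTH_TOKEN=$STAGING_TOKEN \
  --env BELOW_RPS=25 \
  --env ABOVE_RPS=250 \
  --env RESULTS_DIR=results \
  src/__Tests/load/router-rate-limiting-load-test.js
```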

src/__Tests/load/router-rate-limiting-load-test.js Normal file
View File

@@ -0,0 +1,201 @@
/**
* Router Rate Limiting Load Test Suite (k6)
* Reference: SPRINT_1200_001_005 (RRL-05-003)
*
* Goals:
* - Validate 429 + Retry-After behavior under load (instance and/or environment limits).
* - Measure overhead (latency) while rate limiting is enabled.
* - Exercise route-level matching via mixed-path traffic.
*
* Notes:
* - This test suite is environment-config driven. Ensure Router rate limiting is configured
* for the targeted route(s) in the environment under test.
* - "Scenario B" (environment multi-instance) is achieved by running the same test
* concurrently from multiple machines/agents.
*/
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate, Trend } from 'k6/metrics';
const BASE_URL = (__ENV.BASE_URL || 'http://localhost:5000').replace(/\/+$/, '');
const METHOD = (__ENV.METHOD || 'GET').toUpperCase();
const PATH = __ENV.PATH || '/api/test';
const PATHS_JSON = __ENV.PATHS_JSON || '';
const TENANT_ID = __ENV.TENANT_ID || 'load-test-tenant';
const AUTH_TOKEN = __ENV.AUTH_TOKEN || '';
const RESULTS_DIR = __ENV.RESULTS_DIR || 'results';
function parsePaths() {
if (!PATHS_JSON) {
return [PATH];
}
try {
const parsed = JSON.parse(PATHS_JSON);
if (Array.isArray(parsed) && parsed.length > 0) {
return parsed.map((p) => (typeof p === 'string' ? p : PATH)).filter((p) => !!p);
}
} catch {
// Ignore parse errors; fall back to single PATH.
}
return [PATH];
}
const PATHS = parsePaths();
// Custom metrics
const rateLimitDenied = new Rate('router_rate_limit_denied');
const retryAfterSeconds = new Trend('router_rate_limit_retry_after_seconds');
const status429MissingRetryAfter = new Rate('router_rate_limit_429_missing_retry_after');
// Scenario configuration (defaults can be overridden via env vars)
const BELOW_RPS = parseInt(__ENV.BELOW_RPS || '50', 10);
const ABOVE_RPS = parseInt(__ENV.ABOVE_RPS || '500', 10);
export const options = {
scenarios: {
// Scenario A: baseline below configured limits
below_limit: {
executor: 'constant-arrival-rate',
rate: BELOW_RPS,
timeUnit: '1s',
duration: __ENV.BELOW_DURATION || '2m',
preAllocatedVUs: parseInt(__ENV.BELOW_VUS || '50', 10),
maxVUs: parseInt(__ENV.BELOW_MAX_VUS || '200', 10),
tags: { scenario: 'below_limit' },
},
// Scenario B: above configured limits (expect some 429s)
above_limit: {
executor: 'ramping-arrival-rate',
startRate: BELOW_RPS,
timeUnit: '1s',
stages: [
{ duration: __ENV.ABOVE_RAMP_UP || '20s', target: ABOVE_RPS },
{ duration: __ENV.ABOVE_HOLD || '40s', target: ABOVE_RPS },
{ duration: __ENV.ABOVE_RAMP_DOWN || '20s', target: BELOW_RPS },
],
preAllocatedVUs: parseInt(__ENV.ABOVE_VUS || '100', 10),
maxVUs: parseInt(__ENV.ABOVE_MAX_VUS || '500', 10),
startTime: __ENV.ABOVE_START || '2m10s',
tags: { scenario: 'above_limit' },
},
// Scenario C: route mix (exercise route-specific limits/matching)
route_mix: {
executor: 'constant-arrival-rate',
rate: parseInt(__ENV.MIX_RPS || '100', 10),
timeUnit: '1s',
duration: __ENV.MIX_DURATION || '2m',
preAllocatedVUs: parseInt(__ENV.MIX_VUS || '75', 10),
maxVUs: parseInt(__ENV.MIX_MAX_VUS || '300', 10),
startTime: __ENV.MIX_START || '3m30s',
tags: { scenario: 'route_mix' },
},
// Scenario F: activation gate (low traffic then spike)
activation_gate: {
executor: 'ramping-arrival-rate',
startRate: 1,
timeUnit: '1s',
stages: [
{ duration: __ENV.GATE_LOW_DURATION || '2m', target: parseInt(__ENV.GATE_LOW_RPS || '5', 10) },
{ duration: __ENV.GATE_SPIKE_DURATION || '30s', target: parseInt(__ENV.GATE_SPIKE_RPS || '200', 10) },
{ duration: __ENV.GATE_RECOVERY_DURATION || '30s', target: parseInt(__ENV.GATE_LOW_RPS || '5', 10) },
],
preAllocatedVUs: parseInt(__ENV.GATE_VUS || '50', 10),
maxVUs: parseInt(__ENV.GATE_MAX_VUS || '300', 10),
startTime: __ENV.GATE_START || '5m40s',
tags: { scenario: 'activation_gate' },
},
},
thresholds: {
'http_req_failed': ['rate<0.01'],
'router_rate_limit_429_missing_retry_after': ['rate<0.001'],
},
};
export default function () {
const path = PATHS[Math.floor(Math.random() * PATHS.length)];
const normalizedPath = path.startsWith('/') ? path : `/${path}`;
const url = `${BASE_URL}${normalizedPath}`;
const headers = {
'Accept': 'application/json',
'X-Tenant-Id': TENANT_ID,
'X-Correlation-Id': `rl-load-${Date.now()}-${Math.random().toString(36).slice(2, 10)}`,
};
if (AUTH_TOKEN) {
headers['Authorization'] = `Bearer ${AUTH_TOKEN}`;
}
const res = http.request(METHOD, url, null, {
headers,
tags: { endpoint: normalizedPath },
});
const is429 = res.status === 429;
rateLimitDenied.add(is429);
if (is429) {
const retryAfter = res.headers['Retry-After'];
status429MissingRetryAfter.add(!retryAfter);
if (retryAfter) {
const parsed = parseInt(retryAfter, 10);
if (!Number.isNaN(parsed)) {
retryAfterSeconds.add(parsed);
}
}
}
check(res, {
'status is 2xx or 429': (r) => (r.status >= 200 && r.status < 300) || r.status === 429,
'Retry-After present on 429': (r) => r.status !== 429 || r.headers['Retry-After'] !== undefined,
});
sleep(0.05 + Math.random() * 0.1);
}
export function setup() {
console.log(`Starting Router rate limiting load test against ${BASE_URL}`);
console.log(`Method=${METHOD}, paths=${JSON.stringify(PATHS)}`);
}
export function handleSummary(data) {
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
function metricValue(metricName, valueName) {
const metric = data.metrics && data.metrics[metricName];
const values = metric && metric.values;
return values ? values[valueName] : undefined;
}
const summary = {
timestampUtc: new Date().toISOString(),
baseUrl: BASE_URL,
method: METHOD,
paths: PATHS,
metrics: {
httpReqFailedRate: metricValue('http_req_failed', 'rate'),
httpReqDurationP95Ms: metricValue('http_req_duration', 'p(95)'),
rateLimitDeniedRate: metricValue('router_rate_limit_denied', 'rate'),
retryAfterP95Seconds: metricValue('router_rate_limit_retry_after_seconds', 'p(95)'),
missingRetryAfterRate: metricValue('router_rate_limit_429_missing_retry_after', 'rate'),
},
notes: [
`Set RESULTS_DIR to control file output directory (default: ${RESULTS_DIR}).`,
'Ensure the results directory exists before running if you want JSON artifacts written.',
],
};
const json = JSON.stringify(data, null, 2);
const summaryJson = JSON.stringify(summary, null, 2);
return {
stdout: `${summaryJson}\n`,
[`${RESULTS_DIR}/router-rate-limiting-load-test-${timestamp}.json`]: json,
[`${RESULTS_DIR}/router-rate-limiting-load-test-latest.json`]: json,
};
}

src/__Tests/load/spike-test.js Normal file
View File

@@ -0,0 +1,227 @@
// -----------------------------------------------------------------------------
// spike-test.js
// Sprint: SPRINT_5100_0005_0001_router_chaos_suite
// Task: T1 - Load Test Harness
// Description: k6 load test for router spike testing and backpressure validation.
// -----------------------------------------------------------------------------
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate, Trend, Counter } from 'k6/metrics';
// Custom metrics for throttle behavior
const throttledRate = new Rate('throttled_requests');
const retryAfterTrend = new Trend('retry_after_seconds');
const recoveryTime = new Trend('recovery_time_ms'); // NOTE: reserved for recovery tracking; not currently populated by this script
const throttle429Count = new Counter('throttle_429_count');
const throttle503Count = new Counter('throttle_503_count');
const successCount = new Counter('success_count');
export const options = {
scenarios: {
// Phase 1: Baseline load (normal operation)
baseline: {
executor: 'constant-arrival-rate',
rate: 100,
timeUnit: '1s',
duration: '1m',
preAllocatedVUs: 50,
maxVUs: 100,
},
// Phase 2: 10x spike
spike_10x: {
executor: 'constant-arrival-rate',
rate: 1000,
timeUnit: '1s',
duration: '30s',
startTime: '1m',
preAllocatedVUs: 500,
maxVUs: 1000,
},
// Phase 3: 50x spike
spike_50x: {
executor: 'constant-arrival-rate',
rate: 5000,
timeUnit: '1s',
duration: '30s',
startTime: '2m',
preAllocatedVUs: 2000,
maxVUs: 5000,
},
// Phase 4: Recovery observation
recovery: {
executor: 'constant-arrival-rate',
rate: 100,
timeUnit: '1s',
duration: '2m',
startTime: '3m',
preAllocatedVUs: 50,
maxVUs: 100,
},
},
thresholds: {
// At least 95% of requests should succeed OR return proper throttle response
'http_req_failed{expected_response:true}': ['rate<0.05'],
// Throttled requests should have Retry-After header
'throttled_requests': ['rate>0'], // We expect some throttling during spike
// Recovery should happen within reasonable time
'recovery_time_ms': ['p(95)<30000'], // 95% recover within 30s
// Response time should be bounded even under load
'http_req_duration{expected_response:true}': ['p(95)<5000'],
},
};
const ROUTER_URL = __ENV.ROUTER_URL || 'http://localhost:8080';
const API_ENDPOINT = __ENV.API_ENDPOINT || '/api/v1/scan';
export function setup() {
console.log(`Testing router at: ${ROUTER_URL}${API_ENDPOINT}`);
// Verify router is reachable
const healthCheck = http.get(`${ROUTER_URL}/health`);
if (healthCheck.status !== 200) {
console.warn(`Router health check returned ${healthCheck.status}`);
}
return {
startTime: new Date().toISOString(),
routerUrl: ROUTER_URL,
};
}
export default function () {
const payload = JSON.stringify({
image: 'alpine:latest',
requestId: `spike-test-${__VU}-${__ITER}`,
timestamp: new Date().toISOString(),
});
const params = {
headers: {
'Content-Type': 'application/json',
'X-Request-ID': `${__VU}-${__ITER}`,
},
tags: { expected_response: 'true' },
timeout: '10s',
};
const response = http.post(`${ROUTER_URL}${API_ENDPOINT}`, payload, params);
// Handle throttle responses (429 Too Many Requests)
if (response.status === 429) {
throttledRate.add(1);
throttle429Count.add(1);
// Verify Retry-After header
const retryAfter = response.headers['Retry-After'];
check(response, {
'429 has Retry-After header': (r) => r.headers['Retry-After'] !== undefined,
'Retry-After is valid number': (r) => {
const val = r.headers['Retry-After'];
return val && !isNaN(parseInt(val));
},
'Retry-After is reasonable (1-300s)': (r) => {
const val = parseInt(r.headers['Retry-After']);
return val >= 1 && val <= 300;
},
});
if (retryAfter) {
retryAfterTrend.add(parseInt(retryAfter));
}
}
// Handle overload responses (503 Service Unavailable)
else if (response.status === 503) {
throttledRate.add(1);
throttle503Count.add(1);
check(response, {
'503 has Retry-After header': (r) => r.headers['Retry-After'] !== undefined,
});
const retryAfter = response.headers['Retry-After'];
if (retryAfter) {
retryAfterTrend.add(parseInt(retryAfter));
}
}
// Handle success responses
else {
throttledRate.add(0);
successCount.add(1);
check(response, {
'status is 200 or 202': (r) => r.status === 200 || r.status === 202,
'response has body': (r) => r.body && r.body.length > 0,
'response time < 5s': (r) => r.timings.duration < 5000,
});
}
// Track any errors
if (response.status >= 500 && response.status !== 503) {
check(response, {
'no unexpected 5xx errors': () => false,
});
}
}
export function teardown(data) {
console.log(`Test completed. Started at: ${data.startTime}`);
console.log(`Router URL: ${data.routerUrl}`);
}
export function handleSummary(data) {
const summary = {
testRun: {
startTime: new Date().toISOString(),
routerUrl: ROUTER_URL,
},
metrics: {
totalRequests: data.metrics.http_reqs ? data.metrics.http_reqs.values.count : 0,
throttled429: data.metrics.throttle_429_count ? data.metrics.throttle_429_count.values.count : 0,
throttled503: data.metrics.throttle_503_count ? data.metrics.throttle_503_count.values.count : 0,
successful: data.metrics.success_count ? data.metrics.success_count.values.count : 0,
throttleRate: data.metrics.throttled_requests ? data.metrics.throttled_requests.values.rate : 0,
retryAfterAvg: data.metrics.retry_after_seconds ? data.metrics.retry_after_seconds.values.avg : null,
retryAfterP95: data.metrics.retry_after_seconds ? data.metrics.retry_after_seconds.values['p(95)'] : null,
},
thresholds: data.thresholds,
checks: data.metrics.checks ? {
passes: data.metrics.checks.values.passes,
fails: data.metrics.checks.values.fails,
rate: data.metrics.checks.values.rate,
} : null,
};
return {
'results/spike-test-summary.json': JSON.stringify(summary, null, 2),
stdout: textSummary(data, { indent: ' ', enableColors: true }),
};
}
function textSummary(data, options) {
let output = '\n=== Router Spike Test Summary ===\n\n';
const totalReqs = data.metrics.http_reqs ? data.metrics.http_reqs.values.count : 0;
const throttled429 = data.metrics.throttle_429_count ? data.metrics.throttle_429_count.values.count : 0;
const throttled503 = data.metrics.throttle_503_count ? data.metrics.throttle_503_count.values.count : 0;
const successful = data.metrics.success_count ? data.metrics.success_count.values.count : 0;
output += `Total Requests: ${totalReqs}\n`;
output += `Successful (2xx): ${successful}\n`;
output += `Throttled (429): ${throttled429}\n`;
output += `Overloaded (503): ${throttled503}\n`;
output += `Throttle Rate: ${((throttled429 + throttled503) / totalReqs * 100).toFixed(2)}%\n`;
if (data.metrics.retry_after_seconds) {
output += `\nRetry-After Header:\n`;
output += ` Avg: ${data.metrics.retry_after_seconds.values.avg.toFixed(2)}s\n`;
output += ` P95: ${data.metrics.retry_after_seconds.values['p(95)'].toFixed(2)}s\n`;
}
output += '\nThreshold Results:\n';
for (const [name, result] of Object.entries(data.thresholds || {})) {
output += ` ${result.ok ? 'PASS' : 'FAIL'}: ${name}\n`;
}
return output;
}

View File

@@ -0,0 +1,55 @@
{
"description": "Router chaos test thresholds for SPRINT_5100_0005_0001",
"thresholds": {
"recovery_time_seconds": {
"max": 30,
"description": "Maximum time to recover after load spike"
},
"throttle_rate_max": {
"max": 0.95,
"description": "Maximum percentage of requests that can be throttled during spike"
},
"success_rate_baseline": {
"min": 0.99,
"description": "Minimum success rate during baseline load"
},
"success_rate_recovery": {
"min": 0.95,
"description": "Minimum success rate during recovery phase"
},
"retry_after_max_seconds": {
"max": 300,
"description": "Maximum Retry-After value in seconds"
},
"retry_after_min_seconds": {
"min": 1,
"description": "Minimum Retry-After value in seconds"
},
"response_time_p95_ms": {
"max": 5000,
"description": "95th percentile response time under normal load"
},
"data_loss_rate": {
"max": 0,
"description": "No data loss allowed during throttling"
}
},
"scenarios": {
"baseline": {
"expected_throttle_rate": 0.01,
"expected_success_rate": 0.99
},
"spike_10x": {
"expected_throttle_rate": 0.5,
"expected_success_rate": 0.5
},
"spike_50x": {
"expected_throttle_rate": 0.9,
"expected_success_rate": 0.1
},
"recovery": {
"expected_throttle_rate": 0.05,
"expected_success_rate": 0.95
}
}
}

src/__Tests/load/ttfs-load-test.js Normal file
View File

@@ -0,0 +1,226 @@
/**
* TTFS (Time to First Signal) Load Test Suite
* Reference: SPRINT_0341_0001_0001 Task T13
*
* Tests the /first-signal endpoint under various load scenarios.
* Requirements from Advisory §12.4:
* - Cache-hit P95 ≤ 250ms
* - Cold-path P95 ≤ 500ms
* - Error rate < 0.1%
*/
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate, Trend, Counter } from 'k6/metrics';
import { textSummary } from 'https://jslib.k6.io/k6-summary/0.0.3/index.js';
// Custom metrics
const cacheHitLatency = new Trend('ttfs_cache_hit_latency_ms');
const coldPathLatency = new Trend('ttfs_cold_path_latency_ms');
const errorRate = new Rate('ttfs_error_rate');
const signalKindCounter = new Counter('ttfs_signal_kind_distribution');
// Configuration
export const options = {
scenarios: {
// Scenario 1: Sustained load - simulates normal operation
sustained: {
executor: 'constant-arrival-rate',
rate: 50,
timeUnit: '1s',
duration: '5m',
preAllocatedVUs: 50,
maxVUs: 100,
tags: { scenario: 'sustained' },
},
// Scenario 2: Spike test - simulates CI pipeline burst
spike: {
executor: 'ramping-arrival-rate',
startRate: 50,
timeUnit: '1s',
stages: [
{ duration: '30s', target: 200 }, // Ramp to 200 RPS
{ duration: '1m', target: 200 }, // Hold
{ duration: '30s', target: 50 }, // Ramp down
],
preAllocatedVUs: 100,
maxVUs: 300,
startTime: '5m30s',
tags: { scenario: 'spike' },
},
// Scenario 3: Soak test - long running stability
soak: {
executor: 'constant-arrival-rate',
rate: 25,
timeUnit: '1s',
duration: '15m',
preAllocatedVUs: 30,
maxVUs: 50,
startTime: '8m',
tags: { scenario: 'soak' },
},
},
thresholds: {
// Advisory requirements: §12.4
'ttfs_cache_hit_latency_ms{scenario:sustained}': ['p(95)<250'], // P95 ≤ 250ms
'ttfs_cache_hit_latency_ms{scenario:spike}': ['p(95)<350'], // Allow slightly higher during spike
'ttfs_cold_path_latency_ms{scenario:sustained}': ['p(95)<500'], // P95 ≤ 500ms
'ttfs_cold_path_latency_ms{scenario:spike}': ['p(95)<750'], // Allow slightly higher during spike
'ttfs_error_rate': ['rate<0.001'], // < 0.1% errors
'http_req_duration{scenario:sustained}': ['p(95)<300'],
'http_req_duration{scenario:spike}': ['p(95)<500'],
'http_req_failed': ['rate<0.01'], // HTTP failures < 1%
},
};
// Environment configuration
const BASE_URL = __ENV.BASE_URL || 'http://localhost:5000';
const RUN_IDS = JSON.parse(__ENV.RUN_IDS || '["run-load-1","run-load-2","run-load-3","run-load-4","run-load-5"]');
const TENANT_ID = __ENV.TENANT_ID || 'load-test-tenant';
const AUTH_TOKEN = __ENV.AUTH_TOKEN || '';
/**
* Main test function - called for each VU iteration
*/
export default function () {
const runId = RUN_IDS[Math.floor(Math.random() * RUN_IDS.length)];
const url = `${BASE_URL}/api/v1/orchestrator/runs/${runId}/first-signal`;
const params = {
headers: {
'Accept': 'application/json',
'X-Tenant-Id': TENANT_ID,
'X-Correlation-Id': `load-test-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
},
tags: { endpoint: 'first-signal' },
};
// Add auth if provided
if (AUTH_TOKEN) {
params.headers['Authorization'] = `Bearer ${AUTH_TOKEN}`;
}
const start = Date.now();
const response = http.get(url, params);
const duration = Date.now() - start;
// Track latency by cache status
const cacheStatus = response.headers['Cache-Status'] || response.headers['X-Cache-Status'];
if (cacheStatus && cacheStatus.toLowerCase().includes('hit')) {
cacheHitLatency.add(duration);
} else {
coldPathLatency.add(duration);
}
// Validate response
const checks = check(response, {
'status is 200 or 204 or 304': (r) => [200, 204, 304].includes(r.status),
    'has ETag header': (r) => r.status === 200 ? !!(r.headers['ETag'] || r.headers['Etag']) : true, // k6 canonicalizes header names, so the stored key may be 'Etag'
'has Cache-Status header': (r) => !!cacheStatus,
'response time < 500ms': (r) => r.timings.duration < 500,
'valid JSON response': (r) => {
if (r.status !== 200) return true;
try {
const body = JSON.parse(r.body);
return body.runId !== undefined;
} catch {
return false;
}
},
'has signal kind': (r) => {
if (r.status !== 200) return true;
try {
const body = JSON.parse(r.body);
return !body.firstSignal || ['passed', 'failed', 'degraded', 'partial', 'pending'].includes(body.firstSignal.kind);
} catch {
return false;
}
},
});
errorRate.add(!checks);
// Extract signal kind for distribution analysis
if (response.status === 200) {
try {
const body = JSON.parse(response.body);
if (body.firstSignal?.kind) {
signalKindCounter.add(1, { kind: body.firstSignal.kind });
}
} catch {
// Ignore parse errors
}
}
// Minimal sleep to allow for realistic load patterns
sleep(0.05 + Math.random() * 0.1); // 50-150ms between requests per VU
}
/**
 * Conditional request test - tests ETag/304 behavior.
 * Not wired into the scenarios above; run it by pointing a scenario's `exec` option at this export.
 */
export function conditionalRequest() {
const runId = RUN_IDS[0];
const url = `${BASE_URL}/api/v1/orchestrator/runs/${runId}/first-signal`;
// First request to get ETag
const firstResponse = http.get(url, {
headers: { 'Accept': 'application/json', 'X-Tenant-Id': TENANT_ID },
});
if (firstResponse.status !== 200) return;
  const etag = firstResponse.headers['ETag'] || firstResponse.headers['Etag'];
if (!etag) return;
// Conditional request
const conditionalResponse = http.get(url, {
headers: {
'Accept': 'application/json',
'X-Tenant-Id': TENANT_ID,
'If-None-Match': etag,
},
tags: { request_type: 'conditional' },
});
check(conditionalResponse, {
'conditional request returns 304': (r) => r.status === 304,
});
}
/**
* Setup function - runs once before the test
*/
export function setup() {
console.log(`Starting TTFS load test against ${BASE_URL}`);
console.log(`Testing with ${RUN_IDS.length} run IDs`);
// Verify endpoint is accessible
const healthCheck = http.get(`${BASE_URL}/health`, { timeout: '5s' });
if (healthCheck.status !== 200) {
console.warn(`Health check returned ${healthCheck.status} - proceeding anyway`);
}
return { startTime: Date.now() };
}
/**
* Teardown function - runs once after the test
*/
export function teardown(data) {
const duration = (Date.now() - data.startTime) / 1000;
console.log(`TTFS load test completed in ${duration.toFixed(1)}s`);
}
/**
* Generate test summary
*/
export function handleSummary(data) {
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
return {
'stdout': textSummary(data, { indent: ' ', enableColors: true }),
[`results/ttfs-load-test-${timestamp}.json`]: JSON.stringify(data, null, 2),
'results/ttfs-load-test-latest.json': JSON.stringify(data, null, 2),
};
}