devops folders consolidate
@@ -11,7 +11,7 @@ Usage:
For every target file, the script scans `image:` declarations and verifies that
any image belonging to a repository listed in the release manifest matches the
exact digest or tag recorded there. Images outside of the manifest (for example,
supporting services such as `nats`) are ignored.
supporting services such as `valkey`) are ignored.
"""

from __future__ import annotations
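The docstring above describes checking `image:` declarations against a release manifest. As a rough companion (hypothetical paths, not part of the script itself), the image references declared in the compose files can be listed for manual comparison against the manifest:

    # Hypothetical spot check: list every image reference declared in the compose
    # files so it can be compared against the release manifest by hand.
    grep -hoE 'image:[[:space:]]*[^[:space:]]+' devops/compose/*.yml | sort -u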
@@ -12,11 +12,6 @@ export DOTNET_CLI_TELEMETRY_OPTOUT=1
# Prefer the curated offline feed to avoid network flakiness during CI.
export NUGET_PACKAGES="${ROOT_DIR}/.nuget/packages"
RESTORE_SOURCE="--source ${ROOT_DIR}/.nuget/packages --ignore-failed-sources"
# Ensure Mongo2Go can find OpenSSL 1.1 (needed by bundled mongod)
OPENSSL11_DIR="$ROOT_DIR/tools/openssl1.1/lib"
if [[ -d "$OPENSSL11_DIR" ]]; then
  export LD_LIBRARY_PATH="$OPENSSL11_DIR:${LD_LIBRARY_PATH:-}"
fi
RESULTS_DIR="$ROOT_DIR/out/test-results/linksets"
mkdir -p "$RESULTS_DIR"
# Restore explicitly against offline cache, then run tests without restoring again.
@@ -6,11 +6,12 @@ COUNTS=$ARTifacts/lnm-counts.json
CONFLICTS=$ARTifacts/lnm-conflicts.ndjson
mkdir -p "$ARTifacts"

mongoexport --uri "${STAGING_MONGO_URI:?set STAGING_MONGO_URI}" --collection advisoryObservations --db concelier --type=json --query '{}' --out "$ARTifacts/obs.json" >/dev/null
mongoexport --uri "${STAGING_MONGO_URI:?set STAGING_MONGO_URI}" --collection linksets --db concelier --type=json --query '{}' --out "$ARTifacts/linksets.json" >/dev/null
# Export advisory observations from PostgreSQL
psql "${STAGING_POSTGRES_URI:?set STAGING_POSTGRES_URI}" -c "COPY (SELECT row_to_json(t) FROM advisory_observations t) TO STDOUT" > "$ARTifacts/obs.json"
psql "${STAGING_POSTGRES_URI:?set STAGING_POSTGRES_URI}" -c "COPY (SELECT row_to_json(t) FROM linksets t) TO STDOUT" > "$ARTifacts/linksets.json"

OBS=$(jq length "$ARTifacts/obs.json")
LNK=$(jq length "$ARTifacts/linksets.json")
OBS=$(wc -l < "$ARTifacts/obs.json" | tr -d ' ')
LNK=$(wc -l < "$ARTifacts/linksets.json" | tr -d ' ')

cat > "$COUNTS" <<JSON
{
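Since `COPY ... TO STDOUT` with `row_to_json` emits one JSON object per output line, the `wc -l` counts correspond directly to row counts. A quick sanity check of that assumption (requires `jq`, not part of the script) could be:

    # Every exported line should parse as a single JSON object.
    jq -c 'type' "$ARTifacts/obs.json" | sort | uniq -c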
@@ -20,9 +20,8 @@ Microsoft.Extensions.Logging.Abstractions|9.0.0
Microsoft.Extensions.Options.ConfigurationExtensions|10.0.0-rc.2.25502.107
Microsoft.Extensions.Options|10.0.0-rc.2.25502.107
Microsoft.Extensions.Options|9.0.0
MongoDB.Driver|3.5.0
NATS.Client.Core|2.0.0
NATS.Client.JetStream|2.0.0
Npgsql|9.0.3
Npgsql.EntityFrameworkCore.PostgreSQL|9.0.3
RoaringBitmap|0.0.9
Serilog.AspNetCore|8.0.1
Serilog.Extensions.Hosting|8.0.0
@@ -26,8 +26,8 @@
<PackageDownload Include="Microsoft.Extensions.Logging.Abstractions" Version="[10.0.0-rc.2.25502.107]" />
<PackageDownload Include="Microsoft.Extensions.Options.ConfigurationExtensions" Version="[10.0.0-rc.2.25502.107]" />
<PackageDownload Include="Microsoft.Extensions.Options" Version="[10.0.0-rc.2.25502.107]" />
<PackageDownload Include="NATS.Client.Core" Version="[2.0.0]" />
<PackageDownload Include="NATS.Client.JetStream" Version="[2.0.0]" />
<PackageDownload Include="Npgsql" Version="[9.0.3]" />
<PackageDownload Include="Npgsql.EntityFrameworkCore.PostgreSQL" Version="[9.0.3]" />
<PackageDownload Include="RoaringBitmap" Version="[0.0.9]" />
<PackageDownload Include="Serilog.AspNetCore" Version="[8.0.1]" />
<PackageDownload Include="Serilog.Extensions.Hosting" Version="[8.0.0]" />
@@ -1,19 +1,21 @@
#!/usr/bin/env bash
set -euo pipefail

# DEVOPS-OBS-52-001: validate streaming pipeline knobs
# DEVOPS-OBS-52-001: validate streaming pipeline knobs (Valkey-based)

OUT="out/obs-stream"
mkdir -p "$OUT"

echo "[obs-stream] checking NATS connectivity"
if command -v nats >/dev/null 2>&1; then
  nats --server "${NATS_URL:-nats://localhost:4222}" req health.ping ping || true
echo "[obs-stream] checking Valkey connectivity"
if command -v valkey-cli >/dev/null 2>&1; then
  valkey-cli -h "${VALKEY_HOST:-localhost}" -p "${VALKEY_PORT:-6379}" ping || true
elif command -v redis-cli >/dev/null 2>&1; then
  redis-cli -h "${VALKEY_HOST:-localhost}" -p "${VALKEY_PORT:-6379}" ping || true
else
  echo "nats CLI not installed; skipping connectivity check" > "${OUT}/nats.txt"
  echo "valkey-cli/redis-cli not installed; skipping connectivity check" > "${OUT}/valkey.txt"
fi

echo "[obs-stream] dumping retention/partitions (Kafka-like env variables)"
env | grep -E 'KAFKA_|REDIS_|NATS_' | sort > "${OUT}/env.txt"
echo "[obs-stream] dumping retention/partitions env variables"
env | grep -E 'KAFKA_|REDIS_|VALKEY_' | sort > "${OUT}/env.txt"

echo "[obs-stream] done; outputs in $OUT"
@@ -1,77 +0,0 @@
/**
 * Aggregation helper that surfaces advisory_raw duplicate candidates prior to enabling the
 * idempotency unique index. Intended for staging/offline snapshots.
 *
 * Usage:
 *   mongo concelier ops/devops/scripts/check-advisory-raw-duplicates.js
 *
 * Environment variables:
 *   LIMIT - optional cap on number of duplicate groups to print (default 50).
 */
(function () {
  function toInt(value, fallback) {
    var parsed = parseInt(value, 10);
    return Number.isFinite(parsed) && parsed > 0 ? parsed : fallback;
  }

  var limit = typeof LIMIT !== "undefined" ? toInt(LIMIT, 50) : 50;
  var database = db.getName ? db.getSiblingDB(db.getName()) : db;
  if (!database) {
    throw new Error("Unable to resolve database handle");
  }

  print("");
  print("== advisory_raw duplicate audit ==");
  print("Database: " + database.getName());
  print("Limit   : " + limit);
  print("");

  var pipeline = [
    {
      $group: {
        _id: {
          vendor: "$source.vendor",
          upstreamId: "$upstream.upstream_id",
          contentHash: "$upstream.content_hash",
          tenant: "$tenant"
        },
        ids: { $addToSet: "$_id" },
        count: { $sum: 1 }
      }
    },
    { $match: { count: { $gt: 1 } } },
    {
      $project: {
        _id: 0,
        vendor: "$_id.vendor",
        upstreamId: "$_id.upstreamId",
        contentHash: "$_id.contentHash",
        tenant: "$_id.tenant",
        count: 1,
        ids: 1
      }
    },
    { $sort: { count: -1, vendor: 1, upstreamId: 1 } },
    { $limit: limit }
  ];

  var cursor = database.getCollection("advisory_raw").aggregate(pipeline, { allowDiskUse: true });
  var any = false;
  while (cursor.hasNext()) {
    var doc = cursor.next();
    any = true;
    print("---");
    print("vendor      : " + doc.vendor);
    print("upstream_id : " + doc.upstreamId);
    print("tenant      : " + doc.tenant);
    print("content_hash: " + doc.contentHash);
    print("count       : " + doc.count);
    print("ids         : " + doc.ids.join(", "));
  }

  if (!any) {
    print("No duplicate advisory_raw documents detected.");
  }

  print("");
})();
devops/tools/ops-scripts/check-advisory-raw-duplicates.sql (new file, 46 lines)
@@ -0,0 +1,46 @@
-- Advisory raw duplicate detection query
-- Surfaces advisory_raw duplicate candidates prior to enabling the idempotency unique index.
-- Intended for staging/offline snapshots.
--
-- Usage:
--   psql -d concelier -f ops/devops/tools/ops-scripts/check-advisory-raw-duplicates.sql
--
-- Environment variables:
--   LIMIT - optional cap on number of duplicate groups to print (default 50).

\echo '== advisory_raw duplicate audit =='
\conninfo

WITH duplicates AS (
    SELECT
        source_vendor,
        upstream_id,
        content_hash,
        tenant,
        COUNT(*) as count,
        ARRAY_AGG(id) as ids
    FROM advisory_raw
    GROUP BY source_vendor, upstream_id, content_hash, tenant
    HAVING COUNT(*) > 1
    ORDER BY COUNT(*) DESC, source_vendor, upstream_id
    LIMIT COALESCE(NULLIF(:'LIMIT', '')::INT, 50)
)
SELECT
    'vendor: ' || source_vendor || E'\n' ||
    'upstream_id: ' || upstream_id || E'\n' ||
    'tenant: ' || COALESCE(tenant, 'NULL') || E'\n' ||
    'content_hash: ' || content_hash || E'\n' ||
    'count: ' || count || E'\n' ||
    'ids: ' || ARRAY_TO_STRING(ids, ', ') AS duplicate_info
FROM duplicates;

SELECT CASE WHEN COUNT(*) = 0
    THEN 'No duplicate advisory_raw documents detected.'
    ELSE 'Found ' || COUNT(*) || ' duplicate groups.'
    END as status
FROM (
    SELECT 1 FROM advisory_raw
    GROUP BY source_vendor, upstream_id, content_hash, tenant
    HAVING COUNT(*) > 1
    LIMIT 1
) t;
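The `LIMIT` knob in the new SQL file is consumed as a psql variable (`:'LIMIT'`), so a typical invocation (path taken from the header comment) passes it with `-v`:

    # Cap the report at 100 duplicate groups; pass an empty value (-v LIMIT='') to fall back to the default of 50.
    psql -d concelier -v LIMIT=100 -f ops/devops/tools/ops-scripts/check-advisory-raw-duplicates.sql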
@@ -1,100 +0,0 @@
/**
 * Rollback script for LNM-21-102-DEV legacy advisory backfill migration.
 * Removes backfilled observations and linksets by querying the backfill_marker field,
 * then clears the tombstone markers from advisory_raw.
 *
 * Usage:
 *   mongo concelier ops/devops/scripts/rollback-lnm-backfill.js
 *
 * Environment variables:
 *   DRY_RUN    - if set to "1", only reports what would be deleted without making changes.
 *   BATCH_SIZE - optional batch size for deletions (default 500).
 *
 * After running this script, delete the migration record:
 *   db.schema_migrations.deleteOne({ _id: "20251127_lnm_legacy_backfill" })
 *
 * Then restart the Concelier service.
 */
(function () {
  var BACKFILL_MARKER = "lnm_21_102_dev";

  function toInt(value, fallback) {
    var parsed = parseInt(value, 10);
    return Number.isFinite(parsed) && parsed > 0 ? parsed : fallback;
  }

  function toBool(value) {
    return value === "1" || value === "true" || value === true;
  }

  var dryRun = typeof DRY_RUN !== "undefined" ? toBool(DRY_RUN) : false;
  var batchSize = typeof BATCH_SIZE !== "undefined" ? toInt(BATCH_SIZE, 500) : 500;
  var database = db.getName ? db.getSiblingDB(db.getName()) : db;
  if (!database) {
    throw new Error("Unable to resolve database handle");
  }

  print("");
  print("== LNM-21-102-DEV Backfill Rollback ==");
  print("Database  : " + database.getName());
  print("Dry Run   : " + dryRun);
  print("Batch Size: " + batchSize);
  print("");

  // Step 1: Count and delete backfilled observations
  var observationsCollection = database.getCollection("advisory_observations");
  var observationsFilter = { backfill_marker: BACKFILL_MARKER };
  var observationsCount = observationsCollection.countDocuments(observationsFilter);

  print("Found " + observationsCount + " backfilled observations to remove.");

  if (!dryRun && observationsCount > 0) {
    var obsResult = observationsCollection.deleteMany(observationsFilter);
    print("Deleted " + obsResult.deletedCount + " observations.");
  }

  // Step 2: Count and delete backfilled linksets
  var linksetsCollection = database.getCollection("advisory_linksets");
  var linksetsFilter = { backfill_marker: BACKFILL_MARKER };
  var linksetsCount = linksetsCollection.countDocuments(linksetsFilter);

  print("Found " + linksetsCount + " backfilled linksets to remove.");

  if (!dryRun && linksetsCount > 0) {
    var linkResult = linksetsCollection.deleteMany(linksetsFilter);
    print("Deleted " + linkResult.deletedCount + " linksets.");
  }

  // Step 3: Clear tombstone markers from advisory_raw
  var rawCollection = database.getCollection("advisory_raw");
  var rawFilter = { backfill_marker: BACKFILL_MARKER };
  var rawCount = rawCollection.countDocuments(rawFilter);

  print("Found " + rawCount + " advisory_raw documents with tombstone markers to clear.");

  if (!dryRun && rawCount > 0) {
    var rawResult = rawCollection.updateMany(rawFilter, { $unset: { backfill_marker: "" } });
    print("Cleared tombstone markers from " + rawResult.modifiedCount + " advisory_raw documents.");
  }

  // Step 4: Summary
  print("");
  print("== Rollback Summary ==");
  if (dryRun) {
    print("DRY RUN - No changes were made.");
    print("Would delete " + observationsCount + " observations.");
    print("Would delete " + linksetsCount + " linksets.");
    print("Would clear " + rawCount + " tombstone markers.");
  } else {
    print("Observations deleted: " + observationsCount);
    print("Linksets deleted    : " + linksetsCount);
    print("Tombstones cleared  : " + rawCount);
  }

  print("");
  print("Next steps:");
  print("1. Delete the migration record:");
  print('   db.schema_migrations.deleteOne({ _id: "20251127_lnm_legacy_backfill" })');
  print("2. Restart the Concelier service.");
  print("");
})();
devops/tools/ops-scripts/rollback-lnm-backfill.sql (new file, 60 lines)
@@ -0,0 +1,60 @@
-- Rollback script for LNM-21-102-DEV legacy advisory backfill migration.
-- Removes backfilled observations and linksets by querying the backfill_marker field,
-- then clears the tombstone markers from advisory_raw.
--
-- Usage:
--   psql -d concelier -f ops/devops/tools/ops-scripts/rollback-lnm-backfill.sql
--
-- Environment variables:
--   DRY_RUN - if set to "1", only reports what would be deleted without making changes.
--
-- After running this script, delete the migration record:
--   DELETE FROM schema_migrations WHERE id = '20251127_lnm_legacy_backfill';
--
-- Then restart the Concelier service.

\echo ''
\echo '== LNM-21-102-DEV Backfill Rollback =='
\conninfo

-- Count backfilled observations
SELECT 'Found ' || COUNT(*) || ' backfilled observations to remove.' as status
FROM advisory_observations
WHERE backfill_marker = 'lnm_21_102_dev';

-- Count backfilled linksets
SELECT 'Found ' || COUNT(*) || ' backfilled linksets to remove.' as status
FROM advisory_linksets
WHERE backfill_marker = 'lnm_21_102_dev';

-- Count advisory_raw tombstone markers
SELECT 'Found ' || COUNT(*) || ' advisory_raw documents with tombstone markers to clear.' as status
FROM advisory_raw
WHERE backfill_marker = 'lnm_21_102_dev';

-- Only execute if not DRY_RUN
\if :{?DRY_RUN}
\echo 'DRY RUN mode - no changes made'
\echo 'Set DRY_RUN=0 or omit it to execute the rollback'
\else
-- Step 1: Delete backfilled observations
DELETE FROM advisory_observations WHERE backfill_marker = 'lnm_21_102_dev';
\echo 'Deleted observations'

-- Step 2: Delete backfilled linksets
DELETE FROM advisory_linksets WHERE backfill_marker = 'lnm_21_102_dev';
\echo 'Deleted linksets'

-- Step 3: Clear tombstone markers from advisory_raw
UPDATE advisory_raw SET backfill_marker = NULL WHERE backfill_marker = 'lnm_21_102_dev';
\echo 'Cleared tombstone markers'
\endif

\echo ''
\echo '== Rollback Summary =='
\echo ''
\echo 'Next steps:'
\echo '1. Delete the migration record:'
\echo '   DELETE FROM schema_migrations WHERE id = ''20251127_lnm_legacy_backfill'';'
\echo '2. Restart the Concelier service.'
\echo ''
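Because `\if :{?DRY_RUN}` tests whether the psql variable is defined at all, the dry-run and real invocations differ only in whether `-v DRY_RUN=...` is passed (paths as in the header comment):

    # Report only - the \if branch sees DRY_RUN as defined and skips the deletes.
    psql -d concelier -v DRY_RUN=1 -f ops/devops/tools/ops-scripts/rollback-lnm-backfill.sql

    # Execute the rollback - leave DRY_RUN undefined so the \else branch runs.
    psql -d concelier -f ops/devops/tools/ops-scripts/rollback-lnm-backfill.sql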
@@ -1,9 +1,9 @@
#!/usr/bin/env bash
set -euo pipefail
# Synthetic probe for orchestrator infra (postgres, mongo, nats).
# Synthetic probe for orchestrator infra (postgres, valkey).
# Runs lightweight checks and writes a status file under out/orchestrator-probe/.

COMPOSE_FILE=${COMPOSE_FILE:-ops/devops/orchestrator/docker-compose.orchestrator.yml}
COMPOSE_FILE=${COMPOSE_FILE:-devops/compose/docker-compose.stella-ops.yml}
STATE_DIR=${STATE_DIR:-out/orchestrator-probe}

mkdir -p "$STATE_DIR"
@@ -18,34 +18,26 @@ timestamp() { date -u +%Y-%m-%dT%H:%M:%SZ; }
log "compose file: $COMPOSE_FILE"

PG_OK=0
MONGO_OK=0
NATS_OK=0
VALKEY_OK=0

if docker compose -f "$COMPOSE_FILE" ps orchestrator-postgres >/dev/null 2>&1; then
  if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-postgres psql -U orch -tAc "select 1" | grep -q 1; then
if docker compose -f "$COMPOSE_FILE" ps stellaops-postgres >/dev/null 2>&1; then
  if docker compose -f "$COMPOSE_FILE" exec -T stellaops-postgres psql -U stellaops -tAc "select 1" | grep -q 1; then
    PG_OK=1
  fi
fi

if docker compose -f "$COMPOSE_FILE" ps orchestrator-mongo >/dev/null 2>&1; then
  if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-mongo mongosh --quiet --eval "db.adminCommand('ping').ok" | grep -q 1; then
    MONGO_OK=1
  fi
fi

if docker compose -f "$COMPOSE_FILE" ps orchestrator-nats >/dev/null 2>&1; then
  if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-nats nats --server localhost:4222 ping >/dev/null 2>&1; then
    # publish & request to ensure traffic path works
    docker compose -f "$COMPOSE_FILE" exec -T orchestrator-nats nats --server localhost:4222 pub probe.ping "ok" >/dev/null 2>&1 || true
    NATS_OK=1
if docker compose -f "$COMPOSE_FILE" ps stellaops-valkey >/dev/null 2>&1; then
  if docker compose -f "$COMPOSE_FILE" exec -T stellaops-valkey valkey-cli ping | grep -qi pong; then
    # publish & subscribe quick check
    docker compose -f "$COMPOSE_FILE" exec -T stellaops-valkey valkey-cli publish probe.ping "ok" >/dev/null 2>&1 || true
    VALKEY_OK=1
  fi
fi

cat > "$STATE_DIR/status.txt" <<EOF
timestamp=$(timestamp)
postgres_ok=$PG_OK
mongo_ok=$MONGO_OK
nats_ok=$NATS_OK
valkey_ok=$VALKEY_OK
EOF

log "probe complete (pg=$PG_OK mongo=$MONGO_OK nats=$NATS_OK)"
log "probe complete (pg=$PG_OK valkey=$VALKEY_OK)"
@@ -1,17 +1,17 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT=$(cd "$(dirname "$0")/.." && pwd)
COMPOSE_FILE="${COMPOSE_FILE:-$ROOT/devops/orchestrator/docker-compose.orchestrator.yml}"
ROOT=$(cd "$(dirname "$0")/../.." && pwd)
COMPOSE_FILE="${COMPOSE_FILE:-$ROOT/devops/compose/docker-compose.stella-ops.yml}"
STATE_DIR="${STATE_DIR:-$ROOT/out/orchestrator-smoke}"

usage() {
  cat <<'USAGE'
Orchestrator infra smoke test
- Starts postgres + mongo + nats via docker-compose
- Starts postgres + valkey via docker-compose
- Verifies basic connectivity and prints ready endpoints

Env/flags:
  COMPOSE_FILE  path to compose file (default: ops/devops/orchestrator/docker-compose.orchestrator.yml)
  COMPOSE_FILE  path to compose file (default: devops/compose/docker-compose.stella-ops.yml)
  STATE_DIR     path for logs (default: out/orchestrator-smoke)
  SKIP_UP       set to 1 to skip compose up (assumes already running)
USAGE
@@ -21,38 +21,30 @@ if [[ ${1:-} == "-h" || ${1:-} == "--help" ]]; then usage; exit 0; fi
mkdir -p "$STATE_DIR"

if [[ "${SKIP_UP:-0}" != "1" ]]; then
  docker compose -f "$COMPOSE_FILE" up -d
  docker compose -f "$COMPOSE_FILE" up -d stellaops-postgres stellaops-valkey
fi

log() { echo "[smoke] $*"; }

log "waiting for postgres..."
for i in {1..12}; do
  if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-postgres pg_isready -U orch >/dev/null 2>&1; then break; fi
  if docker compose -f "$COMPOSE_FILE" exec -T stellaops-postgres pg_isready -U stellaops >/dev/null 2>&1; then break; fi
  sleep 5;
done

log "waiting for mongo..."
log "waiting for valkey..."
for i in {1..12}; do
  if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-mongo mongosh --quiet --eval "db.adminCommand('ping')" >/dev/null 2>&1; then break; fi
  if docker compose -f "$COMPOSE_FILE" exec -T stellaops-valkey valkey-cli ping | grep -qi pong >/dev/null 2>&1; then break; fi
  sleep 5;
done

log "waiting for nats..."
for i in {1..12}; do
  if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-nats nats --server localhost:4222 ping >/dev/null 2>&1; then break; fi
  sleep 5;
done

log "postgres DSN: postgres://orch:orchpass@localhost:55432/orchestrator"
log "mongo uri: mongodb://localhost:57017"
log "nats uri: nats://localhost:4222"
log "postgres DSN: postgres://stellaops:stellaops@localhost:5432/stellaops"
log "valkey uri: valkey://localhost:6379"

# Write readiness summary
cat > "$STATE_DIR/readiness.txt" <<EOF
postgres=postgres://orch:orchpass@localhost:55432/orchestrator
mongo=mongodb://localhost:57017
nats=nats://localhost:4222
postgres=postgres://stellaops:stellaops@localhost:5432/stellaops
valkey=valkey://localhost:6379
ready_at=$(date -u +%Y-%m-%dT%H:%M:%SZ)
EOF
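A typical run against an already-started stack only needs the flags documented in `usage()`; the script path below is hypothetical, since the diff does not show the file name:

    # Reuse running containers and point the smoke test at the consolidated compose file.
    SKIP_UP=1 COMPOSE_FILE=devops/compose/docker-compose.stella-ops.yml ./orchestrator-smoke.sh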
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Runs live TTL validation for Attestor dedupe stores against local MongoDB/Valkey.
# Runs live TTL validation for Attestor dedupe stores against local PostgreSQL/Valkey.

set -euo pipefail

@@ -30,17 +30,21 @@ trap cleanup EXIT

cat >"$compose_file" <<'YAML'
services:
  mongo:
    image: mongo:7.0
  postgres:
    image: postgres:18.1-alpine
    environment:
      POSTGRES_USER: attestor
      POSTGRES_PASSWORD: attestor
      POSTGRES_DB: attestor_ttl
    ports:
      - "27017:27017"
      - "5432:5432"
    healthcheck:
      test: ["CMD", "mongosh", "--quiet", "localhost/test", "--eval", "db.runCommand({ ping: 1 })"]
      test: ["CMD-SHELL", "pg_isready -U attestor -d attestor_ttl"]
      interval: 5s
      timeout: 3s
      retries: 20
  valkey:
    image: valkey/valkey:8-alpine
    image: valkey/valkey:9-alpine
    command: ["valkey-server", "--save", "", "--appendonly", "no"]
    ports:
      - "6379:6379"
@@ -51,7 +55,7 @@ services:
      retries: 20
YAML

echo "Starting MongoDB and Valkey containers..."
echo "Starting PostgreSQL and Valkey containers..."
$compose_cmd -f "$compose_file" up -d

wait_for_port() {
@@ -69,10 +73,10 @@ wait_for_port() {
  return 1
}

wait_for_port 127.0.0.1 27017 "MongoDB"
wait_for_port 127.0.0.1 5432 "PostgreSQL"
wait_for_port 127.0.0.1 6379 "Valkey"

export ATTESTOR_LIVE_MONGO_URI="${ATTESTOR_LIVE_MONGO_URI:-mongodb://127.0.0.1:27017}"
export ATTESTOR_LIVE_POSTGRES_URI="${ATTESTOR_LIVE_POSTGRES_URI:-Host=127.0.0.1;Port=5432;Database=attestor_ttl;Username=attestor;Password=attestor}"
export ATTESTOR_LIVE_VALKEY_URI="${ATTESTOR_LIVE_VALKEY_URI:-127.0.0.1:6379}"

echo "Running live TTL validation tests..."
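Note that `ATTESTOR_LIVE_POSTGRES_URI` uses the ADO.NET/Npgsql key=value format rather than a `postgres://` URL. An equivalent manual connectivity check with the same parameters (assuming `psql` is available on the host, not part of the script) would be:

    # Same host/port/database/credentials as the exported connection string.
    PGPASSWORD=attestor psql -h 127.0.0.1 -p 5432 -U attestor -d attestor_ttl -c 'select 1'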
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail

# Minimal helper to run the LNM-21-002/003-related slices with TRX output.
# Minimal helper to run the linkset-related test slices with TRX output.
# Use a clean environment to reduce "invalid test source" issues seen locally.
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_ROLL_FORWARD=Major
@@ -11,12 +11,12 @@ pushd "$root_dir" >/dev/null

dotnet test \
  src/Concelier/__Tests/StellaOps.Concelier.Core.Tests/StellaOps.Concelier.Core.Tests.csproj \
  --filter "AdvisoryObservationAggregationTests" \
  --filter "AdvisoryObservationAggregationTests|LinksetCorrelation" \
  --logger "trx;LogFileName=core-linksets.trx"

dotnet test \
  src/Concelier/__Tests/StellaOps.Concelier.Storage.Mongo.Tests/StellaOps.Concelier.Storage.Mongo.Tests.csproj \
  --filter "ConcelierMongoLinksetStoreTests" \
  src/Concelier/__Tests/StellaOps.Concelier.Persistence.Tests/StellaOps.Concelier.Persistence.Tests.csproj \
  --filter "LinksetStore" \
  --logger "trx;LogFileName=storage-linksets.trx"

popd >/dev/null
@@ -1,11 +1,11 @@
#!/usr/bin/env bash
set -euo pipefail

# DEVOPS-SYMS-90-005: Deploy Symbols.Server (Helm) with MinIO/Mongo dependencies.
# DEVOPS-SYMS-90-005: Deploy Symbols.Server (Helm) with RustFS/PostgreSQL dependencies.

SYMS_CHART=${SYMS_CHART:-"charts/symbols-server"}
NAMESPACE=${NAMESPACE:-"symbols"}
VALUES=${VALUES:-"ops/devops/symbols/values.yaml"}
VALUES=${VALUES:-"devops/services/symbols/values.yaml"}

echo "[symbols] creating namespace $NAMESPACE"
kubectl create namespace "$NAMESPACE" --dry-run=client -o yaml | kubectl apply -f -
@@ -3,7 +3,7 @@ set -euo pipefail

SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
ROOT=$(cd "$SCRIPT_DIR/../.." && pwd)
COMPOSE_FILE="$ROOT/ops/devops/symbols/docker-compose.symbols.yaml"
COMPOSE_FILE="$ROOT/devops/compose/docker-compose.stella-ops.yml"
PROJECT_NAME=${PROJECT_NAME:-symbolsci}
ARTIFACT_DIR=${ARTIFACT_DIR:-"$ROOT/out/symbols-ci"}
STAMP=$(date -u +"%Y%m%dT%H%M%SZ")
@@ -27,7 +27,7 @@ log "Pulling images"
docker compose -f "$COMPOSE_FILE" -p "$PROJECT_NAME" pull --ignore-pull-failures >/dev/null 2>&1 || true

log "Starting services"
docker compose -f "$COMPOSE_FILE" -p "$PROJECT_NAME" up -d --remove-orphans
docker compose -f "$COMPOSE_FILE" -p "$PROJECT_NAME" up -d stellaops-rustfs stellaops-postgres --remove-orphans

wait_http() {
  local url=$1; local name=$2; local tries=${3:-30}
@@ -42,20 +42,17 @@ wait_http() {
  return 1
}

wait_http "http://localhost:9000/minio/health/ready" "MinIO" 25
wait_http "http://localhost:8080/healthz" "Symbols.Server" 25
wait_http "http://localhost:8080/health" "RustFS" 25
wait_http "http://localhost:8081/healthz" "Symbols.Server" 25

log "Seeding bucket"
docker run --rm --network symbols-ci minio/mc:RELEASE.2024-08-17T00-00-00Z \
  alias set symbols http://minio:9000 minio minio123 >/dev/null

docker run --rm --network symbols-ci minio/mc:RELEASE.2024-08-17T00-00-00Z \
  mb -p symbols/symbols >/dev/null
log "Seeding bucket via RustFS S3-compatible API"
# RustFS auto-creates buckets on first PUT, or use AWS CLI with S3 endpoint
aws --endpoint-url http://localhost:8080 s3 mb s3://symbols 2>/dev/null || true

log "Capture readiness endpoint"
curl -fsS http://localhost:8080/healthz -o "$RUN_DIR/healthz.json"
curl -fsS http://localhost:8081/healthz -o "$RUN_DIR/healthz.json"

log "Smoke list request"
curl -fsS http://localhost:8080/ -o "$RUN_DIR/root.html" || true
curl -fsS http://localhost:8081/ -o "$RUN_DIR/root.html" || true

echo "status=pass" > "$RUN_DIR/summary.txt"
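The `aws s3 mb` seeding call still expects credentials and a region even against a local S3-compatible endpoint. A minimal environment for that step (placeholder values, adjust to whatever RustFS is actually configured with) might look like:

    # Placeholder credentials for the local S3-compatible endpoint.
    export AWS_ACCESS_KEY_ID=local
    export AWS_SECRET_ACCESS_KEY=local
    export AWS_DEFAULT_REGION=us-east-1
    aws --endpoint-url http://localhost:8080 s3 ls s3://symbols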