feat: Implement BerkeleyDB reader for RPM databases
Some checks failed
AOC Guard CI / aoc-guard (push) Has been cancelled
AOC Guard CI / aoc-verify (push) Has been cancelled
Concelier Attestation Tests / attestation-tests (push) Has been cancelled
Docs CI / lint-and-preview (push) Has been cancelled
Policy Lint & Smoke / policy-lint (push) Has been cancelled
Scanner Analyzers / Discover Analyzers (push) Has been cancelled
Scanner Analyzers / Build Analyzers (push) Has been cancelled
Scanner Analyzers / Test Language Analyzers (push) Has been cancelled
Scanner Analyzers / Validate Test Fixtures (push) Has been cancelled
Scanner Analyzers / Verify Deterministic Output (push) Has been cancelled
console-runner-image / build-runner-image (push) Has been cancelled
wine-csp-build / Build Wine CSP Image (push) Has been cancelled
wine-csp-build / Integration Tests (push) Has been cancelled
wine-csp-build / Security Scan (push) Has been cancelled
wine-csp-build / Generate SBOM (push) Has been cancelled
wine-csp-build / Publish Image (push) Has been cancelled
wine-csp-build / Air-Gap Bundle (push) Has been cancelled
wine-csp-build / Test Summary (push) Has been cancelled

- Added BerkeleyDbReader class to read and extract RPM header blobs from BerkeleyDB hash databases.
- Implemented methods to detect BerkeleyDB format and extract values, including handling of page sizes and magic numbers.
- Added tests for BerkeleyDbReader to ensure correct functionality and header extraction.

feat: Add Yarn PnP data tests

- Created YarnPnpDataTests to validate package resolution and data loading from Yarn PnP cache.
- Implemented tests for resolved keys, package presence, and loading from cache structure.

test: Add egg-info package fixtures for Python tests

- Created egg-info package fixtures for testing Python analyzers.
- Included PKG-INFO, entry_points.txt, and installed-files.txt for comprehensive coverage.

test: Enhance RPM database reader tests

- Added tests for RpmDatabaseReader to validate fallback to legacy packages when SQLite is missing.
- Implemented helper methods to create legacy package files and RPM headers for testing.

test: Implement dual signing tests

- Added DualSignTests to validate secondary signature addition when configured.
- Created stub implementations for crypto providers and key resolvers to facilitate testing.

chore: Update CI script for Playwright Chromium installation

- Modified ci-console-exports.sh to ensure deterministic Chromium binary installation for console exports tests.
- Added checks for Windows compatibility and environment variable setups for Playwright browsers.
This commit is contained in:
StellaOps Bot
2025-12-07 16:24:45 +02:00
parent e3f28a21ab
commit 11597679ed
199 changed files with 9809 additions and 4404 deletions

View File

@@ -0,0 +1,184 @@
using System;
using System.Buffers.Binary;
using StellaOps.Scanner.Analyzers.OS.Rpm.Internal;
using Xunit;
namespace StellaOps.Scanner.Analyzers.OS.Tests.Rpm;
/// <summary>
/// Tests for <c>BerkeleyDbReader</c>: detection of the BerkeleyDB on-disk format via
/// the hash/btree magic numbers (in both byte orders) and extraction of RPM header
/// blobs from synthetic BerkeleyDB page images.
/// </summary>
public sealed class BerkeleyDbReaderTests
{
    [Fact]
    public void IsBerkeleyDb_ReturnsFalse_ForEmptyData()
    {
        var data = Array.Empty<byte>();

        Assert.False(BerkeleyDbReader.IsBerkeleyDb(data));
    }

    [Fact]
    public void IsBerkeleyDb_ReturnsFalse_ForSmallData()
    {
        // Too short to even contain the magic field at offset 12.
        var data = new byte[10];

        Assert.False(BerkeleyDbReader.IsBerkeleyDb(data));
    }

    [Fact]
    public void IsBerkeleyDb_ReturnsFalse_ForNonBdbData()
    {
        // Deterministic pseudo-random bytes (fixed seed) carrying no BDB magic.
        var data = new byte[64];
        new Random(42).NextBytes(data);

        Assert.False(BerkeleyDbReader.IsBerkeleyDb(data));
    }

    [Fact]
    public void IsBerkeleyDb_ReturnsTrue_ForBdbHashMagicBigEndian()
    {
        // BerkeleyDB Hash magic at offset 12: 0x00061561 (big-endian).
        var data = new byte[20];
        BinaryPrimitives.WriteUInt32BigEndian(data.AsSpan(12), 0x00061561);

        Assert.True(BerkeleyDbReader.IsBerkeleyDb(data));
    }

    [Fact]
    public void IsBerkeleyDb_ReturnsTrue_ForBdbHashMagicLittleEndian()
    {
        // Writing 0x61150600 big-endian yields the byte sequence of the hash magic
        // as stored by a little-endian BerkeleyDB file.
        var data = new byte[20];
        BinaryPrimitives.WriteUInt32BigEndian(data.AsSpan(12), 0x61150600);

        Assert.True(BerkeleyDbReader.IsBerkeleyDb(data));
    }

    [Fact]
    public void IsBerkeleyDb_ReturnsTrue_ForBdbBtreeMagicBigEndian()
    {
        // BerkeleyDB Btree magic at offset 12: 0x00053162 (big-endian).
        var data = new byte[20];
        BinaryPrimitives.WriteUInt32BigEndian(data.AsSpan(12), 0x00053162);

        Assert.True(BerkeleyDbReader.IsBerkeleyDb(data));
    }

    [Fact]
    public void IsBerkeleyDb_ReturnsTrue_ForBdbBtreeMagicLittleEndian()
    {
        // Writing 0x62310500 big-endian yields the byte sequence of the btree magic
        // as stored by a little-endian BerkeleyDB file.
        var data = new byte[20];
        BinaryPrimitives.WriteUInt32BigEndian(data.AsSpan(12), 0x62310500);

        Assert.True(BerkeleyDbReader.IsBerkeleyDb(data));
    }

    [Fact]
    public void ExtractValues_ReturnsEmptyList_ForSmallData()
    {
        // 256 bytes cannot hold a metadata page plus a data page; nothing to extract.
        var data = new byte[256];

        var result = BerkeleyDbReader.ExtractValues(data);

        Assert.Empty(result);
    }

    [Fact]
    public void ExtractValues_FindsRpmHeaders_WithValidMagic()
    {
        // Data with a minimal but structurally valid RPM header inside page 1.
        var data = CreateDataWithRpmHeader(pageSize: 4096);

        var result = BerkeleyDbReader.ExtractValues(data);

        Assert.NotEmpty(result);
    }

    [Fact]
    public void ExtractValuesWithOverflow_FindsRpmHeaders()
    {
        // The overflow-aware scan must find a valid RPM header anywhere in the file.
        var data = CreateDataWithRpmHeader(pageSize: 4096);

        var result = BerkeleyDbReader.ExtractValuesWithOverflow(data);

        Assert.NotEmpty(result);
    }

    [Fact]
    public void ExtractValues_IgnoresInvalidHeaders()
    {
        // RPM magic present, but the header structure is invalid and must be rejected.
        var data = new byte[8192];

        // Set up as BDB (hash magic at offset 12).
        BinaryPrimitives.WriteUInt32BigEndian(data.AsSpan(12), 0x00061561);
        // Page size at offset 20 (little-endian for an LE BDB file).
        BinaryPrimitives.WriteInt32LittleEndian(data.AsSpan(20), 4096);

        // Write RPM magic inside page 1 with invalid reserved bytes.
        var rpmOffset = 4096 + 100;
        data[rpmOffset] = 0x8e;
        data[rpmOffset + 1] = 0xad;
        data[rpmOffset + 2] = 0xe8;
        data[rpmOffset + 3] = 0xab;
        // Reserved bytes must be 0 in a valid header; force one non-zero.
        data[rpmOffset + 4] = 0xFF;

        var result = BerkeleyDbReader.ExtractValues(data);

        Assert.Empty(result);
    }

    [Fact]
    public void ExtractValuesWithOverflow_KeepsHeadersAtDistinctOffsets()
    {
        // Identical header blobs written at two different offsets: both must be
        // returned, because extraction does not deduplicate at the value level.
        // (Previously misnamed "ExtractValues_DeduplicatesHeaders", which asserted
        // the exact opposite of deduplication.)
        var data = new byte[16384];

        // Set up as BDB (hash magic + page size).
        BinaryPrimitives.WriteUInt32BigEndian(data.AsSpan(12), 0x00061561);
        BinaryPrimitives.WriteInt32LittleEndian(data.AsSpan(20), 4096);

        // Write the same RPM header at two different offsets.
        WriteMinimalRpmHeader(data, 4096 + 100);
        WriteMinimalRpmHeader(data, 8192 + 100);

        var result = BerkeleyDbReader.ExtractValuesWithOverflow(data);

        Assert.Equal(2, result.Count);
    }

    /// <summary>
    /// Builds a buffer of three pages: a BDB metadata page (hash magic + page size)
    /// followed by a minimal RPM header written inside page 1 (at pageSize + 100,
    /// not at the page boundary itself).
    /// </summary>
    private static byte[] CreateDataWithRpmHeader(int pageSize)
    {
        var data = new byte[pageSize * 3];

        // BDB metadata page: hash magic at offset 12, page size at offset 20.
        BinaryPrimitives.WriteUInt32BigEndian(data.AsSpan(12), 0x00061561);
        BinaryPrimitives.WriteInt32LittleEndian(data.AsSpan(20), pageSize);

        // RPM header inside page 1.
        WriteMinimalRpmHeader(data, pageSize + 100);
        return data;
    }

    /// <summary>
    /// Writes a minimal, structurally valid RPM header at <paramref name="offset"/>:
    /// magic, zero reserved bytes, one index entry (NAME, STRING) and a 16-byte store.
    /// </summary>
    private static void WriteMinimalRpmHeader(byte[] data, int offset)
    {
        // RPM header magic: 8e ad e8 ab.
        data[offset] = 0x8e;
        data[offset + 1] = 0xad;
        data[offset + 2] = 0xe8;
        data[offset + 3] = 0xab;
        // Reserved (must be 0 for the header to be accepted).
        BinaryPrimitives.WriteInt32BigEndian(data.AsSpan(offset + 4), 0);
        // Index count (1 entry).
        BinaryPrimitives.WriteInt32BigEndian(data.AsSpan(offset + 8), 1);
        // Store size (16 bytes).
        BinaryPrimitives.WriteInt32BigEndian(data.AsSpan(offset + 12), 16);

        // One 16-byte index entry: tag, type, offset, count.
        // Tag: 1000 (NAME).
        BinaryPrimitives.WriteInt32BigEndian(data.AsSpan(offset + 16), 1000);
        // Type: 6 (STRING).
        BinaryPrimitives.WriteInt32BigEndian(data.AsSpan(offset + 20), 6);
        // Offset into the data store: 0.
        BinaryPrimitives.WriteInt32BigEndian(data.AsSpan(offset + 24), 0);
        // Count: 1.
        BinaryPrimitives.WriteInt32BigEndian(data.AsSpan(offset + 28), 1);

        // Data store (16 bytes): "test-pkg\0" + zero padding.
        var name = "test-pkg\0"u8;
        name.CopyTo(data.AsSpan(offset + 32));
    }
}

View File

@@ -0,0 +1,120 @@
using System;
using System.Buffers.Binary;
using System.IO;
using System.Threading;
using Microsoft.Extensions.Logging.Abstractions;
using StellaOps.Scanner.Analyzers.OS.Rpm;
using Xunit;
namespace StellaOps.Scanner.Analyzers.OS.Tests.Rpm;
/// <summary>
/// Tests for <c>RpmDatabaseReader</c>, verifying the fallback path that reads a
/// legacy BerkeleyDB <c>Packages</c> file when no SQLite rpmdb is present.
/// </summary>
public sealed class RpmDatabaseReaderTests
{
    [Fact]
    public void FallsBackToLegacyPackages_WhenSqliteMissing()
    {
        var tempRoot = Directory.CreateTempSubdirectory("rpmdb-legacy");
        try
        {
            // Lay out var/lib/rpm/Packages under the temp root, with no SQLite db.
            var rpmDir = Path.Combine(tempRoot.FullName, "var", "lib", "rpm");
            Directory.CreateDirectory(rpmDir);
            File.WriteAllBytes(Path.Combine(rpmDir, "Packages"), CreateLegacyPackagesFile());

            var reader = new RpmDatabaseReader(NullLogger.Instance);
            var headers = reader.ReadHeaders(tempRoot.FullName, CancellationToken.None);

            Assert.Single(headers);
            var header = headers[0];
            Assert.Equal("legacy-pkg", header.Name);
            Assert.Equal("1.0.0", header.Version);
            Assert.Equal("x86_64", header.Architecture);
        }
        finally
        {
            // Best-effort cleanup of the temp directory; failures are irrelevant
            // to the test outcome.
            try
            {
                tempRoot.Delete(recursive: true);
            }
            catch
            {
            }
        }
    }

    /// <summary>
    /// Builds a two-page legacy BerkeleyDB image: a metadata page carrying the
    /// big-endian hash magic and page size, and a data page containing one RPM header.
    /// </summary>
    private static byte[] CreateLegacyPackagesFile()
    {
        const int pageSize = 4096;
        var image = new byte[pageSize * 2];

        // BDB hash magic (big-endian) at offset 12.
        BinaryPrimitives.WriteUInt32BigEndian(image.AsSpan(12), 0x00061561);
        // Page size, big-endian to match the BE magic.
        BinaryPrimitives.WriteInt32BigEndian(image.AsSpan(20), pageSize);

        // Place the RPM header inside the second page.
        CreateRpmHeader("legacy-pkg", "1.0.0", "x86_64")
            .CopyTo(image.AsSpan(pageSize + 128));
        return image;
    }

    /// <summary>
    /// Builds an RPM header blob with three STRING entries (NAME, VERSION, ARCH):
    /// 16-byte lead, one 16-byte index entry per tag, then the packed data store.
    /// </summary>
    private static byte[] CreateRpmHeader(string name, string version, string arch)
    {
        // Table of (tag, payload) pairs in store order.
        var entries = new (int Tag, byte[] Payload)[]
        {
            (1000, GetNullTerminated(name)),    // NAME
            (1001, GetNullTerminated(version)), // VERSION
            (1022, GetNullTerminated(arch)),    // ARCH
        };

        var storeSize = 0;
        foreach (var (_, payload) in entries)
        {
            storeSize += payload.Length;
        }

        var storeStart = 16 + (entries.Length * 16);
        var header = new byte[storeStart + storeSize];

        // Magic bytes 8e ad e8 ab.
        BinaryPrimitives.WriteUInt32BigEndian(header.AsSpan(0), 0x8eade8ab);
        // Version byte = 1; bytes 5-7 stay zero from array initialization.
        header[4] = 1;
        // Index count and store size.
        BinaryPrimitives.WriteInt32BigEndian(header.AsSpan(8), entries.Length);
        BinaryPrimitives.WriteInt32BigEndian(header.AsSpan(12), storeSize);

        // Emit index entries and copy each payload into the data store.
        var indexOffset = 16;
        var dataOffset = 0;
        foreach (var (tag, payload) in entries)
        {
            WriteIndex(header, ref indexOffset, tag, type: 6, dataOffset, count: 1);
            payload.CopyTo(header.AsSpan(storeStart + dataOffset));
            dataOffset += payload.Length;
        }

        return header;
    }

    /// <summary>
    /// Writes one 16-byte big-endian index entry (tag, type, store offset, count)
    /// and advances <paramref name="offset"/> past it.
    /// </summary>
    private static void WriteIndex(byte[] buffer, ref int offset, int tag, int type, int dataOffset, int count)
    {
        var span = buffer.AsSpan(offset);
        BinaryPrimitives.WriteInt32BigEndian(span, tag);
        BinaryPrimitives.WriteInt32BigEndian(span.Slice(4), type);
        BinaryPrimitives.WriteInt32BigEndian(span.Slice(8), dataOffset);
        BinaryPrimitives.WriteInt32BigEndian(span.Slice(12), count);
        offset += 16;
    }

    /// <summary>UTF-8 encodes <paramref name="value"/> with a trailing NUL byte.</summary>
    private static byte[] GetNullTerminated(string value)
    {
        var encoded = System.Text.Encoding.UTF8.GetBytes(value);
        var terminated = new byte[encoded.Length + 1];
        Array.Copy(encoded, terminated, encoded.Length);
        // Final slot is already 0 from array initialization.
        return terminated;
    }
}