save progress
@@ -0,0 +1,169 @@
// <copyright file="HlcMergeService.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>

using Microsoft.Extensions.Logging;
using StellaOps.AirGap.Sync.Models;

namespace StellaOps.AirGap.Sync.Services;

/// <summary>
/// Interface for hybrid logical clock (HLC) based merge operations.
/// </summary>
public interface IHlcMergeService
{
    /// <summary>
    /// Merges job logs from multiple offline nodes into a unified, HLC-ordered stream.
    /// </summary>
    /// <param name="nodeLogs">The node logs to merge.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The merge result.</returns>
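    /// <example>
    /// A minimal usage sketch; how <see cref="NodeJobLog"/> instances are
    /// constructed is assumed here, so adjust to the real model:
    /// <code>
    /// IHlcMergeService service = new HlcMergeService(resolver, logger);
    /// MergeResult result = await service.MergeAsync(new[] { nodeALog, nodeBLog });
    /// // result.MergedEntries is totally ordered by (PhysicalTime, LogicalCounter, NodeId, JobId).
    /// </code>
    /// </example>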
    Task<MergeResult> MergeAsync(
        IReadOnlyList<NodeJobLog> nodeLogs,
        CancellationToken cancellationToken = default);
}

/// <summary>
/// Service for merging job logs from multiple offline nodes using HLC total ordering.
/// </summary>
public sealed class HlcMergeService : IHlcMergeService
{
    private readonly IConflictResolver _conflictResolver;
    private readonly ILogger<HlcMergeService> _logger;

    /// <summary>
    /// Initializes a new instance of the <see cref="HlcMergeService"/> class.
    /// </summary>
    public HlcMergeService(
        IConflictResolver conflictResolver,
        ILogger<HlcMergeService> logger)
    {
        _conflictResolver = conflictResolver ?? throw new ArgumentNullException(nameof(conflictResolver));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <inheritdoc/>
    public Task<MergeResult> MergeAsync(
        IReadOnlyList<NodeJobLog> nodeLogs,
        CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(nodeLogs);
        cancellationToken.ThrowIfCancellationRequested();
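
        // Note: the merge is CPU-bound and runs synchronously; results are
        // wrapped in Task.FromResult to satisfy the async interface contract.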

        if (nodeLogs.Count == 0)
        {
            return Task.FromResult(new MergeResult
            {
                MergedEntries = Array.Empty<MergedJobEntry>(),
                Duplicates = Array.Empty<DuplicateEntry>(),
                SourceNodes = Array.Empty<string>()
            });
        }

        _logger.LogInformation(
            "Starting merge of {NodeCount} node logs with {TotalEntries} total entries",
            nodeLogs.Count,
            nodeLogs.Sum(l => l.Entries.Count));

        // 1. Collect all entries from all nodes
        var allEntries = nodeLogs
            .SelectMany(log => log.Entries.Select(e => (log.NodeId, Entry: e)))
            .ToList();

        // 2. Sort by HLC total order: (PhysicalTime, LogicalCounter, NodeId, JobId)
        var sorted = allEntries
            .OrderBy(x => x.Entry.THlc.PhysicalTime)
            .ThenBy(x => x.Entry.THlc.LogicalCounter)
            .ThenBy(x => x.Entry.THlc.NodeId, StringComparer.Ordinal)
            .ThenBy(x => x.Entry.JobId)
            .ToList();
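
        // This composite key is a deterministic total order across nodes:
        // physical time orders most entries, the logical counter orders events
        // that share a wall-clock reading, and the ordinal node id plus job id
        // break any remaining ties. For example, entries stamped
        // (t=100, c=2, "node-a") and (t=100, c=2, "node-b") agree on time and
        // counter but still sort deterministically by node id.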

        // 3. Group by JobId to detect duplicates
        var groupedByJobId = sorted.GroupBy(x => x.Entry.JobId).ToList();

        var deduplicated = new List<MergedJobEntry>();
        var duplicates = new List<DuplicateEntry>();

        foreach (var group in groupedByJobId)
        {
            var entries = group.ToList();

            if (entries.Count == 1)
            {
                // No conflict - add directly
                var (nodeId, entry) = entries[0];
                deduplicated.Add(CreateMergedEntry(nodeId, entry));
            }
            else
            {
                // Multiple entries with same JobId - resolve conflict
                var resolution = _conflictResolver.Resolve(group.Key, entries);
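
                // The resolver is expected to select at most one winning entry
                // (the policy, e.g. last-writer-wins by HLC, is the resolver's
                // concern) and to report the losers via DroppedEntries.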

                if (resolution.Resolution == ResolutionStrategy.Error)
                {
                    _logger.LogError(
                        "Conflict resolution failed for JobId {JobId}: {Error}",
                        group.Key, resolution.Error);
                    throw new InvalidOperationException(resolution.Error);
                }

                // Add the selected entry
                if (resolution.SelectedEntry is not null)
                {
                    var sourceEntry = entries.First(e => e.Entry == resolution.SelectedEntry);
                    deduplicated.Add(CreateMergedEntry(sourceEntry.NodeId, resolution.SelectedEntry));
                }

                // Record duplicates
                foreach (var dropped in resolution.DroppedEntries ?? Array.Empty<OfflineJobLogEntry>())
                {
                    var sourceEntry = entries.First(e => e.Entry == dropped);
                    duplicates.Add(new DuplicateEntry(dropped.JobId, sourceEntry.NodeId, dropped.THlc));
                }
            }
        }

        // 4. Sort deduplicated entries by HLC order
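        // (Groups were visited in order of first occurrence, so a winner picked
        // from a later duplicate can fall out of global HLC order; re-sorting
        // restores the total order before the chain is rebuilt.)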
        deduplicated = deduplicated
            .OrderBy(x => x.THlc.PhysicalTime)
            .ThenBy(x => x.THlc.LogicalCounter)
            .ThenBy(x => x.THlc.NodeId, StringComparer.Ordinal)
            .ThenBy(x => x.JobId)
            .ToList();

        // 5. Recompute unified chain
        byte[]? prevLink = null;
        foreach (var entry in deduplicated)
        {
            entry.MergedLink = OfflineHlcManager.ComputeLink(
                prevLink,
                entry.JobId,
                entry.THlc,
                entry.PayloadHash);
            prevLink = entry.MergedLink;
        }
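
        // Assuming ComputeLink hashes the previous link together with the
        // entry's job id, HLC stamp, and payload hash, the merged stream forms
        // a tamper-evident chain: altering or reordering any entry changes
        // every subsequent link, including the MergedChainHead returned below.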

        _logger.LogInformation(
            "Merge complete: {MergedCount} entries, {DuplicateCount} duplicates dropped",
            deduplicated.Count, duplicates.Count);

        return Task.FromResult(new MergeResult
        {
            MergedEntries = deduplicated,
            Duplicates = duplicates,
            MergedChainHead = prevLink,
            SourceNodes = nodeLogs.Select(l => l.NodeId).ToList()
        });
    }

    private static MergedJobEntry CreateMergedEntry(string nodeId, OfflineJobLogEntry entry) => new()
    {
        SourceNodeId = nodeId,
        THlc = entry.THlc,
        JobId = entry.JobId,
        PartitionKey = entry.PartitionKey,
        Payload = entry.Payload,
        PayloadHash = entry.PayloadHash,
        OriginalLink = entry.Link
    };
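
    // MergedLink is left unset here: it is assigned when the unified chain is
    // recomputed in MergeAsync, while OriginalLink carries over the link from
    // the entry's source node's per-node chain.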
}