namespace StellaOps.AdvisoryAI.Plugin.Unified;

using System.Runtime.CompilerServices;
using StellaOps.AdvisoryAI.Inference.LlmProviders;
using StellaOps.Plugin.Abstractions;
using StellaOps.Plugin.Abstractions.Capabilities;
using StellaOps.Plugin.Abstractions.Context;
using StellaOps.Plugin.Abstractions.Health;
using StellaOps.Plugin.Abstractions.Lifecycle;

// Type aliases to disambiguate between AdvisoryAI and Plugin.Abstractions types
using AdvisoryLlmRequest = StellaOps.AdvisoryAI.Inference.LlmProviders.LlmCompletionRequest;
using AdvisoryLlmResult = StellaOps.AdvisoryAI.Inference.LlmProviders.LlmCompletionResult;
using AdvisoryStreamChunk = StellaOps.AdvisoryAI.Inference.LlmProviders.LlmStreamChunk;
using PluginLlmRequest = StellaOps.Plugin.Abstractions.Capabilities.LlmCompletionRequest;
using PluginLlmResult = StellaOps.Plugin.Abstractions.Capabilities.LlmCompletionResult;
using PluginStreamChunk = StellaOps.Plugin.Abstractions.Capabilities.LlmStreamChunk;

// NOTE(review): all generic type arguments and XML doc tags were stripped from this file
// by an extraction step. They have been reconstructed from usage (List<LlmModelInfo>,
// Task<bool>, Dictionary<string, object>, Task<float[]?>, ...). Confirm the model-info
// record name (LlmModelInfo) and the embedding element type (float[]) against
// StellaOps.Plugin.Abstractions.Capabilities before merging.

/// <summary>
/// Adapts an existing <see cref="ILlmProvider"/> to the unified <see cref="IPlugin"/> and
/// <see cref="ILlmCapability"/> interfaces.
/// This enables gradual migration of AdvisoryAI LLM providers to the unified plugin architecture.
/// </summary>
public sealed class LlmPluginAdapter : IPlugin, ILlmCapability
{
    private readonly ILlmProvider _inner;
    private readonly ILlmProviderPlugin _plugin;
    private readonly int _priority;

    // Stored at InitializeAsync for later use by the plugin host; not read elsewhere in this file.
    private IPluginContext? _context;
    private PluginLifecycleState _state = PluginLifecycleState.Discovered;
    private List<LlmModelInfo> _models = new();

    /// <summary>
    /// Creates a new adapter for an existing LLM provider.
    /// </summary>
    /// <param name="inner">The existing LLM provider to wrap.</param>
    /// <param name="plugin">The plugin metadata for this provider.</param>
    /// <param name="priority">Provider priority (higher = preferred).</param>
    /// <exception cref="ArgumentNullException">
    /// Thrown when <paramref name="inner"/> or <paramref name="plugin"/> is <c>null</c>.
    /// </exception>
    public LlmPluginAdapter(ILlmProvider inner, ILlmProviderPlugin plugin, int priority = 10)
    {
        _inner = inner ?? throw new ArgumentNullException(nameof(inner));
        _plugin = plugin ?? throw new ArgumentNullException(nameof(plugin));
        _priority = priority;
    }

    /// <inheritdoc/>
    public PluginInfo Info => new(
        Id: $"com.stellaops.llm.{_inner.ProviderId}",
        Name: _plugin.DisplayName,
        Version: "1.0.0",
        Vendor: "Stella Ops",
        Description: _plugin.Description);

    /// <inheritdoc/>
    public PluginTrustLevel TrustLevel => PluginTrustLevel.BuiltIn;

    /// <inheritdoc/>
    public PluginCapabilities Capabilities => PluginCapabilities.Llm | PluginCapabilities.Network;

    /// <inheritdoc/>
    public PluginLifecycleState State => _state;

    #region ILlmCapability

    /// <inheritdoc/>
    public string ProviderId => _inner.ProviderId;

    /// <inheritdoc/>
    public int Priority => _priority;

    /// <inheritdoc/>
    public IReadOnlyList<LlmModelInfo> AvailableModels => _models;

    /// <inheritdoc/>
    public Task<bool> IsAvailableAsync(CancellationToken ct)
    {
        return _inner.IsAvailableAsync(ct);
    }

    /// <inheritdoc/>
    public async Task<PluginLlmResult> CompleteAsync(PluginLlmRequest request, CancellationToken ct)
    {
        var advisoryRequest = ToAdvisoryRequest(request);
        var result = await _inner.CompleteAsync(advisoryRequest, ct).ConfigureAwait(false);
        return ToPluginResult(result);
    }

    /// <inheritdoc/>
    public IAsyncEnumerable<PluginStreamChunk> CompleteStreamAsync(PluginLlmRequest request, CancellationToken ct)
    {
        var advisoryRequest = ToAdvisoryRequest(request);
        return StreamAdapter(_inner.CompleteStreamAsync(advisoryRequest, ct), ct);
    }

    /// <inheritdoc/>
    public Task<float[]?> EmbedAsync(string text, CancellationToken ct)
    {
        // Embedding is not supported by the base ILlmProvider interface.
        // Specific providers that support embedding would need custom adapters.
        return Task.FromResult<float[]?>(null);
    }

    #endregion

    #region IPlugin

    /// <inheritdoc/>
    /// <exception cref="InvalidOperationException">
    /// Thrown when the wrapped provider reports itself unavailable; the adapter is left in
    /// the <see cref="PluginLifecycleState.Failed"/> state in that case.
    /// </exception>
    public async Task InitializeAsync(IPluginContext context, CancellationToken ct)
    {
        _context = context;
        _state = PluginLifecycleState.Initializing;

        // Check if the provider is available before declaring the plugin active.
        var available = await _inner.IsAvailableAsync(ct).ConfigureAwait(false);
        if (!available)
        {
            _state = PluginLifecycleState.Failed;
            throw new InvalidOperationException($"LLM provider '{_inner.ProviderId}' is not available");
        }

        // Initialize with a default model entry (provider-specific models would be discovered at runtime).
        _models = new List<LlmModelInfo>
        {
            new(
                Id: _inner.ProviderId,
                Name: _plugin.DisplayName,
                Description: _plugin.Description,
                ParameterCount: null,
                ContextLength: null,
                Capabilities: new[] { "chat", "completion" })
        };

        _state = PluginLifecycleState.Active;
        context.Logger.Info("LLM plugin adapter initialized for {ProviderId}", _inner.ProviderId);
    }

    /// <inheritdoc/>
    public async Task<HealthCheckResult> HealthCheckAsync(CancellationToken ct)
    {
        try
        {
            var available = await _inner.IsAvailableAsync(ct).ConfigureAwait(false);
            if (available)
            {
                return HealthCheckResult.Healthy()
                    .WithDetails(new Dictionary<string, object>
                    {
                        ["providerId"] = _inner.ProviderId,
                        ["priority"] = _priority
                    });
            }

            return HealthCheckResult.Unhealthy($"LLM provider '{_inner.ProviderId}' is not available");
        }
        catch (Exception ex)
        {
            // Surface probe failures as an unhealthy status instead of letting them escape the health check.
            return HealthCheckResult.Unhealthy(ex);
        }
    }

    /// <inheritdoc/>
    public ValueTask DisposeAsync()
    {
        _state = PluginLifecycleState.Stopped;
        _inner.Dispose();
        return ValueTask.CompletedTask;
    }

    #endregion

    #region Type Mapping

    /// <summary>Maps a unified plugin request onto the AdvisoryAI provider request shape.</summary>
    private static AdvisoryLlmRequest ToAdvisoryRequest(PluginLlmRequest request)
    {
        return new AdvisoryLlmRequest
        {
            UserPrompt = request.UserPrompt,
            SystemPrompt = request.SystemPrompt,
            Model = request.Model,
            Temperature = request.Temperature,
            MaxTokens = request.MaxTokens,
            Seed = request.Seed,
            StopSequences = request.StopSequences,
            RequestId = request.RequestId
        };
    }

    /// <summary>Maps an AdvisoryAI provider result back to the unified plugin result shape.</summary>
    private static PluginLlmResult ToPluginResult(AdvisoryLlmResult result)
    {
        return new PluginLlmResult(
            Content: result.Content,
            ModelId: result.ModelId,
            ProviderId: result.ProviderId,
            InputTokens: result.InputTokens,
            OutputTokens: result.OutputTokens,
            TimeToFirstTokenMs: result.TimeToFirstTokenMs,
            TotalTimeMs: result.TotalTimeMs,
            FinishReason: result.FinishReason,
            Deterministic: result.Deterministic,
            RequestId: result.RequestId);
    }

    /// <summary>Re-wraps each AdvisoryAI stream chunk as a unified plugin stream chunk.</summary>
    private static async IAsyncEnumerable<PluginStreamChunk> StreamAdapter(
        IAsyncEnumerable<AdvisoryStreamChunk> source,
        [EnumeratorCancellation] CancellationToken ct)
    {
        await foreach (var chunk in source.WithCancellation(ct).ConfigureAwait(false))
        {
            yield return new PluginStreamChunk(
                Content: chunk.Content,
                IsFinal: chunk.IsFinal,
                FinishReason: chunk.FinishReason);
        }
    }

    #endregion
}