sln build fix (again), test fixes, audit work, and Doctor work

This commit is contained in: master
2026-01-12 22:15:51 +02:00
parent 9873f80830
commit 9330c64349
812 changed files with 48051 additions and 3891 deletions


@@ -0,0 +1,43 @@
using StellaOps.Doctor.Models;
using StellaOps.Doctor.Plugins;
using StellaOps.Doctor.Plugins.AI.Checks;
namespace StellaOps.Doctor.Plugins.AI;
/// <summary>
/// Plugin providing AI/LLM diagnostic checks including AdvisoryAI connectivity
/// and inference provider validation.
/// </summary>
public sealed class AIPlugin : IDoctorPlugin
{
/// <inheritdoc />
public string PluginId => "stellaops.doctor.ai";
/// <inheritdoc />
public string DisplayName => "AI / LLM";
/// <inheritdoc />
public DoctorCategory Category => DoctorCategory.AI;
/// <inheritdoc />
public Version Version => new(1, 0, 0);
/// <inheritdoc />
public Version MinEngineVersion => new(1, 0, 0);
/// <inheritdoc />
public bool IsAvailable(IServiceProvider services) => true;
/// <inheritdoc />
public IReadOnlyList<IDoctorCheck> GetChecks(DoctorPluginContext context) =>
[
new LlmProviderConfigurationCheck(),
new ClaudeProviderCheck(),
new OpenAiProviderCheck(),
new OllamaProviderCheck(),
new LocalInferenceCheck()
];
/// <inheritdoc />
public Task InitializeAsync(DoctorPluginContext context, CancellationToken ct) => Task.CompletedTask;
}
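
For orientation, a minimal sketch of how a host might drive this plugin end to end. The loop is an illustrative assumption (the Doctor engine owns the real scheduling), and services, context, and ct stand in for the host's service provider, DoctorPluginContext, and CancellationToken:

IDoctorPlugin plugin = new AIPlugin();
if (plugin.IsAvailable(services))
{
    await plugin.InitializeAsync(context, ct);
    foreach (var check in plugin.GetChecks(context))
    {
        if (!check.CanRun(context))
        {
            continue; // e.g. the Claude check opts out when no API key is configured
        }
        var checkResult = await check.RunAsync(context, ct); // one DoctorCheckResult per check
    }
}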


@@ -0,0 +1,204 @@
using System.Text;
using System.Text.Json;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Doctor.Models;
using StellaOps.Doctor.Plugins;
namespace StellaOps.Doctor.Plugins.AI.Checks;
/// <summary>
/// Validates Claude (Anthropic) API connectivity.
/// </summary>
public sealed class ClaudeProviderCheck : IDoctorCheck
{
private const string DefaultModel = "claude-sonnet-4-20250514";
private const string DefaultEndpoint = "https://api.anthropic.com";
/// <inheritdoc />
public string CheckId => "check.ai.provider.claude";
/// <inheritdoc />
public string Name => "Claude Provider";
/// <inheritdoc />
public string Description => "Validates Claude (Anthropic) API connectivity and authentication";
/// <inheritdoc />
public DoctorSeverity DefaultSeverity => DoctorSeverity.Warn;
/// <inheritdoc />
public IReadOnlyList<string> Tags => ["ai", "llm", "claude", "anthropic", "advisoryai"];
/// <inheritdoc />
public TimeSpan EstimatedDuration => TimeSpan.FromSeconds(10);
/// <inheritdoc />
public bool CanRun(DoctorPluginContext context)
{
var apiKey = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:Claude:ApiKey")
?? Environment.GetEnvironmentVariable("ANTHROPIC_API_KEY");
return !string.IsNullOrWhiteSpace(apiKey);
}
/// <inheritdoc />
public async Task<DoctorCheckResult> RunAsync(DoctorPluginContext context, CancellationToken ct)
{
var result = context.CreateResult(CheckId, "stellaops.doctor.ai", DoctorCategory.AI.ToString());
var apiKey = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:Claude:ApiKey")
?? Environment.GetEnvironmentVariable("ANTHROPIC_API_KEY");
var endpoint = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:Claude:Endpoint")
?? DefaultEndpoint;
var model = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:Claude:Model")
?? DefaultModel;
if (string.IsNullOrWhiteSpace(apiKey))
{
return result
.Skip("Claude API key not configured")
.WithEvidence("Claude provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("ApiKeyConfigured", "false");
})
.Build();
}
var httpClientFactory = context.Services.GetService<IHttpClientFactory>();
if (httpClientFactory == null)
{
return result
.Skip("HttpClientFactory not available")
.WithEvidence("Claude provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", "IHttpClientFactory not registered");
})
.Build();
}
try
{
using var client = httpClientFactory.CreateClient();
client.Timeout = TimeSpan.FromSeconds(10);
client.DefaultRequestHeaders.Add("x-api-key", apiKey);
client.DefaultRequestHeaders.Add("anthropic-version", "2023-06-01");
// Make a minimal API call to validate connectivity
var requestBody = new
{
model,
max_tokens = 10,
messages = new[]
{
new { role = "user", content = "Hi" }
}
};
var content = new StringContent(
JsonSerializer.Serialize(requestBody),
Encoding.UTF8,
"application/json");
using var response = await client.PostAsync($"{endpoint}/v1/messages", content, ct);
if (response.IsSuccessStatusCode)
{
return result
.Pass("Claude API is accessible")
.WithEvidence("Claude provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Model", model);
e.Add("ApiKeyConfigured", "true (masked)");
e.Add("StatusCode", ((int)response.StatusCode).ToString());
})
.Build();
}
var errorBody = await response.Content.ReadAsStringAsync(ct);
var statusCode = (int)response.StatusCode;
var issues = new List<string>();
if (statusCode == 401)
{
issues.Add("Invalid API key");
}
else if (statusCode == 403)
{
issues.Add("Access forbidden - check API key permissions");
}
else if (statusCode == 429)
{
issues.Add("Rate limited - too many requests");
}
else
{
issues.Add($"API returned status {statusCode}");
}
return result
.Warn($"Claude API issue: {response.StatusCode}")
.WithEvidence("Claude provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Model", model);
e.Add("StatusCode", statusCode.ToString());
e.Add("Error", TruncateError(errorBody));
})
.WithCauses(issues.ToArray())
.WithRemediation(r => r
.AddManualStep(1, "Verify API key", "Check ANTHROPIC_API_KEY is valid")
.AddManualStep(2, "Check quotas", "Verify API usage limits on console.anthropic.com"))
.WithVerification("stella doctor --check check.ai.provider.claude")
.Build();
}
catch (HttpRequestException ex)
{
return result
.Fail($"Cannot connect to Claude API: {ex.Message}")
.WithEvidence("Claude provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", ex.Message);
})
.WithCauses("Network connectivity issue or invalid endpoint")
.WithRemediation(r => r
.AddManualStep(1, "Check network", "Verify network connectivity to api.anthropic.com")
.AddManualStep(2, "Check proxy", "Ensure proxy settings are configured if required"))
.WithVerification("stella doctor --check check.ai.provider.claude")
.Build();
}
catch (Exception ex) when (ex is not OperationCanceledException)
{
return result
.Fail($"Claude API error: {ex.Message}")
.WithEvidence("Claude provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", ex.GetType().Name);
})
.Build();
}
}
private static string TruncateError(string error, int maxLength = 200)
{
if (string.IsNullOrWhiteSpace(error))
{
return "(empty)";
}
if (error.Length <= maxLength)
{
return error;
}
return error[..maxLength] + "...";
}
}
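
Because the check resolves its HttpClient through IHttpClientFactory, the HTTP branches can be unit-tested without touching api.anthropic.com. A sketch with hypothetical test doubles (not part of this commit; relies on the project's implicit usings):

// Returns a canned 401 for any request, driving the "Invalid API key" branch.
sealed class CannedResponseHandler : HttpMessageHandler
{
    protected override Task<HttpResponseMessage> SendAsync(
        HttpRequestMessage request, CancellationToken cancellationToken) =>
        Task.FromResult(new HttpResponseMessage(System.Net.HttpStatusCode.Unauthorized)
        {
            Content = new StringContent("""{"error":{"message":"invalid x-api-key"}}""")
        });
}

sealed class CannedHttpClientFactory : IHttpClientFactory
{
    public HttpClient CreateClient(string name) => new(new CannedResponseHandler());
}

Registering CannedHttpClientFactory as the IHttpClientFactory in the context's service provider should route RunAsync into the 401 path and its remediation steps.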


@@ -0,0 +1,151 @@
using Microsoft.Extensions.Configuration;
using StellaOps.Doctor.Models;
using StellaOps.Doctor.Plugins;
namespace StellaOps.Doctor.Plugins.AI.Checks;
/// <summary>
/// Validates LLM provider configuration for AdvisoryAI.
/// </summary>
public sealed class LlmProviderConfigurationCheck : IDoctorCheck
{
/// <inheritdoc />
public string CheckId => "check.ai.llm.config";
/// <inheritdoc />
public string Name => "LLM Configuration";
/// <inheritdoc />
public string Description => "Validates LLM provider configuration for AdvisoryAI";
/// <inheritdoc />
public DoctorSeverity DefaultSeverity => DoctorSeverity.Info;
/// <inheritdoc />
public IReadOnlyList<string> Tags => ["ai", "llm", "configuration", "advisoryai"];
/// <inheritdoc />
public TimeSpan EstimatedDuration => TimeSpan.FromMilliseconds(50);
/// <inheritdoc />
public bool CanRun(DoctorPluginContext context) => true;
/// <inheritdoc />
public Task<DoctorCheckResult> RunAsync(DoctorPluginContext context, CancellationToken ct)
{
var result = context.CreateResult(CheckId, "stellaops.doctor.ai", DoctorCategory.AI.ToString());
var aiEnabled = context.Configuration.GetValue<bool?>("AdvisoryAI:Enabled")
?? context.Configuration.GetValue<bool?>("AI:Enabled");
if (aiEnabled == false)
{
return Task.FromResult(result
.Info("AdvisoryAI is disabled")
.WithEvidence("AI configuration", e =>
{
e.Add("Enabled", "false");
e.Add("Note", "Enable AdvisoryAI:Enabled to use AI features");
})
.Build());
}
var defaultProvider = context.Configuration.GetValue<string>("AdvisoryAI:DefaultProvider")
?? context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:Default")
?? "claude";
var configuredProviders = new List<string>();
var issues = new List<string>();
// Check Claude configuration
var claudeApiKey = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:Claude:ApiKey")
?? Environment.GetEnvironmentVariable("ANTHROPIC_API_KEY");
if (!string.IsNullOrWhiteSpace(claudeApiKey))
{
configuredProviders.Add("Claude");
}
// Check OpenAI configuration
var openaiApiKey = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:OpenAI:ApiKey")
?? Environment.GetEnvironmentVariable("OPENAI_API_KEY");
if (!string.IsNullOrWhiteSpace(openaiApiKey))
{
configuredProviders.Add("OpenAI");
}
// Check Ollama configuration (the endpoint itself is probed by the Ollama check)
var ollamaEnabled = context.Configuration.GetValue<bool?>("AdvisoryAI:LlmProviders:Ollama:Enabled");
if (ollamaEnabled == true)
{
configuredProviders.Add("Ollama");
}
// Check Llama.cpp configuration (the endpoint itself is probed by the local inference check)
var llamaEnabled = context.Configuration.GetValue<bool?>("AdvisoryAI:LlmProviders:LlamaCpp:Enabled");
if (llamaEnabled == true)
{
configuredProviders.Add("Llama.cpp");
}
// Validate default provider is configured
var defaultConfigured = defaultProvider.ToLowerInvariant() switch
{
"claude" => configuredProviders.Contains("Claude"),
"openai" => configuredProviders.Contains("OpenAI"),
"ollama" => configuredProviders.Contains("Ollama"),
"llamacpp" or "llama" => configuredProviders.Contains("Llama.cpp"),
_ => false
};
if (!defaultConfigured && configuredProviders.Count > 0)
{
issues.Add($"Default provider '{defaultProvider}' is not configured");
}
if (configuredProviders.Count == 0)
{
return Task.FromResult(result
.Info("No LLM providers configured")
.WithEvidence("AI configuration", e =>
{
e.Add("Enabled", aiEnabled?.ToString() ?? "(not set)");
e.Add("DefaultProvider", defaultProvider);
e.Add("ConfiguredProviders", "(none)");
e.Add("Recommendation", "Configure at least one LLM provider for AdvisoryAI");
})
.Build());
}
if (issues.Count > 0)
{
return Task.FromResult(result
.Warn($"{issues.Count} LLM configuration issue(s)")
.WithEvidence("AI configuration", e =>
{
e.Add("Enabled", aiEnabled?.ToString() ?? "true (default)");
e.Add("DefaultProvider", defaultProvider);
e.Add("ConfiguredProviders", string.Join(", ", configuredProviders));
})
.WithCauses(issues.ToArray())
.WithRemediation(r => r
.AddManualStep(1, "Set API key", "Configure API key for the default provider")
.AddManualStep(2, "Verify provider", "Ensure default provider matches a configured one"))
.WithVerification("stella doctor --check check.ai.llm.config")
.Build());
}
return Task.FromResult(result
.Pass($"{configuredProviders.Count} LLM provider(s) configured")
.WithEvidence("AI configuration", e =>
{
e.Add("Enabled", aiEnabled?.ToString() ?? "true (default)");
e.Add("DefaultProvider", defaultProvider);
e.Add("ConfiguredProviders", string.Join(", ", configuredProviders));
})
.Build());
}
}
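
The keys this check reads can be pinned down in a test (or mirrored in an appsettings file) with the same shape. A sketch using the standard in-memory configuration provider, with placeholder values:

using Microsoft.Extensions.Configuration;

var config = new ConfigurationBuilder()
    .AddInMemoryCollection(new Dictionary<string, string?>
    {
        ["AdvisoryAI:Enabled"] = "true",
        ["AdvisoryAI:DefaultProvider"] = "claude",
        ["AdvisoryAI:LlmProviders:Claude:ApiKey"] = "<placeholder>",
        ["AdvisoryAI:LlmProviders:Ollama:Enabled"] = "true",
        ["AdvisoryAI:LlmProviders:LlamaCpp:Enabled"] = "false",
    })
    .Build();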


@@ -0,0 +1,184 @@
using System.Text.Json;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Doctor.Models;
using StellaOps.Doctor.Plugins;
namespace StellaOps.Doctor.Plugins.AI.Checks;
/// <summary>
/// Validates local inference server (Llama.cpp) connectivity.
/// </summary>
public sealed class LocalInferenceCheck : IDoctorCheck
{
private const string DefaultEndpoint = "http://localhost:8080";
/// <inheritdoc />
public string CheckId => "check.ai.provider.local";
/// <inheritdoc />
public string Name => "Local Inference";
/// <inheritdoc />
public string Description => "Validates local inference server (Llama.cpp) connectivity";
/// <inheritdoc />
public DoctorSeverity DefaultSeverity => DoctorSeverity.Info;
/// <inheritdoc />
public IReadOnlyList<string> Tags => ["ai", "llm", "llamacpp", "local", "advisoryai"];
/// <inheritdoc />
public TimeSpan EstimatedDuration => TimeSpan.FromSeconds(5);
/// <inheritdoc />
public bool CanRun(DoctorPluginContext context)
{
var llamaEnabled = context.Configuration.GetValue<bool?>("AdvisoryAI:LlmProviders:LlamaCpp:Enabled")
?? context.Configuration.GetValue<bool?>("AdvisoryAI:LlmProviders:Local:Enabled");
return llamaEnabled == true;
}
/// <inheritdoc />
public async Task<DoctorCheckResult> RunAsync(DoctorPluginContext context, CancellationToken ct)
{
var result = context.CreateResult(CheckId, "stellaops.doctor.ai", DoctorCategory.AI.ToString());
var endpoint = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:LlamaCpp:Endpoint")
?? context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:Local:Endpoint")
?? DefaultEndpoint;
var modelPath = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:LlamaCpp:ModelPath");
var httpClientFactory = context.Services.GetService<IHttpClientFactory>();
if (httpClientFactory == null)
{
return result
.Skip("HttpClientFactory not available")
.WithEvidence("Local inference", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", "IHttpClientFactory not registered");
})
.Build();
}
try
{
using var client = httpClientFactory.CreateClient();
client.Timeout = TimeSpan.FromSeconds(5);
// Check llama.cpp server health endpoint
using var healthResponse = await client.GetAsync($"{endpoint}/health", ct);
if (!healthResponse.IsSuccessStatusCode)
{
// Fall back to probing the root endpoint
using var altResponse = await client.GetAsync($"{endpoint}/", ct);
if (!altResponse.IsSuccessStatusCode)
{
return result
.Info("Local inference server not accessible")
.WithEvidence("Local inference", e =>
{
e.Add("Endpoint", endpoint);
e.Add("StatusCode", ((int)healthResponse.StatusCode).ToString());
e.Add("Recommendation", "Start llama.cpp server with: llama-server -m <model.gguf>");
})
.Build();
}
}
// When /health failed but the root endpoint answered, there is no health body to parse.
var healthContent = healthResponse.IsSuccessStatusCode
? await healthResponse.Content.ReadAsStringAsync(ct)
: string.Empty;
string? serverStatus = null;
string? loadedModel = null;
try
{
using var doc = JsonDocument.Parse(healthContent);
if (doc.RootElement.TryGetProperty("status", out var statusProp))
{
serverStatus = statusProp.GetString();
}
if (doc.RootElement.TryGetProperty("model", out var modelProp))
{
loadedModel = modelProp.GetString();
}
}
catch
{
// No parseable health body, but the server did respond; assume ok
serverStatus = "ok";
}
var issues = new List<string>();
// Check if model path is configured but file doesn't exist
if (!string.IsNullOrWhiteSpace(modelPath) && !File.Exists(modelPath))
{
issues.Add($"Configured model file not found: {modelPath}");
}
// Check server status
if (serverStatus?.Equals("error", StringComparison.OrdinalIgnoreCase) == true)
{
issues.Add("Server reported error status");
}
if (issues.Count > 0)
{
return result
.Warn($"{issues.Count} local inference issue(s)")
.WithEvidence("Local inference", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Status", serverStatus ?? "(unknown)");
e.Add("LoadedModel", loadedModel ?? "(none)");
e.Add("ConfiguredModelPath", modelPath ?? "(not set)");
})
.WithCauses(issues.ToArray())
.WithRemediation(r => r
.AddManualStep(1, "Load model", "Ensure a model is loaded in the server")
.AddManualStep(2, "Check model path", "Verify the model file exists at configured path"))
.WithVerification("stella doctor --check check.ai.provider.local")
.Build();
}
return result
.Pass("Local inference server is accessible")
.WithEvidence("Local inference", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Status", serverStatus ?? "ok");
e.Add("LoadedModel", loadedModel ?? "(default)");
e.Add("ConfiguredModelPath", modelPath ?? "(not set)");
})
.Build();
}
catch (HttpRequestException ex)
{
return result
.Info($"Local inference server not running: {ex.Message}")
.WithEvidence("Local inference", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", ex.Message);
e.Add("Recommendation", "Start llama.cpp server with: llama-server -m <model.gguf>");
})
.Build();
}
catch (Exception ex) when (ex is not OperationCanceledException)
{
return result
.Skip($"Local inference check error: {ex.Message}")
.WithEvidence("Local inference", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", ex.GetType().Name);
})
.Build();
}
}
}
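
For reference, the /health payload shape the parser above looks for is a JSON object with optional status and model properties. The sample below is an assumption (llama.cpp builds vary in what /health returns), which is why the parse-failure path deliberately degrades to "ok":

using System.Text.Json;

const string sample = """{"status":"ok","model":"model.gguf"}""";  // illustrative payload
using var doc = JsonDocument.Parse(sample);
var status = doc.RootElement.GetProperty("status").GetString();    // "ok"
var loaded = doc.RootElement.GetProperty("model").GetString();     // "model.gguf"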


@@ -0,0 +1,202 @@
using System.Text.Json;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Doctor.Models;
using StellaOps.Doctor.Plugins;
namespace StellaOps.Doctor.Plugins.AI.Checks;
/// <summary>
/// Validates Ollama local LLM server connectivity.
/// </summary>
public sealed class OllamaProviderCheck : IDoctorCheck
{
private const string DefaultEndpoint = "http://localhost:11434";
private const string DefaultModel = "llama3:8b";
/// <inheritdoc />
public string CheckId => "check.ai.provider.ollama";
/// <inheritdoc />
public string Name => "Ollama Provider";
/// <inheritdoc />
public string Description => "Validates Ollama local LLM server connectivity";
/// <inheritdoc />
public DoctorSeverity DefaultSeverity => DoctorSeverity.Info;
/// <inheritdoc />
public IReadOnlyList<string> Tags => ["ai", "llm", "ollama", "local", "advisoryai"];
/// <inheritdoc />
public TimeSpan EstimatedDuration => TimeSpan.FromSeconds(5);
/// <inheritdoc />
public bool CanRun(DoctorPluginContext context)
{
var ollamaEnabled = context.Configuration.GetValue<bool?>("AdvisoryAI:LlmProviders:Ollama:Enabled");
return ollamaEnabled == true;
}
/// <inheritdoc />
public async Task<DoctorCheckResult> RunAsync(DoctorPluginContext context, CancellationToken ct)
{
var result = context.CreateResult(CheckId, "stellaops.doctor.ai", DoctorCategory.AI.ToString());
var endpoint = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:Ollama:Endpoint")
?? DefaultEndpoint;
var model = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:Ollama:Model")
?? DefaultModel;
var httpClientFactory = context.Services.GetService<IHttpClientFactory>();
if (httpClientFactory == null)
{
return result
.Skip("HttpClientFactory not available")
.WithEvidence("Ollama provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", "IHttpClientFactory not registered");
})
.Build();
}
try
{
using var client = httpClientFactory.CreateClient();
client.Timeout = TimeSpan.FromSeconds(5);
// Check Ollama version endpoint
using var versionResponse = await client.GetAsync($"{endpoint}/api/version", ct);
if (!versionResponse.IsSuccessStatusCode)
{
return result
.Warn("Ollama server not accessible")
.WithEvidence("Ollama provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("StatusCode", ((int)versionResponse.StatusCode).ToString());
})
.WithCauses("Ollama server is not running or endpoint is incorrect")
.WithRemediation(r => r
.AddManualStep(1, "Start Ollama", "Run: ollama serve")
.AddManualStep(2, "Check endpoint", $"Verify Ollama is running at {endpoint}"))
.WithVerification("stella doctor --check check.ai.provider.ollama")
.Build();
}
var versionContent = await versionResponse.Content.ReadAsStringAsync(ct);
string? ollamaVersion = null;
try
{
using var doc = JsonDocument.Parse(versionContent);
if (doc.RootElement.TryGetProperty("version", out var versionProp))
{
ollamaVersion = versionProp.GetString();
}
}
catch
{
// Version parsing failed, continue
}
// Check if the configured model is available
var issues = new List<string>();
var availableModels = new List<string>();
using var modelsResponse = await client.GetAsync($"{endpoint}/api/tags", ct);
if (modelsResponse.IsSuccessStatusCode)
{
var modelsContent = await modelsResponse.Content.ReadAsStringAsync(ct);
try
{
using var doc = JsonDocument.Parse(modelsContent);
if (doc.RootElement.TryGetProperty("models", out var modelsProp))
{
foreach (var modelElement in modelsProp.EnumerateArray())
{
if (modelElement.TryGetProperty("name", out var nameProp))
{
var modelName = nameProp.GetString();
if (!string.IsNullOrWhiteSpace(modelName))
{
availableModels.Add(modelName);
}
}
}
}
}
catch
{
// Model parsing failed
}
var modelConfigured = !string.IsNullOrWhiteSpace(model);
var modelAvailable = availableModels.Any(m =>
m.Equals(model, StringComparison.OrdinalIgnoreCase) ||
m.StartsWith(model.Split(':')[0], StringComparison.OrdinalIgnoreCase));
if (modelConfigured && !modelAvailable && availableModels.Count > 0)
{
issues.Add($"Configured model '{model}' not found - run: ollama pull {model}");
}
}
if (issues.Count > 0)
{
return result
.Warn($"{issues.Count} Ollama issue(s)")
.WithEvidence("Ollama provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Version", ollamaVersion ?? "(unknown)");
e.Add("ConfiguredModel", model);
e.Add("AvailableModels", availableModels.Count > 0 ? string.Join(", ", availableModels.Take(5)) : "(none)");
})
.WithCauses(issues.ToArray())
.WithRemediation(r => r
.AddManualStep(1, "Pull model", $"Run: ollama pull {model}")
.AddManualStep(2, "List models", "Run: ollama list"))
.WithVerification("stella doctor --check check.ai.provider.ollama")
.Build();
}
return result
.Pass("Ollama server is accessible")
.WithEvidence("Ollama provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Version", ollamaVersion ?? "(unknown)");
e.Add("ConfiguredModel", model);
e.Add("AvailableModels", availableModels.Count.ToString());
})
.Build();
}
catch (HttpRequestException ex)
{
return result
.Info($"Ollama server not running: {ex.Message}")
.WithEvidence("Ollama provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", ex.Message);
e.Add("Recommendation", "Start Ollama with: ollama serve");
})
.Build();
}
catch (Exception ex) when (ex is not OperationCanceledException)
{
return result
.Skip($"Ollama check error: {ex.Message}")
.WithEvidence("Ollama provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", ex.GetType().Name);
})
.Build();
}
}
}
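
Similarly, the model-availability logic consumes Ollama's /api/tags listing. A sketch of a payload in the shape the check parses (values illustrative):

using System.Text.Json;

const string tags = """{"models":[{"name":"llama3:8b"},{"name":"mistral:7b"}]}""";
using var doc = JsonDocument.Parse(tags);
foreach (var m in doc.RootElement.GetProperty("models").EnumerateArray())
{
    // "llama3:8b" matches the default model exactly; a tag like "llama3:latest"
    // would still match via the base-name StartsWith fallback used above.
    Console.WriteLine(m.GetProperty("name").GetString());
}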


@@ -0,0 +1,188 @@
using System.Net.Http.Headers;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Doctor.Models;
using StellaOps.Doctor.Plugins;
namespace StellaOps.Doctor.Plugins.AI.Checks;
/// <summary>
/// Validates OpenAI API connectivity.
/// </summary>
public sealed class OpenAiProviderCheck : IDoctorCheck
{
private const string DefaultModel = "gpt-4o";
private const string DefaultEndpoint = "https://api.openai.com";
/// <inheritdoc />
public string CheckId => "check.ai.provider.openai";
/// <inheritdoc />
public string Name => "OpenAI Provider";
/// <inheritdoc />
public string Description => "Validates OpenAI API connectivity and authentication";
/// <inheritdoc />
public DoctorSeverity DefaultSeverity => DoctorSeverity.Warn;
/// <inheritdoc />
public IReadOnlyList<string> Tags => ["ai", "llm", "openai", "gpt", "advisoryai"];
/// <inheritdoc />
public TimeSpan EstimatedDuration => TimeSpan.FromSeconds(10);
/// <inheritdoc />
public bool CanRun(DoctorPluginContext context)
{
var apiKey = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:OpenAI:ApiKey")
?? Environment.GetEnvironmentVariable("OPENAI_API_KEY");
return !string.IsNullOrWhiteSpace(apiKey);
}
/// <inheritdoc />
public async Task<DoctorCheckResult> RunAsync(DoctorPluginContext context, CancellationToken ct)
{
var result = context.CreateResult(CheckId, "stellaops.doctor.ai", DoctorCategory.AI.ToString());
var apiKey = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:OpenAI:ApiKey")
?? Environment.GetEnvironmentVariable("OPENAI_API_KEY");
var endpoint = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:OpenAI:Endpoint")
?? DefaultEndpoint;
var model = context.Configuration.GetValue<string>("AdvisoryAI:LlmProviders:OpenAI:Model")
?? DefaultModel;
if (string.IsNullOrWhiteSpace(apiKey))
{
return result
.Skip("OpenAI API key not configured")
.WithEvidence("OpenAI provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("ApiKeyConfigured", "false");
})
.Build();
}
var httpClientFactory = context.Services.GetService<IHttpClientFactory>();
if (httpClientFactory == null)
{
return result
.Skip("HttpClientFactory not available")
.WithEvidence("OpenAI provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", "IHttpClientFactory not registered");
})
.Build();
}
try
{
using var client = httpClientFactory.CreateClient();
client.Timeout = TimeSpan.FromSeconds(10);
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", apiKey);
// List models to validate API key (lightweight call)
using var response = await client.GetAsync($"{endpoint}/v1/models", ct);
if (response.IsSuccessStatusCode)
{
return result
.Pass("OpenAI API is accessible")
.WithEvidence("OpenAI provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Model", model);
e.Add("ApiKeyConfigured", "true (masked)");
e.Add("StatusCode", ((int)response.StatusCode).ToString());
})
.Build();
}
var errorBody = await response.Content.ReadAsStringAsync(ct);
var statusCode = (int)response.StatusCode;
var issues = new List<string>();
if (statusCode == 401)
{
issues.Add("Invalid API key");
}
else if (statusCode == 403)
{
issues.Add("Access forbidden - check API key permissions");
}
else if (statusCode == 429)
{
issues.Add("Rate limited - too many requests");
}
else
{
issues.Add($"API returned status {statusCode}");
}
return result
.Warn($"OpenAI API issue: {response.StatusCode}")
.WithEvidence("OpenAI provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Model", model);
e.Add("StatusCode", statusCode.ToString());
e.Add("Error", TruncateError(errorBody));
})
.WithCauses(issues.ToArray())
.WithRemediation(r => r
.AddManualStep(1, "Verify API key", "Check OPENAI_API_KEY is valid")
.AddManualStep(2, "Check quotas", "Verify API usage limits on platform.openai.com"))
.WithVerification("stella doctor --check check.ai.provider.openai")
.Build();
}
catch (HttpRequestException ex)
{
return result
.Fail($"Cannot connect to OpenAI API: {ex.Message}")
.WithEvidence("OpenAI provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", ex.Message);
})
.WithCauses("Network connectivity issue or invalid endpoint")
.WithRemediation(r => r
.AddManualStep(1, "Check network", "Verify network connectivity to api.openai.com")
.AddManualStep(2, "Check proxy", "Ensure proxy settings are configured if required"))
.WithVerification("stella doctor --check check.ai.provider.openai")
.Build();
}
catch (Exception ex) when (ex is not OperationCanceledException)
{
return result
.Fail($"OpenAI API error: {ex.Message}")
.WithEvidence("OpenAI provider", e =>
{
e.Add("Endpoint", endpoint);
e.Add("Error", ex.GetType().Name);
})
.Build();
}
}
private static string TruncateError(string error, int maxLength = 200)
{
if (string.IsNullOrWhiteSpace(error))
{
return "(empty)";
}
if (error.Length <= maxLength)
{
return error;
}
return error[..maxLength] + "...";
}
}
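
Both cloud checks fall back from configuration keys to environment variables (ANTHROPIC_API_KEY, OPENAI_API_KEY). If the host also wires the standard environment-variables configuration provider, the same values can flow in through IConfiguration directly; a sketch:

using Microsoft.Extensions.Configuration;

// With this provider, ADVISORYAI__LLMPROVIDERS__OPENAI__APIKEY (double underscores
// as section separators) surfaces as "AdvisoryAI:LlmProviders:OpenAI:ApiKey".
var config = new ConfigurationBuilder()
    .AddEnvironmentVariables()
    .Build();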


@@ -0,0 +1,21 @@
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Doctor.Plugins;
namespace StellaOps.Doctor.Plugins.AI.DependencyInjection;
/// <summary>
/// Extension methods for registering the AI diagnostics plugin.
/// </summary>
public static class AIPluginExtensions
{
/// <summary>
/// Adds the AI diagnostics plugin to the service collection.
/// </summary>
/// <param name="services">The service collection.</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddDoctorAIPlugin(this IServiceCollection services)
{
services.AddSingleton<IDoctorPlugin, AIPlugin>();
return services;
}
}
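
A minimal registration sketch for a host; AddHttpClient matters because every network-facing check above skips itself when IHttpClientFactory is absent:

using Microsoft.Extensions.DependencyInjection;
using StellaOps.Doctor.Plugins.AI.DependencyInjection;

var services = new ServiceCollection();
services.AddHttpClient();      // provides IHttpClientFactory to the checks
services.AddDoctorAIPlugin();  // registers AIPlugin as an IDoctorPlugin
var provider = services.BuildServiceProvider();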


@@ -0,0 +1,21 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Doctor\StellaOps.Doctor.csproj" />
</ItemGroup>
<ItemGroup>
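<!-- Versions omitted; presumably supplied centrally (e.g. Directory.Packages.props). -->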
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Http" />
</ItemGroup>
</Project>