diff --git a/etc/llm-providers/dummy.yaml b/etc/llm-providers/dummy.yaml
new file mode 100644
index 000000000..0f87caa92
--- /dev/null
+++ b/etc/llm-providers/dummy.yaml
@@ -0,0 +1,17 @@
+# Dummy echo-reverse provider for testing.
+# Requires no external API — reverses user input as the "answer".
+# Priority 1 = highest priority, so it's selected as the default provider.
+
+enabled: true
+priority: 1
+
+model:
+ name: "dummy-echo-reverse"
+
+inference:
+ temperature: 0
+ maxTokens: 4096
+
+request:
+ timeout: "00:00:05"
+ maxRetries: 0
diff --git a/src/AdvisoryAI/StellaOps.AdvisoryAI/Inference/LlmProviders/DummyLlmProvider.cs b/src/AdvisoryAI/StellaOps.AdvisoryAI/Inference/LlmProviders/DummyLlmProvider.cs
new file mode 100644
index 000000000..16c4515e2
--- /dev/null
+++ b/src/AdvisoryAI/StellaOps.AdvisoryAI/Inference/LlmProviders/DummyLlmProvider.cs
@@ -0,0 +1,80 @@
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Logging.Abstractions;
+using System.Runtime.CompilerServices;
+
+namespace StellaOps.AdvisoryAI.Inference.LlmProviders;
+
+/// <summary>
+/// Dummy LLM provider for testing. Reverses the user's question as the "answer"
+/// and streams it word by word to exercise the full SSE pipeline.
+/// </summary>
+public sealed class DummyLlmProvider : ILlmProvider
+{
+ public string ProviderId => "dummy";
+
+ public Task<bool> IsAvailableAsync(CancellationToken cancellationToken = default)
+ => Task.FromResult(true);
+
+ public Task<LlmCompletionResult> CompleteAsync(
+ LlmCompletionRequest request, CancellationToken cancellationToken = default)
+ {
+ var answer = BuildAnswer(request.UserPrompt);
+ return Task.FromResult(new LlmCompletionResult
+ {
+ Content = answer,
+ ModelId = "dummy-echo-reverse",
+ ProviderId = "dummy",
+ InputTokens = request.UserPrompt.Split(' ').Length,
+ OutputTokens = answer.Split(' ').Length,
+ FinishReason = "stop",
+ Deterministic = true,
+ });
+ }
+
+ public async IAsyncEnumerable<LlmStreamChunk> CompleteStreamAsync(
+ LlmCompletionRequest request,
+ [EnumeratorCancellation] CancellationToken cancellationToken = default)
+ {
+ var answer = BuildAnswer(request.UserPrompt);
+ var words = answer.Split(' ');
+
+ foreach (var word in words)
+ {
+ cancellationToken.ThrowIfCancellationRequested();
+ await Task.Delay(60, cancellationToken); // simulate token-by-token streaming
+ yield return new LlmStreamChunk { Content = word + " ", IsFinal = false };
+ }
+
+ yield return new LlmStreamChunk { Content = "", IsFinal = true, FinishReason = "stop" };
+ }
+
+ public void Dispose() { }
+
+ private static string BuildAnswer(string userPrompt)
+ {
+ var reversed = new string(userPrompt.Reverse().ToArray());
+ return $"[Dummy echo-reverse provider] You asked: \"{userPrompt}\" — Reversed: \"{reversed}\"";
+ }
+}
+
+/// <summary>
+/// Plugin registration for the dummy provider.
+/// </summary>
+public sealed class DummyLlmProviderPlugin : ILlmProviderPlugin
+{
+ public string Name => "Dummy Echo-Reverse";
+ public string ProviderId => "dummy";
+ public string DisplayName => "Dummy Echo-Reverse";
+ public string Description => "Test provider that echoes and reverses the input. No external API needed.";
+ public string DefaultConfigFileName => "dummy.yaml";
+
+ public bool IsAvailable(IServiceProvider services) => true;
+
+ public LlmProviderConfigValidation ValidateConfiguration(IConfiguration configuration)
+ => LlmProviderConfigValidation.Success();
+
+ public ILlmProvider Create(IServiceProvider services, IConfiguration configuration)
+ => new DummyLlmProvider();
+}
diff --git a/src/AdvisoryAI/StellaOps.AdvisoryAI/Inference/LlmProviders/LlmProviderFactory.cs b/src/AdvisoryAI/StellaOps.AdvisoryAI/Inference/LlmProviders/LlmProviderFactory.cs
index 373e7795f..c2f65591e 100644
--- a/src/AdvisoryAI/StellaOps.AdvisoryAI/Inference/LlmProviders/LlmProviderFactory.cs
+++ b/src/AdvisoryAI/StellaOps.AdvisoryAI/Inference/LlmProviders/LlmProviderFactory.cs
@@ -135,6 +135,7 @@ public static class LlmProviderPluginExtensions
catalog.RegisterPlugin(new GeminiLlmProviderPlugin());
catalog.RegisterPlugin(new LlamaServerLlmProviderPlugin());
catalog.RegisterPlugin(new OllamaLlmProviderPlugin());
+ catalog.RegisterPlugin(new DummyLlmProviderPlugin());
// Load configurations from directory
var fullPath = Path.GetFullPath(configDirectory);
@@ -170,6 +171,7 @@ public static class LlmProviderPluginExtensions
catalog.RegisterPlugin(new GeminiLlmProviderPlugin());
catalog.RegisterPlugin(new LlamaServerLlmProviderPlugin());
catalog.RegisterPlugin(new OllamaLlmProviderPlugin());
+ catalog.RegisterPlugin(new DummyLlmProviderPlugin());
configureCatalog(catalog);
diff --git a/src/Concelier/StellaOps.Concelier.WebService/Extensions/SourceManagementEndpointExtensions.cs b/src/Concelier/StellaOps.Concelier.WebService/Extensions/SourceManagementEndpointExtensions.cs
index f9a92ae19..1274a8601 100644
--- a/src/Concelier/StellaOps.Concelier.WebService/Extensions/SourceManagementEndpointExtensions.cs
+++ b/src/Concelier/StellaOps.Concelier.WebService/Extensions/SourceManagementEndpointExtensions.cs
@@ -1,6 +1,7 @@
using HttpResults = Microsoft.AspNetCore.Http.Results;
using Microsoft.AspNetCore.Mvc;
using StellaOps.Auth.ServerIntegration.Tenancy;
+using StellaOps.Concelier.Core.Jobs;
using StellaOps.Concelier.Core.Sources;
namespace StellaOps.Concelier.WebService.Extensions;
@@ -220,6 +221,69 @@ internal static class SourceManagementEndpointExtensions
.Produces(StatusCodes.Status400BadRequest)
.RequireAuthorization(SourcesManagePolicy);
+ // POST /{sourceId}/sync — trigger data sync for a single source
+ group.MapPost("/{sourceId}/sync", async (
+ string sourceId,
+ [FromServices] ISourceRegistry registry,
+ [FromServices] IJobCoordinator coordinator,
+ CancellationToken cancellationToken) =>
+ {
+ var source = registry.GetSource(sourceId);
+ if (source is null)
+ {
+ return HttpResults.NotFound(new { error = "source_not_found", sourceId });
+ }
+
+ var fetchKind = $"source:{sourceId}:fetch";
+ var result = await coordinator.TriggerAsync(fetchKind, null, "manual", cancellationToken).ConfigureAwait(false);
+
+ return result.Outcome switch
+ {
+ JobTriggerOutcome.Accepted => HttpResults.Accepted(null as string, new { sourceId, jobKind = fetchKind, outcome = "accepted", runId = result.Run?.RunId }),
+ JobTriggerOutcome.AlreadyRunning => HttpResults.Conflict(new { sourceId, jobKind = fetchKind, outcome = "already_running" }),
+ JobTriggerOutcome.NotFound => HttpResults.Ok(new { sourceId, jobKind = fetchKind, outcome = "no_job_defined", message = $"No fetch job registered for source '{sourceId}'" }),
+ _ => HttpResults.UnprocessableEntity(new { sourceId, jobKind = fetchKind, outcome = result.Outcome.ToString().ToLowerInvariant(), error = result.ErrorMessage })
+ };
+ })
+ .WithName("SyncSource")
+ .WithSummary("Trigger data sync for a single advisory source")
+ .WithDescription("Immediately triggers the fetch job for the specified source. Returns 202 Accepted with the job run ID, or 409 Conflict if already running.")
+ .Produces(StatusCodes.Status202Accepted)
+ .Produces(StatusCodes.Status404NotFound)
+ .Produces(StatusCodes.Status409Conflict)
+ .RequireAuthorization(SourcesManagePolicy);
+
+ // POST /sync — trigger data sync for all enabled sources
+ group.MapPost("/sync", async (
+ [FromServices] ISourceRegistry registry,
+ [FromServices] IJobCoordinator coordinator,
+ CancellationToken cancellationToken) =>
+ {
+ var enabledIds = await registry.GetEnabledSourcesAsync(cancellationToken).ConfigureAwait(false);
+ var results = new List