Files
git.stella-ops.org/etc/llm-providers/claude.yaml
2025-12-26 18:11:06 +02:00

85 lines
1.7 KiB
YAML

# Claude (Anthropic) LLM Provider Configuration
# Documentation: https://docs.anthropic.com/en/api
---
# Provider metadata
provider:
  id: claude
  name: Claude
  description: Anthropic Claude models via API
  # Enable/disable this provider
  enabled: true
  # Priority for provider selection (lower = higher priority)
  priority: 100

# API Configuration
api:
  # API key (can also use ANTHROPIC_API_KEY environment variable)
  apiKey: "${ANTHROPIC_API_KEY}"
  # Base URL for API requests
  baseUrl: "https://api.anthropic.com"
  # API version header (date-like value — quoted so it stays a string, not a YAML date)
  apiVersion: "2023-06-01"

# Model Configuration
model:
  # Model to use for inference
  # Options: claude-sonnet-4-20250514, claude-opus-4-20250514, claude-3-5-haiku-20241022
  name: "claude-sonnet-4-20250514"
  # Fallback models if primary is unavailable
  fallbacks:
    - "claude-3-5-haiku-20241022"

# Inference Parameters
inference:
  # Temperature (0 = deterministic, 1 = creative)
  temperature: 0
  # Maximum tokens to generate
  maxTokens: 4096
  # Top-p (nucleus sampling)
  topP: 1.0
  # Top-k sampling (0 = disabled)
  topK: 0
  # Stop sequences
  stopSequences: []

# Request Configuration
request:
  # Request timeout (hh:mm:ss — quoted so it is not read as sexagesimal)
  timeout: "00:02:00"
  # Maximum retries on failure
  maxRetries: 3
  # Retry delay (hh:mm:ss)
  retryDelay: "00:00:01"

# Rate Limiting
rateLimit:
  # Requests per minute (0 = unlimited)
  requestsPerMinute: 0
  # Tokens per minute (0 = unlimited)
  tokensPerMinute: 0

# Extended Thinking (Claude 3.5+ feature)
thinking:
  # Enable extended thinking for complex reasoning
  enabled: false
  # Budget tokens for thinking (when enabled)
  budgetTokens: 10000

# Logging
logging:
  # Log request/response bodies (keep false in production — may contain secrets/PII)
  logBodies: false
  # Log token-usage statistics
  logUsage: true