# OpenAI LLM Provider Configuration
# Documentation: https://platform.openai.com/docs/api-reference

# Provider metadata
provider:
  id: openai
  name: OpenAI
  description: OpenAI GPT models via API

  # Enable/disable this provider
  enabled: true

  # Priority for provider selection (lower value = higher priority)
  # When multiple providers are available, the one with the lowest
  # priority value is used
  priority: 100
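
  # Example: if another provider declares priority: 10, it is selected
  # ahead of this one (priority: 100).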

# API Configuration
api:
  # API key (can also be set via the OPENAI_API_KEY environment variable)
  # Environment variables are expanded: ${OPENAI_API_KEY}
  apiKey: "${OPENAI_API_KEY}"
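
  # Note: a literal key also works here, e.g. apiKey: "sk-..." (illustrative
  # placeholder, not a real key), but the environment variable keeps secrets
  # out of the file.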

  # Base URL for API requests
  # Default: https://api.openai.com/v1
  # For Azure OpenAI, use: https://{resource}.openai.azure.com/openai/deployments/{deployment}
  baseUrl: "https://api.openai.com/v1"

  # Organization ID (optional)
  organizationId: null

  # API version (for Azure OpenAI)
  apiVersion: null
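
  # Illustrative Azure OpenAI setup ({resource}/{deployment} placeholders
  # filled with assumed names; the apiVersion shown is an example only):
  # baseUrl: "https://my-resource.openai.azure.com/openai/deployments/my-deployment"
  # apiVersion: "2024-02-01"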

# Model Configuration
model:
  # Model to use for inference
  # Options: gpt-4o, gpt-4o-mini, gpt-4-turbo, gpt-4, gpt-3.5-turbo
  name: "gpt-4o"

  # Fallback models if primary is unavailable
  fallbacks:
    - "gpt-4o-mini"
    - "gpt-4-turbo"

# Inference Parameters
inference:
  # Temperature (0 = most deterministic, higher = more varied; OpenAI accepts 0-2)
  # For security analysis, use 0 for reproducibility
  temperature: 0

  # Maximum tokens to generate
  maxTokens: 4096

  # Random seed for reproducibility (when temperature=0)
  seed: 42
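
  # Note: per the OpenAI API reference, seed-based determinism is
  # best-effort; identical requests with the same seed and parameters are
  # not guaranteed to produce byte-identical outputs.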

  # Top-p (nucleus sampling)
  topP: 1.0

  # Frequency penalty (-2.0 to 2.0)
  frequencyPenalty: 0

  # Presence penalty (-2.0 to 2.0)
  presencePenalty: 0

  # Stop sequences
  stopSequences: []
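
  # Illustrative example (assumed sequences; adjust to your prompts):
  # stopSequences:
  #   - "\n\n"
  #   - "END_OF_ANALYSIS"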

# Request Configuration
request:
  # Request timeout (HH:MM:SS)
  timeout: "00:02:00"

  # Maximum retries on failure
  maxRetries: 3

  # Retry delay (exponential backoff base)
  retryDelay: "00:00:01"
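
  # With the values above, and assuming the delay doubles per attempt
  # (a common exponential backoff scheme), the three retries wait roughly
  # 1s, 2s, and 4s.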

# Rate Limiting
rateLimit:
  # Requests per minute (0 = unlimited)
  requestsPerMinute: 0

  # Tokens per minute (0 = unlimited)
  tokensPerMinute: 0
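
  # Illustrative throttled setup (example values only; match them to your
  # account's actual limits):
  # requestsPerMinute: 500
  # tokensPerMinute: 30000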

# Logging
logging:
  # Log request/response bodies (WARNING: may contain sensitive data)
  logBodies: false

  # Log token usage
  logUsage: true