ScuttleBot

scuttlebot / internal / llm / config.go
Blame History Raw 45 lines
1
package llm

// BackendConfig holds the configuration for a single LLM backend instance.
// It selects the provider, supplies credentials and endpoint overrides, and
// constrains which models may be chosen.
type BackendConfig struct {
	// Backend is the provider type. Supported values:
	//
	// Native APIs:
	//	anthropic, gemini, bedrock, ollama
	//
	// OpenAI-compatible (Bearer token auth, /v1/models discovery):
	//	openai, openrouter, together, groq, fireworks, mistral, ai21,
	//	huggingface, deepseek, cerebras, xai,
	//	litellm, lmstudio, jan, localai, vllm, anythingllm
	Backend string

	// APIKey is the authentication key or token for cloud backends.
	// Unused by backends that need no auth (e.g. local servers).
	APIKey string

	// BaseURL overrides the default base URL for OpenAI-compatible backends.
	// For named backends (e.g. "openai"), this defaults from KnownBackends.
	// Required for custom/self-hosted OpenAI-compatible endpoints.
	BaseURL string

	// Model is the model ID to use. If empty, the first discovered model
	// that passes the allow/block filter is used.
	Model string

	// Region is the AWS region (e.g. "us-east-1"). Bedrock only.
	Region string

	// AWSKeyID is the AWS access key ID. Bedrock only.
	AWSKeyID string

	// AWSSecretKey is the AWS secret access key. Bedrock only.
	AWSSecretKey string

	// Allow is a list of regex patterns. If non-empty, only model IDs matching
	// at least one pattern are returned by DiscoverModels.
	Allow []string

	// Block is a list of regex patterns. Model IDs matching any pattern are
	// excluded from DiscoverModels results. Block takes effect after Allow.
	Block []string
}
45

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button