"""Fireworks AI provider implementation."""

from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry

class FireworksProvider(OpenAICompatibleProvider):
    """Provider for the Fireworks AI inference service.

    Fireworks exposes an OpenAI-compatible REST interface, so all request
    handling is inherited from ``OpenAICompatibleProvider``; only the
    provider-specific connection details are declared here.
    """

    # Identifier under which this provider is known to the registry.
    provider_name = "fireworks"
    # Environment variable that supplies the API key.
    env_var = "FIREWORKS_API_KEY"
    # OpenAI-compatible inference endpoint.
    base_url = "https://api.fireworks.ai/inference/v1"


# Register Fireworks so model identifiers prefixed with "accounts/fireworks/"
# are routed to this provider.  The registry name and API-key variable are
# sourced from the class attributes above to keep a single point of truth
# (previously the same string literals were duplicated here and could drift).
ProviderRegistry.register(
    name=FireworksProvider.provider_name,
    provider_class=FireworksProvider,
    env_var=FireworksProvider.env_var,
    model_prefixes=["accounts/fireworks/"],
    default_models={
        "chat": "accounts/fireworks/models/llama-v3p1-70b-instruct",
        # No dedicated vision/audio default models are configured; empty
        # string means "no default" to the registry — TODO confirm against
        # ProviderRegistry.register's handling of empty defaults.
        "vision": "",
        "audio": "",
    },
)