PlanOpticon

feat(providers): add Azure, Together, Fireworks, Cerebras, xAI providers Add five new OpenAI-compatible providers using the OpenAICompatibleProvider base class and ProviderRegistry pattern. Each provider is in its own module with lazy registration via manager imports. CLI choices updated to include all new providers. Optional dependency groups added to pyproject.toml.

lmata 2026-03-07 21:53 trunk
Commit 9975609f4e776018579561783ce143532dd52217b1e9886c419f7dea9306dbdc
--- pyproject.toml
+++ pyproject.toml
@@ -54,10 +54,15 @@
5454
[project.optional-dependencies]
5555
pdf = ["weasyprint>=60.0"]
5656
gpu = ["torch>=2.0.0", "torchvision>=0.15.0"]
5757
gdrive = ["google-auth>=2.0.0", "google-auth-oauthlib>=1.0.0", "google-api-python-client>=2.0.0"]
5858
dropbox = ["dropbox>=12.0.0"]
59
+azure = ["openai>=1.0.0"]
60
+together = ["openai>=1.0.0"]
61
+fireworks = ["openai>=1.0.0"]
62
+cerebras = ["openai>=1.0.0"]
63
+xai = ["openai>=1.0.0"]
5964
graph = []
6065
cloud = [
6166
"planopticon[gdrive]",
6267
"planopticon[dropbox]",
6368
]
6469
--- pyproject.toml
+++ pyproject.toml
@@ -54,10 +54,15 @@
54 [project.optional-dependencies]
55 pdf = ["weasyprint>=60.0"]
56 gpu = ["torch>=2.0.0", "torchvision>=0.15.0"]
57 gdrive = ["google-auth>=2.0.0", "google-auth-oauthlib>=1.0.0", "google-api-python-client>=2.0.0"]
58 dropbox = ["dropbox>=12.0.0"]
 
 
 
 
 
59 graph = []
60 cloud = [
61 "planopticon[gdrive]",
62 "planopticon[dropbox]",
63 ]
64
--- pyproject.toml
+++ pyproject.toml
@@ -54,10 +54,15 @@
54 [project.optional-dependencies]
55 pdf = ["weasyprint>=60.0"]
56 gpu = ["torch>=2.0.0", "torchvision>=0.15.0"]
57 gdrive = ["google-auth>=2.0.0", "google-auth-oauthlib>=1.0.0", "google-api-python-client>=2.0.0"]
58 dropbox = ["dropbox>=12.0.0"]
59 azure = ["openai>=1.0.0"]
60 together = ["openai>=1.0.0"]
61 fireworks = ["openai>=1.0.0"]
62 cerebras = ["openai>=1.0.0"]
63 xai = ["openai>=1.0.0"]
64 graph = []
65 cloud = [
66 "planopticon[gdrive]",
67 "planopticon[dropbox]",
68 ]
69
--- video_processor/cli/commands.py
+++ video_processor/cli/commands.py
@@ -73,11 +73,24 @@
7373
)
7474
@click.option("--title", type=str, help="Title for the analysis report")
7575
@click.option(
7676
"--provider",
7777
"-p",
78
- type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
78
+ type=click.Choice(
79
+ [
80
+ "auto",
81
+ "openai",
82
+ "anthropic",
83
+ "gemini",
84
+ "ollama",
85
+ "azure",
86
+ "together",
87
+ "fireworks",
88
+ "cerebras",
89
+ "xai",
90
+ ]
91
+ ),
7992
default="auto",
8093
help="API provider",
8194
)
8295
@click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
8396
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@@ -154,11 +167,24 @@
154167
)
155168
@click.option("--title", type=str, default="Batch Processing Results", help="Batch title")
156169
@click.option(
157170
"--provider",
158171
"-p",
159
- type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
172
+ type=click.Choice(
173
+ [
174
+ "auto",
175
+ "openai",
176
+ "anthropic",
177
+ "gemini",
178
+ "ollama",
179
+ "azure",
180
+ "together",
181
+ "fireworks",
182
+ "cerebras",
183
+ "xai",
184
+ ]
185
+ ),
160186
default="auto",
161187
help="API provider",
162188
)
163189
@click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
164190
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@@ -424,11 +450,24 @@
424450
)
425451
@click.option("--title", type=str, help="Title for the analysis report")
426452
@click.option(
427453
"--provider",
428454
"-p",
429
- type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
455
+ type=click.Choice(
456
+ [
457
+ "auto",
458
+ "openai",
459
+ "anthropic",
460
+ "gemini",
461
+ "ollama",
462
+ "azure",
463
+ "together",
464
+ "fireworks",
465
+ "cerebras",
466
+ "xai",
467
+ ]
468
+ ),
430469
default="auto",
431470
help="API provider",
432471
)
433472
@click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
434473
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@@ -491,11 +530,24 @@
491530
)
492531
@click.option("--interactive", "-I", is_flag=True, help="Enter interactive REPL mode")
493532
@click.option(
494533
"--provider",
495534
"-p",
496
- type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
535
+ type=click.Choice(
536
+ [
537
+ "auto",
538
+ "openai",
539
+ "anthropic",
540
+ "gemini",
541
+ "ollama",
542
+ "azure",
543
+ "together",
544
+ "fireworks",
545
+ "cerebras",
546
+ "xai",
547
+ ]
548
+ ),
497549
default="auto",
498550
help="API provider for agentic mode",
499551
)
500552
@click.option("--chat-model", type=str, default=None, help="Override model for agentic mode")
501553
@click.pass_context
@@ -856,11 +908,24 @@
856908
type=click.Choice(["basic", "standard", "comprehensive"]),
857909
default="standard",
858910
)
859911
provider = click.prompt(
860912
" Provider",
861
- type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
913
+ type=click.Choice(
914
+ [
915
+ "auto",
916
+ "openai",
917
+ "anthropic",
918
+ "gemini",
919
+ "ollama",
920
+ "azure",
921
+ "together",
922
+ "fireworks",
923
+ "cerebras",
924
+ "xai",
925
+ ]
926
+ ),
862927
default="auto",
863928
)
864929
ctx.invoke(
865930
analyze,
866931
input=input_path,
@@ -885,11 +950,24 @@
885950
type=click.Choice(["basic", "standard", "comprehensive"]),
886951
default="standard",
887952
)
888953
provider = click.prompt(
889954
" Provider",
890
- type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
955
+ type=click.Choice(
956
+ [
957
+ "auto",
958
+ "openai",
959
+ "anthropic",
960
+ "gemini",
961
+ "ollama",
962
+ "azure",
963
+ "together",
964
+ "fireworks",
965
+ "cerebras",
966
+ "xai",
967
+ ]
968
+ ),
891969
default="auto",
892970
)
893971
ctx.invoke(
894972
batch,
895973
input_dir=input_dir,
896974
--- video_processor/cli/commands.py
+++ video_processor/cli/commands.py
@@ -73,11 +73,24 @@
73 )
74 @click.option("--title", type=str, help="Title for the analysis report")
75 @click.option(
76 "--provider",
77 "-p",
78 type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
 
 
 
 
 
 
 
 
 
 
 
 
 
79 default="auto",
80 help="API provider",
81 )
82 @click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
83 @click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@@ -154,11 +167,24 @@
154 )
155 @click.option("--title", type=str, default="Batch Processing Results", help="Batch title")
156 @click.option(
157 "--provider",
158 "-p",
159 type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
 
 
 
 
 
 
 
 
 
 
 
 
 
160 default="auto",
161 help="API provider",
162 )
163 @click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
164 @click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@@ -424,11 +450,24 @@
424 )
425 @click.option("--title", type=str, help="Title for the analysis report")
426 @click.option(
427 "--provider",
428 "-p",
429 type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
 
 
 
 
 
 
 
 
 
 
 
 
 
430 default="auto",
431 help="API provider",
432 )
433 @click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
434 @click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@@ -491,11 +530,24 @@
491 )
492 @click.option("--interactive", "-I", is_flag=True, help="Enter interactive REPL mode")
493 @click.option(
494 "--provider",
495 "-p",
496 type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
 
 
 
 
 
 
 
 
 
 
 
 
 
497 default="auto",
498 help="API provider for agentic mode",
499 )
500 @click.option("--chat-model", type=str, default=None, help="Override model for agentic mode")
501 @click.pass_context
@@ -856,11 +908,24 @@
856 type=click.Choice(["basic", "standard", "comprehensive"]),
857 default="standard",
858 )
859 provider = click.prompt(
860 " Provider",
861 type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
 
 
 
 
 
 
 
 
 
 
 
 
 
862 default="auto",
863 )
864 ctx.invoke(
865 analyze,
866 input=input_path,
@@ -885,11 +950,24 @@
885 type=click.Choice(["basic", "standard", "comprehensive"]),
886 default="standard",
887 )
888 provider = click.prompt(
889 " Provider",
890 type=click.Choice(["auto", "openai", "anthropic", "gemini", "ollama"]),
 
 
 
 
 
 
 
 
 
 
 
 
 
891 default="auto",
892 )
893 ctx.invoke(
894 batch,
895 input_dir=input_dir,
896
--- video_processor/cli/commands.py
+++ video_processor/cli/commands.py
@@ -73,11 +73,24 @@
73 )
74 @click.option("--title", type=str, help="Title for the analysis report")
75 @click.option(
76 "--provider",
77 "-p",
78 type=click.Choice(
79 [
80 "auto",
81 "openai",
82 "anthropic",
83 "gemini",
84 "ollama",
85 "azure",
86 "together",
87 "fireworks",
88 "cerebras",
89 "xai",
90 ]
91 ),
92 default="auto",
93 help="API provider",
94 )
95 @click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
96 @click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@@ -154,11 +167,24 @@
167 )
168 @click.option("--title", type=str, default="Batch Processing Results", help="Batch title")
169 @click.option(
170 "--provider",
171 "-p",
172 type=click.Choice(
173 [
174 "auto",
175 "openai",
176 "anthropic",
177 "gemini",
178 "ollama",
179 "azure",
180 "together",
181 "fireworks",
182 "cerebras",
183 "xai",
184 ]
185 ),
186 default="auto",
187 help="API provider",
188 )
189 @click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
190 @click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@@ -424,11 +450,24 @@
450 )
451 @click.option("--title", type=str, help="Title for the analysis report")
452 @click.option(
453 "--provider",
454 "-p",
455 type=click.Choice(
456 [
457 "auto",
458 "openai",
459 "anthropic",
460 "gemini",
461 "ollama",
462 "azure",
463 "together",
464 "fireworks",
465 "cerebras",
466 "xai",
467 ]
468 ),
469 default="auto",
470 help="API provider",
471 )
472 @click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
473 @click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@@ -491,11 +530,24 @@
530 )
531 @click.option("--interactive", "-I", is_flag=True, help="Enter interactive REPL mode")
532 @click.option(
533 "--provider",
534 "-p",
535 type=click.Choice(
536 [
537 "auto",
538 "openai",
539 "anthropic",
540 "gemini",
541 "ollama",
542 "azure",
543 "together",
544 "fireworks",
545 "cerebras",
546 "xai",
547 ]
548 ),
549 default="auto",
550 help="API provider for agentic mode",
551 )
552 @click.option("--chat-model", type=str, default=None, help="Override model for agentic mode")
553 @click.pass_context
@@ -856,11 +908,24 @@
908 type=click.Choice(["basic", "standard", "comprehensive"]),
909 default="standard",
910 )
911 provider = click.prompt(
912 " Provider",
913 type=click.Choice(
914 [
915 "auto",
916 "openai",
917 "anthropic",
918 "gemini",
919 "ollama",
920 "azure",
921 "together",
922 "fireworks",
923 "cerebras",
924 "xai",
925 ]
926 ),
927 default="auto",
928 )
929 ctx.invoke(
930 analyze,
931 input=input_path,
@@ -885,11 +950,24 @@
950 type=click.Choice(["basic", "standard", "comprehensive"]),
951 default="standard",
952 )
953 provider = click.prompt(
954 " Provider",
955 type=click.Choice(
956 [
957 "auto",
958 "openai",
959 "anthropic",
960 "gemini",
961 "ollama",
962 "azure",
963 "together",
964 "fireworks",
965 "cerebras",
966 "xai",
967 ]
968 ),
969 default="auto",
970 )
971 ctx.invoke(
972 batch,
973 input_dir=input_dir,
974
--- video_processor/providers/__init__.py
+++ video_processor/providers/__init__.py
@@ -12,6 +12,29 @@
1212
"BaseProvider",
1313
"ModelInfo",
1414
"OpenAICompatibleProvider",
1515
"ProviderManager",
1616
"ProviderRegistry",
17
+ # OpenAI-compatible providers (lazy-loaded via manager)
18
+ "AzureOpenAIProvider",
19
+ "CerebrasProvider",
20
+ "FireworksProvider",
21
+ "TogetherProvider",
22
+ "XAIProvider",
1723
]
24
+
25
+
26
+def __getattr__(name: str):
27
+ """Lazy import provider classes to avoid import-time side effects."""
28
+ _lazy_imports = {
29
+ "AzureOpenAIProvider": "video_processor.providers.azure_provider",
30
+ "CerebrasProvider": "video_processor.providers.cerebras_provider",
31
+ "FireworksProvider": "video_processor.providers.fireworks_provider",
32
+ "TogetherProvider": "video_processor.providers.together_provider",
33
+ "XAIProvider": "video_processor.providers.xai_provider",
34
+ }
35
+ if name in _lazy_imports:
36
+ import importlib
37
+
38
+ mod = importlib.import_module(_lazy_imports[name])
39
+ return getattr(mod, name)
40
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
1841
1942
ADDED video_processor/providers/azure_provider.py
2043
ADDED video_processor/providers/cerebras_provider.py
2144
ADDED video_processor/providers/fireworks_provider.py
--- video_processor/providers/__init__.py
+++ video_processor/providers/__init__.py
@@ -12,6 +12,29 @@
12 "BaseProvider",
13 "ModelInfo",
14 "OpenAICompatibleProvider",
15 "ProviderManager",
16 "ProviderRegistry",
 
 
 
 
 
 
17 ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
19 ADDED video_processor/providers/azure_provider.py
20 ADDED video_processor/providers/cerebras_provider.py
21 ADDED video_processor/providers/fireworks_provider.py
--- video_processor/providers/__init__.py
+++ video_processor/providers/__init__.py
@@ -12,6 +12,29 @@
12 "BaseProvider",
13 "ModelInfo",
14 "OpenAICompatibleProvider",
15 "ProviderManager",
16 "ProviderRegistry",
17 # OpenAI-compatible providers (lazy-loaded via manager)
18 "AzureOpenAIProvider",
19 "CerebrasProvider",
20 "FireworksProvider",
21 "TogetherProvider",
22 "XAIProvider",
23 ]
24
25
26 def __getattr__(name: str):
27 """Lazy import provider classes to avoid import-time side effects."""
28 _lazy_imports = {
29 "AzureOpenAIProvider": "video_processor.providers.azure_provider",
30 "CerebrasProvider": "video_processor.providers.cerebras_provider",
31 "FireworksProvider": "video_processor.providers.fireworks_provider",
32 "TogetherProvider": "video_processor.providers.together_provider",
33 "XAIProvider": "video_processor.providers.xai_provider",
34 }
35 if name in _lazy_imports:
36 import importlib
37
38 mod = importlib.import_module(_lazy_imports[name])
39 return getattr(mod, name)
40 raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
41
42 ADDED video_processor/providers/azure_provider.py
43 ADDED video_processor/providers/cerebras_provider.py
44 ADDED video_processor/providers/fireworks_provider.py
--- a/video_processor/providers/azure_provider.py
+++ b/video_processor/providers/azure_provider.py
@@ -0,0 +1,38 @@
1
+"""Azure OpenAI provider implementation."""
2
+
3
+import os
4
+
5
+from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
6
+
7
+
8
+class AzureOpenAIProvider(OpenAICompatibleProvider):
9
+ """Azure OpenAI API provider.
10
+
11
+ Uses the AzureOpenAI client which requires an endpoint and API version
12
+ in addition to the API key.
13
+ """
14
+
15
+ provider_name = "azure"
16
+ env_var = "AZURE_OPENAI_API_KEY"
17
+
18
+ def __init__(self, api_key=None, endpoint=None, api_version=None):
19
+ from openai import AzureOpenAI
20
+
21
+ self._api_key = api_key or os.getenv("AZURE_OPENAI_API_KEY", "")
22
+ endpoint = endpoint or os.getenv("AZURE_OPENAI_ENDPOINT", "")
23
+ api_version = api_version or os.getenv("AZURE_OPENAI_API_VERSION", "2024-02-15-preview")
24
+ self._client = AzureOpenAI(
25
+ api_key=self._api_key,
26
+ azure_endpoint=endpoint,
27
+ api_version=api_version,
28
+ )
29
+ self._last_usage = None
30
+
31
+
32
+ProviderRegistry.register(
33
+ name="azure",
34
+ provider_class=AzureOpenAIProvider,
35
+ env_var="AZURE_OPENAI_API_KEY",
36
+ model_prefixes=[], # Azure uses deployment names, not standard prefixes
37
+ default_models={"chat": "", "vision": "", "audio": ""},
38
+)
--- a/video_processor/providers/azure_provider.py
+++ b/video_processor/providers/azure_provider.py
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/video_processor/providers/azure_provider.py
+++ b/video_processor/providers/azure_provider.py
@@ -0,0 +1,38 @@
1 """Azure OpenAI provider implementation."""
2
3 import os
4
5 from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
6
7
8 class AzureOpenAIProvider(OpenAICompatibleProvider):
9 """Azure OpenAI API provider.
10
11 Uses the AzureOpenAI client which requires an endpoint and API version
12 in addition to the API key.
13 """
14
15 provider_name = "azure"
16 env_var = "AZURE_OPENAI_API_KEY"
17
18 def __init__(self, api_key=None, endpoint=None, api_version=None):
19 from openai import AzureOpenAI
20
21 self._api_key = api_key or os.getenv("AZURE_OPENAI_API_KEY", "")
22 endpoint = endpoint or os.getenv("AZURE_OPENAI_ENDPOINT", "")
23 api_version = api_version or os.getenv("AZURE_OPENAI_API_VERSION", "2024-02-15-preview")
24 self._client = AzureOpenAI(
25 api_key=self._api_key,
26 azure_endpoint=endpoint,
27 api_version=api_version,
28 )
29 self._last_usage = None
30
31
32 ProviderRegistry.register(
33 name="azure",
34 provider_class=AzureOpenAIProvider,
35 env_var="AZURE_OPENAI_API_KEY",
36 model_prefixes=[], # Azure uses deployment names, not standard prefixes
37 default_models={"chat": "", "vision": "", "audio": ""},
38 )
--- a/video_processor/providers/cerebras_provider.py
+++ b/video_processor/providers/cerebras_provider.py
@@ -0,0 +1,20 @@
1
+"""Cerebras provider implementation."""
2
+
3
+from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
4
+
5
+
6
+class CerebrasProvider(OpenAICompatibleProvider):
7
+ """Cerebras AI API provider (OpenAI-compatible)."""
8
+
9
+ provider_name = "cerebras"
10
+ base_url = "https://api.cerebras.ai/v1"
11
+ env_var = "CEREBRAS_API_KEY"
12
+
13
+
14
+ProviderRegistry.register(
15
+ name="cerebras",
16
+ provider_class=CerebrasProvider,
17
+ env_var="CEREBRAS_API_KEY",
18
+ model_prefixes=["cerebras/"],
19
+ default_models={"chat": "llama3.1-70b", "vision": "", "audio": ""},
20
+)
--- a/video_processor/providers/cerebras_provider.py
+++ b/video_processor/providers/cerebras_provider.py
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/video_processor/providers/cerebras_provider.py
+++ b/video_processor/providers/cerebras_provider.py
@@ -0,0 +1,20 @@
1 """Cerebras provider implementation."""
2
3 from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
4
5
6 class CerebrasProvider(OpenAICompatibleProvider):
7 """Cerebras AI API provider (OpenAI-compatible)."""
8
9 provider_name = "cerebras"
10 base_url = "https://api.cerebras.ai/v1"
11 env_var = "CEREBRAS_API_KEY"
12
13
14 ProviderRegistry.register(
15 name="cerebras",
16 provider_class=CerebrasProvider,
17 env_var="CEREBRAS_API_KEY",
18 model_prefixes=["cerebras/"],
19 default_models={"chat": "llama3.1-70b", "vision": "", "audio": ""},
20 )
--- a/video_processor/providers/fireworks_provider.py
+++ b/video_processor/providers/fireworks_provider.py
@@ -0,0 +1,24 @@
1
+"""Fireworks AI provider implementation."""
2
+
3
+from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
4
+
5
+
6
+class FireworksProvider(OpenAICompatibleProvider):
7
+ """Fireworks AI API provider (OpenAI-compatible)."""
8
+
9
+ provider_name = "fireworks"
10
+ base_url = "https://api.fireworks.ai/inference/v1"
11
+ env_var = "FIREWORKS_API_KEY"
12
+
13
+
14
+ProviderRegistry.register(
15
+ name="fireworks",
16
+ provider_class=FireworksProvider,
17
+ env_var="FIREWORKS_API_KEY",
18
+ model_prefixes=["accounts/fireworks/"],
19
+ default_models={
20
+ "chat": "accounts/fireworks/models/llama-v3p1-70b-instruct",
21
+ "vision": "",
22
+ "audio": "",
23
+ },
24
+)
--- a/video_processor/providers/fireworks_provider.py
+++ b/video_processor/providers/fireworks_provider.py
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/video_processor/providers/fireworks_provider.py
+++ b/video_processor/providers/fireworks_provider.py
@@ -0,0 +1,24 @@
1 """Fireworks AI provider implementation."""
2
3 from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
4
5
6 class FireworksProvider(OpenAICompatibleProvider):
7 """Fireworks AI API provider (OpenAI-compatible)."""
8
9 provider_name = "fireworks"
10 base_url = "https://api.fireworks.ai/inference/v1"
11 env_var = "FIREWORKS_API_KEY"
12
13
14 ProviderRegistry.register(
15 name="fireworks",
16 provider_class=FireworksProvider,
17 env_var="FIREWORKS_API_KEY",
18 model_prefixes=["accounts/fireworks/"],
19 default_models={
20 "chat": "accounts/fireworks/models/llama-v3p1-70b-instruct",
21 "vision": "",
22 "audio": "",
23 },
24 )
--- video_processor/providers/manager.py
+++ video_processor/providers/manager.py
@@ -18,13 +18,18 @@
1818
"""Import all built-in provider modules so they register themselves."""
1919
if ProviderRegistry.all_registered():
2020
return
2121
# Each module registers itself on import via ProviderRegistry.register()
2222
import video_processor.providers.anthropic_provider # noqa: F401
23
+ import video_processor.providers.azure_provider # noqa: F401
24
+ import video_processor.providers.cerebras_provider # noqa: F401
25
+ import video_processor.providers.fireworks_provider # noqa: F401
2326
import video_processor.providers.gemini_provider # noqa: F401
2427
import video_processor.providers.ollama_provider # noqa: F401
2528
import video_processor.providers.openai_provider # noqa: F401
29
+ import video_processor.providers.together_provider # noqa: F401
30
+ import video_processor.providers.xai_provider # noqa: F401
2631
2732
2833
# Default model preference rankings (tried in order)
2934
_VISION_PREFERENCES = [
3035
("gemini", "gemini-2.5-flash"),
3136
3237
ADDED video_processor/providers/together_provider.py
3338
ADDED video_processor/providers/xai_provider.py
--- video_processor/providers/manager.py
+++ video_processor/providers/manager.py
@@ -18,13 +18,18 @@
18 """Import all built-in provider modules so they register themselves."""
19 if ProviderRegistry.all_registered():
20 return
21 # Each module registers itself on import via ProviderRegistry.register()
22 import video_processor.providers.anthropic_provider # noqa: F401
 
 
 
23 import video_processor.providers.gemini_provider # noqa: F401
24 import video_processor.providers.ollama_provider # noqa: F401
25 import video_processor.providers.openai_provider # noqa: F401
 
 
26
27
28 # Default model preference rankings (tried in order)
29 _VISION_PREFERENCES = [
30 ("gemini", "gemini-2.5-flash"),
31
32 ADDED video_processor/providers/together_provider.py
33 ADDED video_processor/providers/xai_provider.py
--- video_processor/providers/manager.py
+++ video_processor/providers/manager.py
@@ -18,13 +18,18 @@
18 """Import all built-in provider modules so they register themselves."""
19 if ProviderRegistry.all_registered():
20 return
21 # Each module registers itself on import via ProviderRegistry.register()
22 import video_processor.providers.anthropic_provider # noqa: F401
23 import video_processor.providers.azure_provider # noqa: F401
24 import video_processor.providers.cerebras_provider # noqa: F401
25 import video_processor.providers.fireworks_provider # noqa: F401
26 import video_processor.providers.gemini_provider # noqa: F401
27 import video_processor.providers.ollama_provider # noqa: F401
28 import video_processor.providers.openai_provider # noqa: F401
29 import video_processor.providers.together_provider # noqa: F401
30 import video_processor.providers.xai_provider # noqa: F401
31
32
33 # Default model preference rankings (tried in order)
34 _VISION_PREFERENCES = [
35 ("gemini", "gemini-2.5-flash"),
36
37 ADDED video_processor/providers/together_provider.py
38 ADDED video_processor/providers/xai_provider.py
--- a/video_processor/providers/together_provider.py
+++ b/video_processor/providers/together_provider.py
@@ -0,0 +1,20 @@
1
+"""Together AI provider implementation."""
2
+
3
+from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
4
+
5
+
6
+class TogetherProvider(OpenAICompatibleProvider):
7
+ """Together AI API provider (OpenAI-compatible)."""
8
+
9
+ provider_name = "together"
10
+ base_url = "https://api.together.xyz/v1"
11
+ env_var = "TOGETHER_API_KEY"
12
+
13
+
14
+ProviderRegistry.register(
15
+ name="together",
16
+ provider_class=TogetherProvider,
17
+ env_var="TOGETHER_API_KEY",
18
+ model_prefixes=["together/", "meta-llama/", "mistralai/", "Qwen/"],
19
+ default_models={"chat": "meta-llama/Llama-3-70b-chat-hf", "vision": "", "audio": ""},
20
+)
--- a/video_processor/providers/together_provider.py
+++ b/video_processor/providers/together_provider.py
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/video_processor/providers/together_provider.py
+++ b/video_processor/providers/together_provider.py
@@ -0,0 +1,20 @@
1 """Together AI provider implementation."""
2
3 from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
4
5
6 class TogetherProvider(OpenAICompatibleProvider):
7 """Together AI API provider (OpenAI-compatible)."""
8
9 provider_name = "together"
10 base_url = "https://api.together.xyz/v1"
11 env_var = "TOGETHER_API_KEY"
12
13
14 ProviderRegistry.register(
15 name="together",
16 provider_class=TogetherProvider,
17 env_var="TOGETHER_API_KEY",
18 model_prefixes=["together/", "meta-llama/", "mistralai/", "Qwen/"],
19 default_models={"chat": "meta-llama/Llama-3-70b-chat-hf", "vision": "", "audio": ""},
20 )
--- a/video_processor/providers/xai_provider.py
+++ b/video_processor/providers/xai_provider.py
@@ -0,0 +1,20 @@
1
+"""xAI (Grok) provider implementation."""
2
+
3
+from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
4
+
5
+
6
+class XAIProvider(OpenAICompatibleProvider):
7
+ """xAI API provider (OpenAI-compatible)."""
8
+
9
+ provider_name = "xai"
10
+ base_url = "https://api.x.ai/v1"
11
+ env_var = "XAI_API_KEY"
12
+
13
+
14
+ProviderRegistry.register(
15
+ name="xai",
16
+ provider_class=XAIProvider,
17
+ env_var="XAI_API_KEY",
18
+ model_prefixes=["grok-"],
19
+ default_models={"chat": "grok-2", "vision": "grok-2-vision", "audio": ""},
20
+)
--- a/video_processor/providers/xai_provider.py
+++ b/video_processor/providers/xai_provider.py
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/video_processor/providers/xai_provider.py
+++ b/video_processor/providers/xai_provider.py
@@ -0,0 +1,20 @@
1 """xAI (Grok) provider implementation."""
2
3 from video_processor.providers.base import OpenAICompatibleProvider, ProviderRegistry
4
5
6 class XAIProvider(OpenAICompatibleProvider):
7 """xAI API provider (OpenAI-compatible)."""
8
9 provider_name = "xai"
10 base_url = "https://api.x.ai/v1"
11 env_var = "XAI_API_KEY"
12
13
14 ProviderRegistry.register(
15 name="xai",
16 provider_class=XAIProvider,
17 env_var="XAI_API_KEY",
18 model_prefixes=["grok-"],
19 default_models={"chat": "grok-2", "vision": "grok-2-vision", "audio": ""},
20 )

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button