PlanOpticon

planopticon / video_processor / providers / ai21_provider.py
Blame History Raw 99 lines
1
"""AI21 Labs provider implementation."""
2
3
import logging
4
import os
5
from pathlib import Path
6
from typing import Optional
7
8
from dotenv import load_dotenv
9
10
from video_processor.providers.base import ModelInfo, OpenAICompatibleProvider, ProviderRegistry
11
12
load_dotenv()
13
logger = logging.getLogger(__name__)
14
15
# Curated list of AI21 models. All current Jamba models are chat-only;
# AI21 exposes no vision or transcription endpoints.
_AI21_MODELS = [
    ModelInfo(
        id=model_id,
        provider="ai21",
        display_name=display_name,
        capabilities=["chat"],
    )
    for model_id, display_name in (
        ("jamba-1.5-large", "Jamba 1.5 Large"),
        ("jamba-1.5-mini", "Jamba 1.5 Mini"),
        ("jamba-instruct", "Jamba Instruct"),
    )
]
36
37
38
class AI21Provider(OpenAICompatibleProvider):
    """AI21 Labs provider using the OpenAI-compatible Studio API.

    Supports chat only: :meth:`analyze_image` and :meth:`transcribe_audio`
    raise ``NotImplementedError`` because AI21 exposes no vision or
    transcription endpoints.
    """

    provider_name = "ai21"
    base_url = "https://api.ai21.com/studio/v1"
    env_var = "AI21_API_KEY"

    def __init__(self, api_key: Optional[str] = None):
        """Create the provider.

        Args:
            api_key: AI21 API key. Falls back to the environment variable
                named by the ``env_var`` class attribute when omitted.

        Raises:
            ValueError: If no API key is supplied or found in the environment.
        """
        # Use the declared class attribute rather than repeating the literal
        # "AI21_API_KEY" — keeps the lookup and the error message consistent
        # with ``env_var`` (including for subclasses that override it).
        api_key = api_key or os.getenv(self.env_var)
        if not api_key:
            raise ValueError(f"{self.env_var} not set")
        super().__init__(api_key=api_key, base_url=self.base_url)

    def chat(
        self,
        messages: list[dict],
        max_tokens: int = 4096,
        temperature: float = 0.7,
        model: Optional[str] = None,
    ) -> str:
        """Send a chat completion request and return the response text.

        Args:
            messages: OpenAI-style message dicts (``role``/``content``).
            max_tokens: Maximum tokens to generate.
            temperature: Sampling temperature.
            model: Model id; defaults to ``jamba-1.5-large`` (matches the
                registry's default chat model below).
        """
        model = model or "jamba-1.5-large"
        return super().chat(messages, max_tokens, temperature, model)

    def analyze_image(
        self,
        image_bytes: bytes,
        prompt: str,
        max_tokens: int = 4096,
        model: Optional[str] = None,
    ) -> str:
        """Unsupported: AI21 has no vision API.

        Raises:
            NotImplementedError: Always.
        """
        raise NotImplementedError(
            "AI21 does not currently support vision/image analysis. "
            "Use OpenAI, Anthropic, or Gemini for image analysis."
        )

    def transcribe_audio(
        self,
        audio_path: str | Path,
        language: Optional[str] = None,
        model: Optional[str] = None,
    ) -> dict:
        """Unsupported: AI21 has no transcription API.

        Raises:
            NotImplementedError: Always.
        """
        raise NotImplementedError(
            "AI21 does not provide a transcription API. "
            "Use OpenAI Whisper or Gemini for transcription."
        )

    def list_models(self) -> list[ModelInfo]:
        """Return a copy of the curated AI21 model list."""
        return list(_AI21_MODELS)
86
87
88
# AI21 offers chat models only, so the vision/audio defaults are left empty.
_AI21_DEFAULT_MODELS = {
    "chat": "jamba-1.5-large",
    "vision": "",
    "audio": "",
}

# Register this provider so model ids with the "jamba-" / "j2-" prefixes
# resolve to AI21Provider.
ProviderRegistry.register(
    name="ai21",
    provider_class=AI21Provider,
    env_var="AI21_API_KEY",
    model_prefixes=["jamba-", "j2-"],
    default_models=_AI21_DEFAULT_MODELS,
)
99

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button