"""LiteLLM universal proxy provider implementation."""

import base64
import logging
from pathlib import Path
from typing import Optional

from dotenv import load_dotenv

from video_processor.providers.base import BaseProvider, ModelInfo, ProviderRegistry

load_dotenv()
logger = logging.getLogger(__name__)


class LiteLLMProvider(BaseProvider):
    """LiteLLM universal proxy provider.

    LiteLLM supports 100+ LLM providers through a unified interface.
    It reads provider API keys from environment variables automatically
    (e.g. OPENAI_API_KEY, ANTHROPIC_API_KEY, etc.).
    """

    provider_name = "litellm"

    def __init__(self):
        try:
            import litellm
        except ImportError:
            raise ImportError("litellm package not installed. Install with: pip install litellm")

        self._litellm = litellm
        self._last_usage = {}

    def chat(
        self,
        messages: list[dict],
        max_tokens: int = 4096,
        temperature: float = 0.7,
        model: Optional[str] = None,
    ) -> str:
        """Send a chat completion request through LiteLLM."""
        if not model:
            raise ValueError(
                "LiteLLM requires an explicit model in provider/model format "
                "(e.g. 'openai/gpt-4o', 'anthropic/claude-3-sonnet-20240229')"
            )

        response = self._litellm.completion(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
        )

        # Record token counts so callers can track usage per request.
        usage = getattr(response, "usage", None)
        self._last_usage = {
            "input_tokens": getattr(usage, "prompt_tokens", 0) if usage else 0,
            "output_tokens": getattr(usage, "completion_tokens", 0) if usage else 0,
        }
        return response.choices[0].message.content or ""

    def analyze_image(
        self,
        image_bytes: bytes,
        prompt: str,
        max_tokens: int = 4096,
        model: Optional[str] = None,
    ) -> str:
        """Analyze an image with a vision-capable model via LiteLLM."""
        if not model:
            raise ValueError(
                "LiteLLM requires an explicit model for image analysis "
                "(e.g. 'openai/gpt-4o', 'anthropic/claude-3-sonnet-20240229')"
            )

        # Embed the image as a base64 data URL in the OpenAI-style
        # multimodal message format that LiteLLM accepts across providers.
        b64 = base64.b64encode(image_bytes).decode()

        response = self._litellm.completion(
            model=model,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {
                            "type": "image_url",
                            "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
                        },
                    ],
                }
            ],
            max_tokens=max_tokens,
        )

        usage = getattr(response, "usage", None)
        self._last_usage = {
            "input_tokens": getattr(usage, "prompt_tokens", 0) if usage else 0,
            "output_tokens": getattr(usage, "completion_tokens", 0) if usage else 0,
        }
        return response.choices[0].message.content or ""
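
    # Illustrative call with a hypothetical file path; note the method
    # labels the bytes as image/jpeg in the data URL, so JPEG input is
    # the safe assumption here:
    #
    #     frame = Path("frames/frame_0001.jpg").read_bytes()
    #     caption = provider.analyze_image(
    #         frame,
    #         prompt="Describe what happens in this frame.",
    #         model="openai/gpt-4o",
    #     )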

    def transcribe_audio(
        self,
        audio_path: str | Path,
        language: Optional[str] = None,
        model: Optional[str] = None,
    ) -> dict:
        """Transcribe an audio file through LiteLLM's transcription API."""
        model = model or "whisper-1"

        try:
            with open(audio_path, "rb") as f:
                response = self._litellm.transcription(
                    model=model,
                    file=f,
                    language=language,
                )

            text = getattr(response, "text", str(response))
            # Transcription responses do not report token counts.
            self._last_usage = {
                "input_tokens": 0,
                "output_tokens": 0,
            }

            return {
                "text": text,
                "segments": [],
                "language": language,
                "duration": None,
                "provider": "litellm",
                "model": model,
            }
        except Exception as e:
            raise NotImplementedError(
                "Audio transcription failed via LiteLLM. "
                "Ensure the underlying provider supports transcription."
            ) from e
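
    # Illustrative call with a hypothetical path, assuming credentials
    # for the default whisper-1 (an OpenAI model) are in the environment:
    #
    #     result = provider.transcribe_audio("audio/track.wav", language="en")
    #     print(result["text"])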

    def list_models(self) -> list[ModelInfo]:
        """Return models LiteLLM advertises, or an empty list on failure."""
        try:
            model_list = getattr(self._litellm, "model_list", None)
            if model_list:
                return [
                    ModelInfo(
                        id=m if isinstance(m, str) else str(m),
                        provider="litellm",
                        display_name=m if isinstance(m, str) else str(m),
                        capabilities=["chat"],
                    )
                    for m in model_list
                ]
        except Exception as e:
            logger.warning(f"Failed to list LiteLLM models: {e}")
        # Fall through to an empty list when model_list is unset or lookup fails.
        return []


# Register only if litellm is importable. LiteLLM has no dedicated API
# key env var or model prefix of its own (keys belong to the underlying
# providers), so those registry fields are left empty.
try:
    import litellm  # noqa: F401

    ProviderRegistry.register(
        name="litellm",
        provider_class=LiteLLMProvider,
        env_var="",
        model_prefixes=[],
        default_models={
            "chat": "",
            "vision": "",
            "audio": "",
        },
    )
except ImportError:
    pass
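
# A sketch of how the registration above is presumably consumed; the
# lookup method name below is an assumption, as the real accessor lives
# in video_processor.providers.base:
#
#     provider_cls = ProviderRegistry.get("litellm")  # hypothetical accessor
#     provider = provider_cls()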