PlanOpticon

planopticon / video_processor / providers / litellm_provider.py
Source Blame History 171 lines
0981a08… noreply 1 """LiteLLM universal proxy provider implementation."""
0981a08… noreply 2
0981a08… noreply 3 import base64
0981a08… noreply 4 import logging
0981a08… noreply 5 from pathlib import Path
0981a08… noreply 6 from typing import Optional
0981a08… noreply 7
0981a08… noreply 8 from dotenv import load_dotenv
0981a08… noreply 9
0981a08… noreply 10 from video_processor.providers.base import BaseProvider, ModelInfo, ProviderRegistry
0981a08… noreply 11
0981a08… noreply 12 load_dotenv()
0981a08… noreply 13 logger = logging.getLogger(__name__)
0981a08… noreply 14
0981a08… noreply 15
class LiteLLMProvider(BaseProvider):
    """LiteLLM universal proxy provider.

    LiteLLM supports 100+ LLM providers through a unified interface.
    It reads provider API keys from environment variables automatically
    (e.g. OPENAI_API_KEY, ANTHROPIC_API_KEY, etc.).
    """

    provider_name = "litellm"

    def __init__(self):
        """Bind the litellm module, failing fast if it is not installed.

        Raises:
            ImportError: If the ``litellm`` package cannot be imported.
        """
        try:
            import litellm  # noqa: F401
        except ImportError as e:
            # Chain the original error so the real import failure
            # (e.g. a broken transitive dependency) stays in the traceback.
            raise ImportError(
                "litellm package not installed. Install with: pip install litellm"
            ) from e

        self._litellm = litellm
        # Token counts from the most recent API call; see _record_usage().
        self._last_usage: dict = {}

    def _record_usage(self, response) -> None:
        """Stash token counts from a completion response into ``_last_usage``.

        LiteLLM normalizes usage to OpenAI-style ``prompt_tokens`` /
        ``completion_tokens`` attributes; anything missing defaults to 0.
        """
        usage = getattr(response, "usage", None)
        self._last_usage = {
            "input_tokens": getattr(usage, "prompt_tokens", 0) if usage else 0,
            "output_tokens": getattr(usage, "completion_tokens", 0) if usage else 0,
        }

    def chat(
        self,
        messages: list[dict],
        max_tokens: int = 4096,
        temperature: float = 0.7,
        model: Optional[str] = None,
    ) -> str:
        """Run a chat completion through LiteLLM.

        Args:
            messages: OpenAI-style message dicts (``role`` / ``content``).
            max_tokens: Upper bound on generated tokens.
            temperature: Sampling temperature.
            model: Required ``provider/model`` string; LiteLLM routes on the
                prefix, so there is no usable default.

        Returns:
            The assistant message content, or ``""`` if the provider
            returned ``None`` content.

        Raises:
            ValueError: If ``model`` is not supplied.
        """
        if not model:
            raise ValueError(
                "LiteLLM requires an explicit model in provider/model format "
                "(e.g. 'openai/gpt-4o', 'anthropic/claude-3-sonnet-20240229')"
            )

        response = self._litellm.completion(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
        )
        self._record_usage(response)
        return response.choices[0].message.content or ""

    def analyze_image(
        self,
        image_bytes: bytes,
        prompt: str,
        max_tokens: int = 4096,
        model: Optional[str] = None,
    ) -> str:
        """Send an image plus a text prompt to a vision-capable model.

        Args:
            image_bytes: Raw image data. Sent with an ``image/jpeg`` MIME
                type — NOTE(review): non-JPEG inputs are still labeled jpeg;
                most providers sniff the real format, but confirm for yours.
            prompt: Question/instruction accompanying the image.
            max_tokens: Upper bound on generated tokens.
            model: Required ``provider/model`` string for a vision model.

        Returns:
            The assistant message content, or ``""`` if content was ``None``.

        Raises:
            ValueError: If ``model`` is not supplied.
        """
        if not model:
            raise ValueError(
                "LiteLLM requires an explicit model for image analysis "
                "(e.g. 'openai/gpt-4o', 'anthropic/claude-3-sonnet-20240229')"
            )

        b64 = base64.b64encode(image_bytes).decode()

        response = self._litellm.completion(
            model=model,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {
                            "type": "image_url",
                            "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
                        },
                    ],
                }
            ],
            max_tokens=max_tokens,
        )
        self._record_usage(response)
        return response.choices[0].message.content or ""

    def transcribe_audio(
        self,
        audio_path: str | Path,
        language: Optional[str] = None,
        model: Optional[str] = None,
    ) -> dict:
        """Transcribe an audio file via LiteLLM's transcription endpoint.

        Args:
            audio_path: Path to the audio file to transcribe.
            language: Optional language hint passed through to the provider.
            model: Transcription model; defaults to ``"whisper-1"``.

        Returns:
            A dict with ``text``, empty ``segments``, the requested
            ``language``, ``duration`` of ``None`` (not reported by this
            path), plus ``provider`` and ``model`` metadata.

        Raises:
            NotImplementedError: If the underlying provider call fails for
                any reason (chained from the original exception).
        """
        model = model or "whisper-1"

        try:
            with open(audio_path, "rb") as f:
                response = self._litellm.transcription(
                    model=model,
                    file=f,
                    language=language,
                )

            # Some providers return a plain string; fall back to str().
            text = getattr(response, "text", str(response))
            # Transcription responses carry no token usage to record.
            self._last_usage = {
                "input_tokens": 0,
                "output_tokens": 0,
            }

            return {
                "text": text,
                "segments": [],
                "language": language,
                "duration": None,
                "provider": "litellm",
                "model": model,
            }
        except Exception as e:
            # Chain the cause so callers can see whether this was a missing
            # file, an unsupported provider, or an API error.
            raise NotImplementedError(
                "Audio transcription failed via LiteLLM. "
                "Ensure the underlying provider supports transcription."
            ) from e

    def list_models(self) -> list[ModelInfo]:
        """Enumerate models from litellm's bundled ``model_list``, if present.

        Best-effort: returns ``[]`` when litellm exposes no model list or
        when reading it fails (the error is logged, not raised).
        """
        try:
            model_list = getattr(self._litellm, "model_list", None)
            if model_list:
                infos = []
                for m in model_list:
                    # Normalize once; entries may not be plain strings.
                    model_id = m if isinstance(m, str) else str(m)
                    infos.append(
                        ModelInfo(
                            id=model_id,
                            provider="litellm",
                            display_name=model_id,
                            capabilities=["chat"],
                        )
                    )
                return infos
        except Exception as e:
            # Lazy %-style args avoid formatting when the level is disabled.
            logger.warning("Failed to list LiteLLM models: %s", e)
        return []
0981a08… noreply 153
0981a08… noreply 154
# Only register if litellm is importable
# Module-level side effect: makes "litellm" available through ProviderRegistry
# whenever the optional dependency is installed; silently skipped otherwise.
try:
    import litellm  # noqa: F401

    ProviderRegistry.register(
        name="litellm",
        provider_class=LiteLLMProvider,
        # Empty env_var: LiteLLM reads each upstream provider's own key
        # (OPENAI_API_KEY, ANTHROPIC_API_KEY, ...) from the environment,
        # so there is no single key to declare here.
        env_var="",
        # No model prefixes and no default models: callers must pass an
        # explicit provider/model string (the provider raises otherwise).
        model_prefixes=[],
        default_models={
            "chat": "",
            "vision": "",
            "audio": "",
        },
    )
except ImportError:
    pass

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button