"""Tests for the provider abstraction layer."""

import importlib
from unittest.mock import MagicMock, patch

import pytest

from video_processor.providers.base import (
    BaseProvider,
    ModelInfo,
    OpenAICompatibleProvider,
    ProviderRegistry,
)
from video_processor.providers.manager import ProviderManager

# ---------------------------------------------------------------------------
# ModelInfo
# ---------------------------------------------------------------------------


class TestModelInfo:
    def test_basic(self):
        m = ModelInfo(id="gpt-4o", provider="openai", capabilities=["chat", "vision"])
        assert m.id == "gpt-4o"
        assert "vision" in m.capabilities

    def test_round_trip(self):
        m = ModelInfo(
            id="claude-sonnet-4-5-20250929",
            provider="anthropic",
            display_name="Claude Sonnet",
            capabilities=["chat", "vision"],
        )
        restored = ModelInfo.model_validate_json(m.model_dump_json())
        assert restored == m

    def test_defaults(self):
        m = ModelInfo(id="x", provider="y")
        assert m.display_name == ""
        assert m.capabilities == []


# ---------------------------------------------------------------------------
# ProviderRegistry
# ---------------------------------------------------------------------------


class TestProviderRegistry:
    """Test ProviderRegistry class methods.

    We save and restore the internal _providers dict around each test so that
    registrations from one test don't leak into another.
    """

    @pytest.fixture(autouse=True)
    def _save_restore_registry(self):
        original = dict(ProviderRegistry._providers)
        yield
        ProviderRegistry._providers = original

    def test_register_and_get(self):
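        # type("Dummy", (), {}) builds a fresh throwaway class, so each test
        # registers a distinct provider class with no shared state.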
        dummy_cls = type("Dummy", (), {})
        ProviderRegistry.register("test_prov", dummy_cls, env_var="TEST_KEY")
        assert ProviderRegistry.get("test_prov") is dummy_cls

    def test_get_unknown_raises(self):
        with pytest.raises(ValueError, match="Unknown provider"):
            ProviderRegistry.get("nonexistent_provider_xyz")

    def test_get_by_model_prefix(self):
        dummy_cls = type("Dummy", (), {})
        ProviderRegistry.register("myprov", dummy_cls, model_prefixes=["mymodel-"])
        assert ProviderRegistry.get_by_model("mymodel-7b") == "myprov"
        assert ProviderRegistry.get_by_model("othermodel-7b") is None

    def test_get_by_model_returns_none_for_no_match(self):
        assert ProviderRegistry.get_by_model("totally_unknown_model_xyz") is None

    def test_available_with_env_var(self):
        dummy_cls = type("Dummy", (), {})
        ProviderRegistry.register("envprov", dummy_cls, env_var="ENVPROV_KEY")
        # Not in env -> should not appear
        with patch.dict("os.environ", {}, clear=True):
            avail = ProviderRegistry.available()
            assert "envprov" not in avail

        # In env -> should appear
        with patch.dict("os.environ", {"ENVPROV_KEY": "secret"}):
            avail = ProviderRegistry.available()
            assert "envprov" in avail

    def test_available_no_env_var_required(self):
        dummy_cls = type("Dummy", (), {})
        ProviderRegistry.register("noenvprov", dummy_cls, env_var="")
        avail = ProviderRegistry.available()
        assert "noenvprov" in avail

    def test_all_registered(self):
        dummy_cls = type("Dummy", (), {})
        ProviderRegistry.register("regprov", dummy_cls, env_var="X", default_models={"chat": "m1"})
        all_reg = ProviderRegistry.all_registered()
        assert "regprov" in all_reg
        assert all_reg["regprov"]["class"] is dummy_cls

    def test_get_default_models(self):
        dummy_cls = type("Dummy", (), {})
        ProviderRegistry.register(
            "defprov", dummy_cls, default_models={"chat": "c1", "vision": "v1"}
        )
        defaults = ProviderRegistry.get_default_models("defprov")
        assert defaults == {"chat": "c1", "vision": "v1"}

    def test_get_default_models_unknown(self):
        assert ProviderRegistry.get_default_models("unknown_prov_xyz") == {}


# ---------------------------------------------------------------------------
# ProviderManager
# ---------------------------------------------------------------------------


class TestProviderManager:
    def _make_mock_provider(self, name="openai"):
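        # spec=BaseProvider constrains the mock to the real interface, so a
        # typo'd or removed method surfaces as an AttributeError in tests.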
        provider = MagicMock(spec=BaseProvider)
        provider.provider_name = name
        provider.chat.return_value = "test response"
        provider.analyze_image.return_value = "image analysis"
        provider.transcribe_audio.return_value = {
            "text": "hello world",
            "segments": [],
            "provider": name,
            "model": "test",
        }
        return provider

    def test_init_with_explicit_models(self):
        mgr = ProviderManager(
            vision_model="gpt-4o",
            chat_model="claude-sonnet-4-5-20250929",
            transcription_model="whisper-1",
        )
        assert mgr.vision_model == "gpt-4o"
        assert mgr.chat_model == "claude-sonnet-4-5-20250929"
        assert mgr.transcription_model == "whisper-1"

    def test_init_forced_provider(self):
        mgr = ProviderManager(provider="gemini")
        assert mgr.vision_model == "gemini-2.5-flash"
        assert mgr.chat_model == "gemini-2.5-flash"
        assert mgr.transcription_model == "gemini-2.5-flash"

    def test_init_forced_provider_ollama(self):
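        # Ollama has no static default models (local models come from runtime
        # discovery), so the defaults are expected to fall back to empty strings.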
        mgr = ProviderManager(provider="ollama")
        assert mgr.vision_model == ""
        assert mgr.chat_model == ""
        assert mgr.transcription_model == ""

    def test_init_no_overrides(self):
        mgr = ProviderManager()
        assert mgr.vision_model is None
        assert mgr.chat_model is None
        assert mgr.transcription_model is None
        assert mgr.auto is True

    def test_default_for_provider_gemini(self):
        result = ProviderManager._default_for_provider("gemini", "vision")
        assert result == "gemini-2.5-flash"

    def test_default_for_provider_openai(self):
        result = ProviderManager._default_for_provider("openai", "chat")
        assert isinstance(result, str)
        assert len(result) > 0

    def test_default_for_provider_unknown(self):
        result = ProviderManager._default_for_provider("nonexistent_xyz", "chat")
        assert result == ""

    def test_provider_for_model(self):
        mgr = ProviderManager()
        assert mgr._provider_for_model("gpt-4o") == "openai"
        assert mgr._provider_for_model("claude-sonnet-4-5-20250929") == "anthropic"
        assert mgr._provider_for_model("gemini-2.5-flash") == "gemini"
        assert mgr._provider_for_model("whisper-1") == "openai"

    def test_provider_for_model_ollama_via_discovery(self):
        mgr = ProviderManager()
        mgr._available_models = [
            ModelInfo(id="llama3.2:latest", provider="ollama", capabilities=["chat"]),
        ]
        assert mgr._provider_for_model("llama3.2:latest") == "ollama"

    def test_provider_for_model_ollama_fuzzy_tag(self):
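        # A bare name without the ":tag" suffix should still resolve to the
        # discovered "llama3.2:latest" entry.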
        mgr = ProviderManager()
        mgr._available_models = [
            ModelInfo(id="llama3.2:latest", provider="ollama", capabilities=["chat"]),
        ]
        assert mgr._provider_for_model("llama3.2") == "ollama"

    @patch.dict("os.environ", {"OPENAI_API_KEY": "test-key"})
    def test_chat_routes_to_provider(self):
        mgr = ProviderManager(chat_model="gpt-4o")
        mock_prov = self._make_mock_provider("openai")
        mgr._providers["openai"] = mock_prov

        result = mgr.chat([{"role": "user", "content": "hello"}])
        assert result == "test response"
        mock_prov.chat.assert_called_once()

    @patch.dict("os.environ", {"OPENAI_API_KEY": "test-key"})
    def test_analyze_image_routes(self):
        mgr = ProviderManager(vision_model="gpt-4o")
        mock_prov = self._make_mock_provider("openai")
        mgr._providers["openai"] = mock_prov

        result = mgr.analyze_image(b"fake-image", "describe this")
        assert result == "image analysis"
        mock_prov.analyze_image.assert_called_once()

    @patch.dict("os.environ", {"OPENAI_API_KEY": "test-key"})
    def test_transcribe_routes(self):
        mgr = ProviderManager(transcription_model="whisper-1")
        mock_prov = self._make_mock_provider("openai")
        mgr._providers["openai"] = mock_prov

        result = mgr.transcribe_audio("/tmp/test.wav")
        assert result["text"] == "hello world"
        mock_prov.transcribe_audio.assert_called_once()

    def test_get_models_used(self):
        mgr = ProviderManager(
            vision_model="gpt-4o",
            chat_model="claude-sonnet-4-5-20250929",
            transcription_model="whisper-1",
        )
        for name in ["openai", "anthropic"]:
            mgr._providers[name] = self._make_mock_provider(name)

        used = mgr.get_models_used()
        assert "vision" in used
        assert used["vision"] == "openai/gpt-4o"
        assert used["chat"] == "anthropic/claude-sonnet-4-5-20250929"

    def test_track_records_usage(self):
        mgr = ProviderManager(chat_model="gpt-4o")
        mock_prov = self._make_mock_provider("openai")
        mock_prov._last_usage = {"input_tokens": 10, "output_tokens": 20}
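        # The manager reads the provider's _last_usage after each call and
        # accumulates it into mgr.usage.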
        mgr._providers["openai"] = mock_prov

        mgr.chat([{"role": "user", "content": "hi"}])
        assert mgr.usage.total_input_tokens == 10
        assert mgr.usage.total_output_tokens == 20


# ---------------------------------------------------------------------------
# OpenAICompatibleProvider
# ---------------------------------------------------------------------------


class TestOpenAICompatibleProvider:
    @patch("openai.OpenAI")
    def test_chat(self, mock_openai_cls):
        mock_client = MagicMock()
        mock_openai_cls.return_value = mock_client
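
        # Shape the fake response like the OpenAI SDK's chat completion object:
        # choices[0].message.content plus prompt/completion token usage.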
        mock_choice = MagicMock()
        mock_choice.message.content = "hello back"
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.usage.prompt_tokens = 5
        mock_response.usage.completion_tokens = 10
        mock_client.chat.completions.create.return_value = mock_response

        provider = OpenAICompatibleProvider(api_key="test", base_url="http://test")
        result = provider.chat([{"role": "user", "content": "hi"}], model="test-model")
        assert result == "hello back"
        assert provider._last_usage == {"input_tokens": 5, "output_tokens": 10}

    @patch("openai.OpenAI")
    def test_analyze_image(self, mock_openai_cls):
        mock_client = MagicMock()
        mock_openai_cls.return_value = mock_client

        mock_choice = MagicMock()
        mock_choice.message.content = "a cat"
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.usage.prompt_tokens = 100
        mock_response.usage.completion_tokens = 5
        mock_client.chat.completions.create.return_value = mock_response

        provider = OpenAICompatibleProvider(api_key="test", base_url="http://test")
        result = provider.analyze_image(b"\x89PNG", "what is this?")
        assert result == "a cat"
        assert provider._last_usage["input_tokens"] == 100

    @patch("openai.OpenAI")
    def test_transcribe_raises(self, mock_openai_cls):
        provider = OpenAICompatibleProvider(api_key="test", base_url="http://test")
        with pytest.raises(NotImplementedError):
            provider.transcribe_audio("/tmp/audio.wav")

    @patch("openai.OpenAI")
    def test_list_models(self, mock_openai_cls):
        mock_client = MagicMock()
        mock_openai_cls.return_value = mock_client

        mock_model = MagicMock()
        mock_model.id = "test-model-1"
        mock_client.models.list.return_value = [mock_model]

        provider = OpenAICompatibleProvider(api_key="test", base_url="http://test")
        provider.provider_name = "testprov"
        models = provider.list_models()
        assert len(models) == 1
        assert models[0].id == "test-model-1"
        assert models[0].provider == "testprov"

    @patch("openai.OpenAI")
    def test_list_models_handles_error(self, mock_openai_cls):
        mock_client = MagicMock()
        mock_openai_cls.return_value = mock_client
        mock_client.models.list.side_effect = Exception("connection error")
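
        # list_models is expected to swallow connection failures and return
        # an empty list rather than raise.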
        provider = OpenAICompatibleProvider(api_key="test", base_url="http://test")
        models = provider.list_models()
        assert models == []


# ---------------------------------------------------------------------------
# Discovery
# ---------------------------------------------------------------------------


class TestDiscovery:
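    # The discover_* tests stack three patches: reset the module-level result
    # cache, force Ollama offline, and clear the environment so no real API
    # keys leak into discovery.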
    @patch("video_processor.providers.discovery._cached_models", None)
    @patch(
        "video_processor.providers.ollama_provider.OllamaProvider.is_available",
        return_value=False,
    )
    @patch.dict("os.environ", {}, clear=True)
    def test_discover_skips_missing_keys(self, mock_ollama):
        from video_processor.providers.discovery import discover_available_models

        models = discover_available_models(api_keys={"openai": "", "anthropic": "", "gemini": ""})
        assert models == []

    @patch.dict("os.environ", {}, clear=True)
    @patch(
        "video_processor.providers.ollama_provider.OllamaProvider.is_available",
        return_value=False,
    )
    @patch("video_processor.providers.discovery._cached_models", None)
    def test_discover_caches_results(self, mock_ollama):
        from video_processor.providers import discovery

        models = discovery.discover_available_models(
            api_keys={"openai": "", "anthropic": "", "gemini": ""}
        )
        assert models == []
        # Second call should use cache
        models2 = discovery.discover_available_models(api_keys={"openai": "key"})
        assert models2 == []  # Still cached empty result

        discovery.clear_discovery_cache()

    @patch("video_processor.providers.discovery._cached_models", None)
    @patch(
        "video_processor.providers.ollama_provider.OllamaProvider.is_available",
        return_value=False,
    )
    @patch.dict("os.environ", {}, clear=True)
    def test_force_refresh_clears_cache(self, mock_ollama):
        from video_processor.providers import discovery

        # Warm the cache
        discovery.discover_available_models(api_keys={"openai": "", "anthropic": "", "gemini": ""})
        # Force refresh should re-run
        models = discovery.discover_available_models(
            api_keys={"openai": "", "anthropic": "", "gemini": ""},
            force_refresh=True,
        )
        assert models == []

    def test_clear_discovery_cache(self):
        from video_processor.providers import discovery

        discovery._cached_models = [ModelInfo(id="x", provider="y")]
        discovery.clear_discovery_cache()
        assert discovery._cached_models is None


# ---------------------------------------------------------------------------
# OllamaProvider
# ---------------------------------------------------------------------------


class TestOllamaProvider:
    @patch("video_processor.providers.ollama_provider.requests")
    def test_is_available_when_running(self, mock_requests):
        mock_resp = MagicMock()
        mock_resp.status_code = 200
        mock_requests.get.return_value = mock_resp

        from video_processor.providers.ollama_provider import OllamaProvider

        assert OllamaProvider.is_available()

    @patch("video_processor.providers.ollama_provider.requests")
    def test_is_available_when_not_running(self, mock_requests):
        mock_requests.get.side_effect = ConnectionError

        from video_processor.providers.ollama_provider import OllamaProvider

        assert not OllamaProvider.is_available()

    @patch("video_processor.providers.ollama_provider.requests")
    @patch("video_processor.providers.ollama_provider.OpenAI")
    def test_transcribe_raises(self, mock_openai, mock_requests):
        from video_processor.providers.ollama_provider import OllamaProvider

        provider = OllamaProvider()
        with pytest.raises(NotImplementedError):
            provider.transcribe_audio("/tmp/test.wav")

    @patch("video_processor.providers.ollama_provider.requests")
    @patch("video_processor.providers.ollama_provider.OpenAI")
    def test_list_models(self, mock_openai, mock_requests):
        mock_resp = MagicMock()
        mock_resp.status_code = 200
        mock_resp.json.return_value = {
            "models": [
                {"name": "llama3.2:latest", "details": {"family": "llama"}},
                {"name": "llava:13b", "details": {"family": "llava"}},
            ]
        }
        mock_requests.get.return_value = mock_resp

        from video_processor.providers.ollama_provider import OllamaProvider

        provider = OllamaProvider()
        models = provider.list_models()
        assert len(models) == 2
        assert models[0].provider == "ollama"
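
        # Capabilities are derived from the model family reported by the
        # (mocked) Ollama tags API: llava-family models get "vision",
        # llama-family models stay chat-only.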
        llava = [m for m in models if "llava" in m.id][0]
        assert "vision" in llava.capabilities

        llama = [m for m in models if "llama" in m.id][0]
        assert "chat" in llama.capabilities
        assert "vision" not in llama.capabilities


# ---------------------------------------------------------------------------
# Provider module imports
# ---------------------------------------------------------------------------


class TestProviderImports:
    """Verify that all provider modules import without errors."""

    PROVIDER_MODULES = [
        "video_processor.providers.openai_provider",
        "video_processor.providers.anthropic_provider",
        "video_processor.providers.gemini_provider",
        "video_processor.providers.ollama_provider",
        "video_processor.providers.azure_provider",
        "video_processor.providers.together_provider",
        "video_processor.providers.fireworks_provider",
        "video_processor.providers.cerebras_provider",
        "video_processor.providers.xai_provider",
    ]

    @pytest.mark.parametrize("module_name", PROVIDER_MODULES)
    def test_import(self, module_name):
        mod = importlib.import_module(module_name)
        assert mod is not None