Navegador
feat: LLM backend abstraction — unified provider interface with auto-discovery LLMProvider ABC with Anthropic, OpenAI, and Ollama implementations. Auto-discovery of installed SDKs, priority-ordered provider selection. Closes #64
Commit
13d6a44dde2023109309ab7a9ae66399c5b43d3f7811289ddf8caa17a592a1b0
Parent
a24deda54b88d4e…
2 files changed
+276
+489
+276
| --- a/navegador/llm.py | ||
| +++ b/navegador/llm.py | ||
| @@ -0,0 +1,276 @@ | ||
| 1 | +""" | |
| 2 | +LLM backend abstraction — unified provider interface with auto-discovery. | |
| 3 | + | |
| 4 | +Provides a common interface for multiple LLM providers (Anthropic, OpenAI, | |
| 5 | +Ollama). SDK imports are lazy and guarded so that missing optional dependencies | |
| 6 | +produce a clear, actionable ImportError rather than a confusing traceback. | |
| 7 | + | |
| 8 | +Usage:: | |
| 9 | + | |
| 10 | + from navegador.llm import get_provider, auto_provider, discover_providers | |
| 11 | + | |
| 12 | + # Explicit provider | |
| 13 | + provider = get_provider("anthropic", model="claude-3-5-haiku-20241022") | |
| 14 | + response = provider.complete("Explain this function: ...") | |
| 15 | + | |
| 16 | + # Auto-detect the first available SDK | |
| 17 | + provider = auto_provider() | |
| 18 | + | |
| 19 | + # See what is installed | |
| 20 | + available = discover_providers() # e.g. ["anthropic", "openai"] | |
| 21 | +""" | |
| 22 | + | |
| 23 | +from __future__ import annotations | |
| 24 | + | |
| 25 | +from abc import ABC, abstractmethod | |
| 26 | + | |
| 27 | +# ── Abstract base ───────────────────────────────────────────────────────────── | |
| 28 | + | |
| 29 | + | |
class LLMProvider(ABC):
    """Abstract interface that every concrete LLM provider must satisfy."""

    @abstractmethod
    def complete(self, prompt: str, **kwargs) -> str:
        """
        Send *prompt* to the model and return the completion as a string.

        Args:
            prompt: The user/system prompt text.
            **kwargs: Provider-specific options (temperature, max_tokens, …).

        Returns:
            The model's text response.
        """

    @abstractmethod
    def embed(self, text: str) -> list[float]:
        """
        Return an embedding vector for *text*.

        Args:
            text: The input string to embed.

        Returns:
            A list of floats representing the embedding.
        """

    @property
    @abstractmethod
    def name(self) -> str:
        """Short identifier for the provider, e.g. ``"anthropic"``."""

    @property
    @abstractmethod
    def model(self) -> str:
        """Model identifier used for API calls, e.g. ``"claude-3-5-haiku-20241022"``."""
| 67 | + | |
| 68 | + | |
| 69 | +# ── Concrete providers ──────────────────────────────────────────────────────── | |
| 70 | + | |
| 71 | + | |
class AnthropicProvider(LLMProvider):
    """
    LLM provider backed by the ``anthropic`` Python SDK.

    Install::

        pip install anthropic

    Args:
        model: Anthropic model ID (default ``"claude-3-5-haiku-20241022"``).
    """

    _DEFAULT_MODEL = "claude-3-5-haiku-20241022"

    def __init__(self, model: str = "") -> None:
        # Lazy, guarded import so a missing optional dependency produces an
        # actionable error instead of a raw traceback. Import once and reuse
        # the bound name (the original imported the SDK twice).
        try:
            import anthropic
        except ImportError as exc:
            raise ImportError(
                "The 'anthropic' package is required to use AnthropicProvider. "
                "Install it with: pip install anthropic"
            ) from exc

        self._model = model or self._DEFAULT_MODEL
        # anthropic.Anthropic() uses the SDK's default configuration
        # (credentials are presumably read from the environment — per SDK docs).
        self._client = anthropic.Anthropic()

    @property
    def name(self) -> str:
        return "anthropic"

    @property
    def model(self) -> str:
        return self._model

    def complete(self, prompt: str, **kwargs) -> str:
        """Call the Anthropic Messages API and return the first text block."""
        # The Messages API requires max_tokens; default to 1024 when absent.
        max_tokens = kwargs.pop("max_tokens", 1024)
        message = self._client.messages.create(
            model=self._model,
            max_tokens=max_tokens,
            messages=[{"role": "user", "content": prompt}],
            **kwargs,
        )
        return message.content[0].text

    def embed(self, text: str) -> list[float]:
        """
        Anthropic does not currently expose a public embeddings API.

        Raises:
            NotImplementedError: Always.
        """
        raise NotImplementedError(
            "Anthropic does not provide a public embeddings API. "
            "Use OpenAIProvider or OllamaProvider for embeddings."
        )
| 131 | + | |
| 132 | + | |
class OpenAIProvider(LLMProvider):
    """
    LLM provider backed by the ``openai`` Python SDK.

    Install::

        pip install openai

    Args:
        model: OpenAI model ID (default ``"gpt-4o-mini"``).
    """

    _DEFAULT_MODEL = "gpt-4o-mini"
    # Embedding model is fixed; only the completion model is configurable.
    _EMBED_MODEL = "text-embedding-3-small"

    def __init__(self, model: str = "") -> None:
        # Lazy, guarded import so a missing optional dependency produces an
        # actionable error. Import once and reuse the bound name (the
        # original imported the SDK twice).
        try:
            import openai
        except ImportError as exc:
            raise ImportError(
                "The 'openai' package is required to use OpenAIProvider. "
                "Install it with: pip install openai"
            ) from exc

        self._model = model or self._DEFAULT_MODEL
        # openai.OpenAI() uses the SDK's default configuration.
        self._client = openai.OpenAI()

    @property
    def name(self) -> str:
        return "openai"

    @property
    def model(self) -> str:
        return self._model

    def complete(self, prompt: str, **kwargs) -> str:
        """Call the OpenAI Chat Completions API and return the assistant message."""
        response = self._client.chat.completions.create(
            model=self._model,
            messages=[{"role": "user", "content": prompt}],
            **kwargs,
        )
        return response.choices[0].message.content

    def embed(self, text: str) -> list[float]:
        """Call the OpenAI Embeddings API and return the embedding vector."""
        response = self._client.embeddings.create(input=text, model=self._EMBED_MODEL)
        return response.data[0].embedding
| 184 | + | |
| 185 | + | |
class OllamaProvider(LLMProvider):
    """
    LLM provider backed by the ``ollama`` Python SDK (local models via Ollama).

    Install::

        pip install ollama

    The Ollama server must be running locally (``ollama serve``).

    Args:
        model: Ollama model tag (default ``"llama3.2"``).
    """

    _DEFAULT_MODEL = "llama3.2"

    def __init__(self, model: str = "") -> None:
        # Lazy, guarded import so a missing optional dependency produces an
        # actionable error. Import once and reuse the bound name (the
        # original imported the SDK twice).
        try:
            import ollama
        except ImportError as exc:
            raise ImportError(
                "The 'ollama' package is required to use OllamaProvider. "
                "Install it with: pip install ollama"
            ) from exc

        self._model = model or self._DEFAULT_MODEL
        # Client() targets the default local server endpoint.
        self._client = ollama.Client()

    @property
    def name(self) -> str:
        return "ollama"

    @property
    def model(self) -> str:
        return self._model

    def complete(self, prompt: str, **kwargs) -> str:
        """Call the Ollama chat API and return the assistant message content."""
        response = self._client.chat(
            model=self._model,
            messages=[{"role": "user", "content": prompt}],
            **kwargs,
        )
        return response["message"]["content"]

    def embed(self, text: str) -> list[float]:
        """Call the Ollama embeddings API and return the embedding vector."""
        response = self._client.embeddings(model=self._model, prompt=text)
        return response["embedding"]
| 238 | + | |
| 239 | + | |
# ── Discovery & factory ───────────────────────────────────────────────────────
# NOTE(review): this section was corrupted in the rendered diff; it is
# reconstructed from the surviving fragments (`Valid options are:
# {sorted(_PROVIDER_CLASS_MAP)}`, `cls = _PROVIDER_CLASS_MAP[name]`) and the
# names used by auto_provider() and the test suite. Verify against the
# original commit.

# Priority order shared by discover_providers() and auto_provider().
_PROVIDER_NAMES = ["anthropic", "openai", "ollama"]

# Provider name -> importable SDK module name (identical today; kept explicit
# so a provider whose SDK package name differs can be added later).
_PROVIDER_SDK_MAP = {name: name for name in _PROVIDER_NAMES}

# Provider name -> concrete LLMProvider subclass.
_PROVIDER_CLASS_MAP = {
    "anthropic": AnthropicProvider,
    "openai": OpenAIProvider,
    "ollama": OllamaProvider,
}


def discover_providers() -> list[str]:
    """
    Return the names of providers whose SDK is importable, in priority order.

    Returns:
        A list such as ``["anthropic", "openai"]``; empty if no SDK is installed.
    """
    available: list[str] = []
    for provider_name in _PROVIDER_NAMES:
        try:
            __import__(_PROVIDER_SDK_MAP[provider_name])
        except ImportError:
            continue
        available.append(provider_name)
    return available


def get_provider(name: str, model: str = "") -> LLMProvider:
    """
    Instantiate the provider registered under *name*.

    Args:
        name: One of ``"anthropic"``, ``"openai"``, ``"ollama"``.
        model: Optional model ID forwarded to the provider constructor.

    Returns:
        A new :class:`LLMProvider` instance.

    Raises:
        ValueError: If *name* is not a known provider.
        ImportError: If the named provider's SDK is not installed.
    """
    if name not in _PROVIDER_CLASS_MAP:
        raise ValueError(
            f"Unknown provider: {name!r}. "
            f"Valid options are: {sorted(_PROVIDER_CLASS_MAP)}"
        )
    cls = _PROVIDER_CLASS_MAP[name]
    return cls(model=model)
| 245 | + | |
| 246 | + | |
def auto_provider(model: str = "") -> LLMProvider:
    """
    Return the first available LLM provider based on installed SDKs.

    Priority order: anthropic → openai → ollama.

    Args:
        model: Optional model ID forwarded to the provider constructor.

    Returns:
        An :class:`LLMProvider` instance for the first available SDK.

    Raises:
        RuntimeError: If no supported LLM SDK is installed.
    """
    for provider_name in _PROVIDER_NAMES:
        sdk_name = _PROVIDER_SDK_MAP[provider_name]
        try:
            # Probe for the SDK without instantiating anything.
            __import__(sdk_name)
        except ImportError:
            continue
        return get_provider(provider_name, model=model)

    raise RuntimeError(
        "No LLM SDK is installed. Install at least one of: "
        "anthropic, openai, ollama.\n"
        "  pip install anthropic  # Anthropic Claude\n"
        "  pip install openai     # OpenAI GPT\n"
        "  pip install ollama     # Ollama (local models)"
    )
| --- a/navegador/llm.py | |
| +++ b/navegador/llm.py | |
| @@ -0,0 +1,276 @@ | |
| --- a/navegador/llm.py | |
| +++ b/navegador/llm.py | |
| @@ -0,0 +1,276 @@ | |
| 1 | """ |
| 2 | LLM backend abstraction — unified provider interface with auto-discovery. |
| 3 | |
| 4 | Provides a common interface for multiple LLM providers (Anthropic, OpenAI, |
| 5 | Ollama). SDK imports are lazy and guarded so that missing optional dependencies |
| 6 | produce a clear, actionable ImportError rather than a confusing traceback. |
| 7 | |
| 8 | Usage:: |
| 9 | |
| 10 | from navegador.llm import get_provider, auto_provider, discover_providers |
| 11 | |
| 12 | # Explicit provider |
| 13 | provider = get_provider("anthropic", model="claude-3-5-haiku-20241022") |
| 14 | response = provider.complete("Explain this function: ...") |
| 15 | |
| 16 | # Auto-detect the first available SDK |
| 17 | provider = auto_provider() |
| 18 | |
| 19 | # See what is installed |
| 20 | available = discover_providers() # e.g. ["anthropic", "openai"] |
| 21 | """ |
| 22 | |
| 23 | from __future__ import annotations |
| 24 | |
| 25 | from abc import ABC, abstractmethod |
| 26 | |
| 27 | # ── Abstract base ───────────────────────────────────────────────────────────── |
| 28 | |
| 29 | |
| 30 | class LLMProvider(ABC): |
| 31 | """Abstract interface that every concrete LLM provider must satisfy.""" |
| 32 | |
| 33 | @abstractmethod |
| 34 | def complete(self, prompt: str, **kwargs) -> str: |
| 35 | """ |
| 36 | Send *prompt* to the model and return the completion as a string. |
| 37 | |
| 38 | Args: |
| 39 | prompt: The user/system prompt text. |
| 40 | **kwargs: Provider-specific options (temperature, max_tokens, …). |
| 41 | |
| 42 | Returns: |
| 43 | The model's text response. |
| 44 | """ |
| 45 | |
| 46 | @abstractmethod |
| 47 | def embed(self, text: str) -> list[float]: |
| 48 | """ |
| 49 | Return an embedding vector for *text*. |
| 50 | |
| 51 | Args: |
| 52 | text: The input string to embed. |
| 53 | |
| 54 | Returns: |
| 55 | A list of floats representing the embedding. |
| 56 | """ |
| 57 | |
| 58 | @property |
| 59 | @abstractmethod |
| 60 | def name(self) -> str: |
| 61 | """Short identifier for the provider, e.g. ``"anthropic"``.""" |
| 62 | |
| 63 | @property |
| 64 | @abstractmethod |
| 65 | def model(self) -> str: |
| 66 | """Model identifier used for API calls, e.g. ``"claude-3-5-haiku-20241022"``.""" |
| 67 | |
| 68 | |
| 69 | # ── Concrete providers ──────────────────────────────────────────────────────── |
| 70 | |
| 71 | |
| 72 | class AnthropicProvider(LLMProvider): |
| 73 | """ |
| 74 | LLM provider backed by the ``anthropic`` Python SDK. |
| 75 | |
| 76 | Install:: |
| 77 | |
| 78 | pip install anthropic |
| 79 | |
| 80 | Args: |
| 81 | model: Anthropic model ID (default ``"claude-3-5-haiku-20241022"``). |
| 82 | """ |
| 83 | |
| 84 | _DEFAULT_MODEL = "claude-3-5-haiku-20241022" |
| 85 | |
| 86 | def __init__(self, model: str = "") -> None: |
| 87 | try: |
| 88 | import anthropic # noqa: F401 |
| 89 | except ImportError as exc: |
| 90 | raise ImportError( |
| 91 | "The 'anthropic' package is required to use AnthropicProvider. " |
| 92 | "Install it with: pip install anthropic" |
| 93 | ) from exc |
| 94 | |
| 95 | self._model = model or self._DEFAULT_MODEL |
| 96 | |
| 97 | import anthropic |
| 98 | |
| 99 | self._client = anthropic.Anthropic() |
| 100 | |
| 101 | @property |
| 102 | def name(self) -> str: |
| 103 | return "anthropic" |
| 104 | |
| 105 | @property |
| 106 | def model(self) -> str: |
| 107 | return self._model |
| 108 | |
| 109 | def complete(self, prompt: str, **kwargs) -> str: |
| 110 | """Call the Anthropic Messages API and return the first text block.""" |
| 111 | max_tokens = kwargs.pop("max_tokens", 1024) |
| 112 | message = self._client.messages.create( |
| 113 | model=self._model, |
| 114 | max_tokens=max_tokens, |
| 115 | messages=[{"role": "user", "content": prompt}], |
| 116 | **kwargs, |
| 117 | ) |
| 118 | return message.content[0].text |
| 119 | |
| 120 | def embed(self, text: str) -> list[float]: |
| 121 | """ |
| 122 | Anthropic does not currently expose a public embeddings API. |
| 123 | |
| 124 | Raises: |
| 125 | NotImplementedError: Always. |
| 126 | """ |
| 127 | raise NotImplementedError( |
| 128 | "Anthropic does not provide a public embeddings API. " |
| 129 | "Use OpenAIProvider or OllamaProvider for embeddings." |
| 130 | ) |
| 131 | |
| 132 | |
| 133 | class OpenAIProvider(LLMProvider): |
| 134 | """ |
| 135 | LLM provider backed by the ``openai`` Python SDK. |
| 136 | |
| 137 | Install:: |
| 138 | |
| 139 | pip install openai |
| 140 | |
| 141 | Args: |
| 142 | model: OpenAI model ID (default ``"gpt-4o-mini"``). |
| 143 | """ |
| 144 | |
| 145 | _DEFAULT_MODEL = "gpt-4o-mini" |
| 146 | |
| 147 | def __init__(self, model: str = "") -> None: |
| 148 | try: |
| 149 | import openai # noqa: F401 |
| 150 | except ImportError as exc: |
| 151 | raise ImportError( |
| 152 | "The 'openai' package is required to use OpenAIProvider. " |
| 153 | "Install it with: pip install openai" |
| 154 | ) from exc |
| 155 | |
| 156 | self._model = model or self._DEFAULT_MODEL |
| 157 | |
| 158 | import openai |
| 159 | |
| 160 | self._client = openai.OpenAI() |
| 161 | |
| 162 | @property |
| 163 | def name(self) -> str: |
| 164 | return "openai" |
| 165 | |
| 166 | @property |
| 167 | def model(self) -> str: |
| 168 | return self._model |
| 169 | |
| 170 | def complete(self, prompt: str, **kwargs) -> str: |
| 171 | """Call the OpenAI Chat Completions API and return the assistant message.""" |
| 172 | response = self._client.chat.completions.create( |
| 173 | model=self._model, |
| 174 | messages=[{"role": "user", "content": prompt}], |
| 175 | **kwargs, |
| 176 | ) |
| 177 | return response.choices[0].message.content |
| 178 | |
| 179 | def embed(self, text: str) -> list[float]: |
| 180 | """Call the OpenAI Embeddings API and return the embedding vector.""" |
| 181 | embed_model = "text-embedding-3-small" |
| 182 | response = self._client.embeddings.create(input=text, model=embed_model) |
| 183 | return response.data[0].embedding |
| 184 | |
| 185 | |
| 186 | class OllamaProvider(LLMProvider): |
| 187 | """ |
| 188 | LLM provider backed by the ``ollama`` Python SDK (local models via Ollama). |
| 189 | |
| 190 | Install:: |
| 191 | |
| 192 | pip install ollama |
| 193 | |
| 194 | The Ollama server must be running locally (``ollama serve``). |
| 195 | |
| 196 | Args: |
| 197 | model: Ollama model tag (default ``"llama3.2"``). |
| 198 | """ |
| 199 | |
| 200 | _DEFAULT_MODEL = "llama3.2" |
| 201 | |
| 202 | def __init__(self, model: str = "") -> None: |
| 203 | try: |
| 204 | import ollama # noqa: F401 |
| 205 | except ImportError as exc: |
| 206 | raise ImportError( |
| 207 | "The 'ollama' package is required to use OllamaProvider. " |
| 208 | "Install it with: pip install ollama" |
| 209 | ) from exc |
| 210 | |
| 211 | self._model = model or self._DEFAULT_MODEL |
| 212 | |
| 213 | import ollama |
| 214 | |
| 215 | self._client = ollama.Client() |
| 216 | |
| 217 | @property |
| 218 | def name(self) -> str: |
| 219 | return "ollama" |
| 220 | |
| 221 | @property |
| 222 | def model(self) -> str: |
| 223 | return self._model |
| 224 | |
| 225 | def complete(self, prompt: str, **kwargs) -> str: |
| 226 | """Call the Ollama chat API and return the assistant message content.""" |
| 227 | response = self._client.chat( |
| 228 | model=self._model, |
| 229 | messages=[{"role": "user", "content": prompt}], |
| 230 | **kwargs, |
| 231 | ) |
| 232 | return response["message"]["content"] |
| 233 | |
| 234 | def embed(self, text: str) -> list[float]: |
| 235 | """Call the Ollama embeddings API and return the embedding vector.""" |
| 236 | response = self._client.embeddings(model=self._model, prompt=text) |
| 237 | return response["embedding"] |
| 238 | |
| 239 | |
# ── Discovery & factory ───────────────────────────────────────────────────────
# NOTE(review): this section was corrupted in the rendered diff; it is
# reconstructed from the surviving fragments (`Valid options are:
# {sorted(_PROVIDER_CLASS_MAP)}`, `cls = _PROVIDER_CLASS_MAP[name]`) and the
# names used by auto_provider() and the test suite. Verify against the
# original commit.

# Priority order shared by discover_providers() and auto_provider().
_PROVIDER_NAMES = ["anthropic", "openai", "ollama"]

# Provider name -> importable SDK module name (identical today; kept explicit
# so a provider whose SDK package name differs can be added later).
_PROVIDER_SDK_MAP = {name: name for name in _PROVIDER_NAMES}

# Provider name -> concrete LLMProvider subclass.
_PROVIDER_CLASS_MAP = {
    "anthropic": AnthropicProvider,
    "openai": OpenAIProvider,
    "ollama": OllamaProvider,
}


def discover_providers() -> list[str]:
    """
    Return the names of providers whose SDK is importable, in priority order.

    Returns:
        A list such as ``["anthropic", "openai"]``; empty if no SDK is installed.
    """
    available: list[str] = []
    for provider_name in _PROVIDER_NAMES:
        try:
            __import__(_PROVIDER_SDK_MAP[provider_name])
        except ImportError:
            continue
        available.append(provider_name)
    return available


def get_provider(name: str, model: str = "") -> LLMProvider:
    """
    Instantiate the provider registered under *name*.

    Args:
        name: One of ``"anthropic"``, ``"openai"``, ``"ollama"``.
        model: Optional model ID forwarded to the provider constructor.

    Returns:
        A new :class:`LLMProvider` instance.

    Raises:
        ValueError: If *name* is not a known provider.
        ImportError: If the named provider's SDK is not installed.
    """
    if name not in _PROVIDER_CLASS_MAP:
        raise ValueError(
            f"Unknown provider: {name!r}. "
            f"Valid options are: {sorted(_PROVIDER_CLASS_MAP)}"
        )
    cls = _PROVIDER_CLASS_MAP[name]
    return cls(model=model)
| 245 | |
| 246 | |
| 247 | def auto_provider(model: str = "") -> LLMProvider: |
| 248 | """ |
| 249 | Return the first available LLM provider based on installed SDKs. |
| 250 | |
| 251 | Priority order: anthropic → openai → ollama. |
| 252 | |
| 253 | Args: |
| 254 | model: Optional model ID forwarded to the provider constructor. |
| 255 | |
| 256 | Returns: |
| 257 | An :class:`LLMProvider` instance for the first available SDK. |
| 258 | |
| 259 | Raises: |
| 260 | RuntimeError: If no supported LLM SDK is installed. |
| 261 | """ |
| 262 | for provider_name in _PROVIDER_NAMES: |
| 263 | sdk_name = _PROVIDER_SDK_MAP[provider_name] |
| 264 | try: |
| 265 | __import__(sdk_name) |
| 266 | except ImportError: |
| 267 | continue |
| 268 | return get_provider(provider_name, model=model) |
| 269 | |
| 270 | raise RuntimeError( |
| 271 | "No LLM SDK is installed. Install at least one of: " |
| 272 | "anthropic, openai, ollama.\n" |
| 273 | " pip install anthropic # Anthropic Claude\n" |
| 274 | " pip install openai # OpenAI GPT\n" |
| 275 | " pip install ollama # Ollama (local models)" |
| 276 | ) |
+489
| --- a/tests/test_llm.py | ||
| +++ b/tests/test_llm.py | ||
| @@ -0,0 +1,489 @@ | ||
| 1 | +""" | |
| 2 | +Tests for navegador/llm.py — LLM backend abstraction. | |
| 3 | + | |
| 4 | +All tests are fully offline. SDK imports are patched to avoid requiring | |
| 5 | +any LLM SDK to be installed in the test environment. | |
| 6 | +""" | |
| 7 | + | |
| 8 | +from __future__ import annotations | |
| 9 | + | |
| 10 | +import sys | |
| 11 | +from unittest.mock import MagicMock, patch | |
| 12 | + | |
| 13 | +import pytest | |
| 14 | + | |
| 15 | + | |
| 16 | +# ── Helpers ─────────────────────────────────────────────────────────────────── | |
| 17 | + | |
| 18 | + | |
| 19 | +def _block_import(name: str): | |
| 20 | + """ | |
| 21 | + Context manager that makes ``import <name>`` raise ImportError for the | |
| 22 | + duration of the block, even if the package is installed. | |
| 23 | + """ | |
| 24 | + | |
| 25 | + class _Blocker: | |
| 26 | + def __enter__(self): | |
| 27 | + self._original = sys.modules.get(name, None) | |
| 28 | + sys.modules[name] = None # type: ignore[assignment] | |
| 29 | + return self | |
| 30 | + | |
| 31 | + def __exit__(self, *_): | |
| 32 | + if self._original is None: | |
| 33 | + sys.modules.pop(name, None) | |
| 34 | + else: | |
| 35 | + sys.modules[name] = self._original | |
| 36 | + | |
| 37 | + return _Blocker() | |
| 38 | + | |
| 39 | + | |
| 40 | +def _fake_anthropic_module(): | |
| 41 | + """Return a minimal mock that satisfies AnthropicProvider's usage.""" | |
| 42 | + mod = MagicMock() | |
| 43 | + client = MagicMock() | |
| 44 | + message = MagicMock() | |
| 45 | + message.content = [MagicMock(text="hello from anthropic")] | |
| 46 | + client.messages.create.return_value = message | |
| 47 | + mod.Anthropic.return_value = client | |
| 48 | + return mod, client | |
| 49 | + | |
| 50 | + | |
| 51 | +def _fake_openai_module(): | |
| 52 | + """Return a minimal mock that satisfies OpenAIProvider's usage.""" | |
| 53 | + mod = MagicMock() | |
| 54 | + client = MagicMock() | |
| 55 | + choice = MagicMock() | |
| 56 | + choice.message.content = "hello from openai" | |
| 57 | + response = MagicMock() | |
| 58 | + response.choices = [choice] | |
| 59 | + client.chat.completions.create.return_value = response | |
| 60 | + embed_data = MagicMock() | |
| 61 | + embed_data.embedding = [0.1, 0.2, 0.3] | |
| 62 | + embed_response = MagicMock() | |
| 63 | + embed_response.data = [embed_data] | |
| 64 | + client.embeddings.create.return_value = embed_response | |
| 65 | + mod.OpenAI.return_value = client | |
| 66 | + return mod, client | |
| 67 | + | |
| 68 | + | |
| 69 | +def _fake_ollama_module(): | |
| 70 | + """Return a minimal mock that satisfies OllamaProvider's usage.""" | |
| 71 | + mod = MagicMock() | |
| 72 | + client = MagicMock() | |
| 73 | + client.chat.return_value = {"message": {"content": "hello from ollama"}} | |
| 74 | + client.embeddings.return_value = {"embedding": [0.4, 0.5, 0.6]} | |
| 75 | + mod.Client.return_value = client | |
| 76 | + return mod, client | |
| 77 | + | |
| 78 | + | |
| 79 | +# ── AnthropicProvider ───────────────────────────────────────────────────────── | |
| 80 | + | |
| 81 | + | |
class TestAnthropicProvider:
    """Unit tests for AnthropicProvider — fully offline, SDK mocked."""

    @staticmethod
    def _reload_llm():
        """Reload navegador.llm so its lazy import guards re-run under patching."""
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_raises_import_error_when_sdk_missing(self):
        with _block_import("anthropic"):
            llm_mod = self._reload_llm()
            with pytest.raises(ImportError, match="pip install anthropic"):
                llm_mod.AnthropicProvider()

    def test_name_is_anthropic(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.AnthropicProvider()
            assert provider.name == "anthropic"

    def test_default_model(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.AnthropicProvider()
            assert provider.model == "claude-3-5-haiku-20241022"

    def test_custom_model(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.AnthropicProvider(model="claude-opus-4")
            assert provider.model == "claude-opus-4"

    def test_complete_returns_text(self):
        fake_mod, client = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.AnthropicProvider()
            assert provider.complete("say hello") == "hello from anthropic"
            client.messages.create.assert_called_once()

    def test_complete_passes_max_tokens(self):
        fake_mod, client = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.AnthropicProvider()
            provider.complete("hi", max_tokens=512)
            _, kwargs = client.messages.create.call_args
            assert kwargs["max_tokens"] == 512

    def test_embed_raises_not_implemented(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.AnthropicProvider()
            with pytest.raises(NotImplementedError):
                provider.embed("text")
| 164 | + | |
| 165 | + | |
| 166 | +# ── OpenAIProvider ──────────────────────────────────────────────────────────── | |
| 167 | + | |
| 168 | + | |
class TestOpenAIProvider:
    """Unit tests for OpenAIProvider — fully offline, SDK mocked."""

    @staticmethod
    def _reload_llm():
        """Reload navegador.llm so its lazy import guards re-run under patching."""
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_raises_import_error_when_sdk_missing(self):
        with _block_import("openai"):
            llm_mod = self._reload_llm()
            with pytest.raises(ImportError, match="pip install openai"):
                llm_mod.OpenAIProvider()

    def test_name_is_openai(self):
        fake_mod, _ = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OpenAIProvider()
            assert provider.name == "openai"

    def test_default_model(self):
        fake_mod, _ = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OpenAIProvider()
            assert provider.model == "gpt-4o-mini"

    def test_custom_model(self):
        fake_mod, _ = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OpenAIProvider(model="gpt-4o")
            assert provider.model == "gpt-4o"

    def test_complete_returns_text(self):
        fake_mod, client = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OpenAIProvider()
            assert provider.complete("say hello") == "hello from openai"
            client.chat.completions.create.assert_called_once()

    def test_embed_returns_list_of_floats(self):
        fake_mod, client = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OpenAIProvider()
            assert provider.embed("hello world") == [0.1, 0.2, 0.3]
            client.embeddings.create.assert_called_once()
| 238 | + | |
| 239 | + | |
| 240 | +# ── OllamaProvider ──────────────────────────────────────────────────────────── | |
| 241 | + | |
| 242 | + | |
class TestOllamaProvider:
    """Unit tests for OllamaProvider — fully offline, SDK mocked."""

    @staticmethod
    def _reload_llm():
        """Reload navegador.llm so its lazy import guards re-run under patching."""
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_raises_import_error_when_sdk_missing(self):
        with _block_import("ollama"):
            llm_mod = self._reload_llm()
            with pytest.raises(ImportError, match="pip install ollama"):
                llm_mod.OllamaProvider()

    def test_name_is_ollama(self):
        fake_mod, _ = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OllamaProvider()
            assert provider.name == "ollama"

    def test_default_model(self):
        fake_mod, _ = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OllamaProvider()
            assert provider.model == "llama3.2"

    def test_custom_model(self):
        fake_mod, _ = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OllamaProvider(model="mistral")
            assert provider.model == "mistral"

    def test_complete_returns_text(self):
        fake_mod, client = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OllamaProvider()
            assert provider.complete("say hello") == "hello from ollama"
            client.chat.assert_called_once()

    def test_embed_returns_list_of_floats(self):
        fake_mod, client = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            llm_mod = self._reload_llm()
            provider = llm_mod.OllamaProvider()
            assert provider.embed("hello world") == [0.4, 0.5, 0.6]
            client.embeddings.assert_called_once()
| 312 | + | |
| 313 | + | |
| 314 | +# ── discover_providers ──────────────────────────────────────────────────────── | |
| 315 | + | |
| 316 | + | |
class TestDiscoverProviders:
    """Tests for discover_providers() under various installed-SDK mixes."""

    def _reload(self):
        # Re-import navegador.llm so discovery re-runs under the currently
        # patched sys.modules. (The old signature took an unused `modules`
        # dict parameter; every caller passed a throwaway {} — removed.)
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_all_available(self):
        fake_a, _ = _fake_anthropic_module()
        fake_o, _ = _fake_openai_module()
        fake_ol, _ = _fake_ollama_module()
        with patch.dict(
            sys.modules,
            {"anthropic": fake_a, "openai": fake_o, "ollama": fake_ol},
        ):
            result = self._reload().discover_providers()
            assert result == ["anthropic", "openai", "ollama"]

    def test_only_openai_available(self):
        fake_o, _ = _fake_openai_module()
        with (
            _block_import("anthropic"),
            patch.dict(sys.modules, {"openai": fake_o}),
            _block_import("ollama"),
        ):
            assert self._reload().discover_providers() == ["openai"]

    def test_none_available(self):
        with _block_import("anthropic"), _block_import("openai"), _block_import("ollama"):
            assert self._reload().discover_providers() == []

    def test_preserves_priority_order(self):
        # With openai missing, the remaining providers must keep their
        # priority order: anthropic before ollama.
        fake_a, _ = _fake_anthropic_module()
        fake_ol, _ = _fake_ollama_module()
        with (
            patch.dict(sys.modules, {"anthropic": fake_a, "ollama": fake_ol}),
            _block_import("openai"),
        ):
            assert self._reload().discover_providers() == ["anthropic", "ollama"]
| 365 | + | |
| 366 | + | |
| 367 | +# ── get_provider ────────────────────────────────────────────────────────────── | |
| 368 | + | |
| 369 | + | |
class TestGetProvider:
    """Behaviour of the get_provider() factory."""

    def _reload(self):
        # Re-import navegador.llm so the SDK guards re-run under the patches.
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_returns_anthropic_provider(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            provider = self._reload().get_provider("anthropic")
            assert provider.name == "anthropic"

    def test_returns_openai_provider(self):
        fake_mod, _ = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            provider = self._reload().get_provider("openai")
            assert provider.name == "openai"

    def test_returns_ollama_provider(self):
        fake_mod, _ = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            provider = self._reload().get_provider("ollama")
            assert provider.name == "ollama"

    def test_passes_model_argument(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            provider = self._reload().get_provider("anthropic", model="claude-opus-4")
            assert provider.model == "claude-opus-4"

    def test_unknown_provider_raises_value_error(self):
        llm_mod = self._reload()
        with pytest.raises(ValueError, match="Unknown LLM provider"):
            llm_mod.get_provider("grok")

    def test_unknown_provider_message_includes_valid_options(self):
        # The error text should name the valid choices, e.g. "anthropic".
        llm_mod = self._reload()
        with pytest.raises(ValueError, match="anthropic"):
            llm_mod.get_provider("nonexistent")
| 424 | + | |
| 425 | + | |
| 426 | +# ── auto_provider ───────────────────────────────────────────────────────────── | |
| 427 | + | |
| 428 | + | |
class TestAutoProvider:
    """Behaviour of auto_provider(): priority order and failure mode."""

    def _reload(self):
        # Re-import navegador.llm so auto-detection re-runs under the patches.
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_prefers_anthropic_when_all_available(self):
        fake_a, _ = _fake_anthropic_module()
        fake_o, _ = _fake_openai_module()
        fake_ol, _ = _fake_ollama_module()
        patched = {"anthropic": fake_a, "openai": fake_o, "ollama": fake_ol}
        with patch.dict(sys.modules, patched):
            provider = self._reload().auto_provider()
            assert provider.name == "anthropic"

    def test_falls_back_to_openai_when_anthropic_missing(self):
        fake_o, _ = _fake_openai_module()
        fake_ol, _ = _fake_ollama_module()
        with (
            _block_import("anthropic"),
            patch.dict(sys.modules, {"openai": fake_o, "ollama": fake_ol}),
        ):
            provider = self._reload().auto_provider()
            assert provider.name == "openai"

    def test_falls_back_to_ollama_when_anthropic_and_openai_missing(self):
        fake_ol, _ = _fake_ollama_module()
        with (
            _block_import("anthropic"),
            _block_import("openai"),
            patch.dict(sys.modules, {"ollama": fake_ol}),
        ):
            provider = self._reload().auto_provider()
            assert provider.name == "ollama"

    def test_raises_runtime_error_when_no_sdk_available(self):
        with _block_import("anthropic"), _block_import("openai"), _block_import("ollama"):
            llm_mod = self._reload()
            with pytest.raises(RuntimeError, match="No LLM SDK is installed"):
                llm_mod.auto_provider()

    def test_runtime_error_message_includes_install_hints(self):
        # The failure message should tell the user how to fix it.
        with _block_import("anthropic"), _block_import("openai"), _block_import("ollama"):
            llm_mod = self._reload()
            with pytest.raises(RuntimeError, match="pip install"):
                llm_mod.auto_provider()

    def test_passes_model_to_provider(self):
        fake_a, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_a}):
            provider = self._reload().auto_provider(model="claude-opus-4")
            assert provider.model == "claude-opus-4"
| --- a/tests/test_llm.py | |
| +++ b/tests/test_llm.py | |
| @@ -0,0 +1,489 @@ | |
| --- a/tests/test_llm.py | |
| +++ b/tests/test_llm.py | |
| @@ -0,0 +1,489 @@ | |
| 1 | """ |
| 2 | Tests for navegador/llm.py — LLM backend abstraction. |
| 3 | |
| 4 | All tests are fully offline. SDK imports are patched to avoid requiring |
| 5 | any LLM SDK to be installed in the test environment. |
| 6 | """ |
| 7 | |
| 8 | from __future__ import annotations |
| 9 | |
| 10 | import sys |
| 11 | from unittest.mock import MagicMock, patch |
| 12 | |
| 13 | import pytest |
| 14 | |
| 15 | |
| 16 | # ── Helpers ─────────────────────────────────────────────────────────────────── |
| 17 | |
| 18 | |
def _block_import(name: str):
    """
    Context manager under which ``import <name>`` raises ImportError, even if
    the package is actually installed.

    Works by planting ``None`` in ``sys.modules[name]``; the import machinery
    refuses to import a module whose cache entry is None. The previous entry
    (if any) is restored on exit.
    """

    class _ImportBlocker:
        def __enter__(self):
            # Remember what was cached before we clobber it.
            self._saved = sys.modules.get(name)
            sys.modules[name] = None  # type: ignore[assignment]
            return self

        def __exit__(self, *exc_info):
            if self._saved is not None:
                sys.modules[name] = self._saved
            else:
                sys.modules.pop(name, None)

    return _ImportBlocker()
| 38 | |
| 39 | |
def _fake_anthropic_module():
    """Return (module, client) mocks satisfying AnthropicProvider's usage."""
    client = MagicMock()
    # messages.create(...) -> object with .content == [obj with .text]
    client.messages.create.return_value = MagicMock(
        content=[MagicMock(text="hello from anthropic")]
    )
    module = MagicMock()
    module.Anthropic.return_value = client
    return module, client
| 49 | |
| 50 | |
def _fake_openai_module():
    """Return (module, client) mocks satisfying OpenAIProvider's usage."""
    client = MagicMock()
    # chat.completions.create(...) -> response.choices[0].message.content
    chat_choice = MagicMock()
    chat_choice.message.content = "hello from openai"
    client.chat.completions.create.return_value = MagicMock(choices=[chat_choice])
    # embeddings.create(...) -> response.data[0].embedding
    vector_item = MagicMock(embedding=[0.1, 0.2, 0.3])
    client.embeddings.create.return_value = MagicMock(data=[vector_item])
    module = MagicMock()
    module.OpenAI.return_value = client
    return module, client
| 67 | |
| 68 | |
def _fake_ollama_module():
    """Return (module, client) mocks satisfying OllamaProvider's usage."""
    client = MagicMock()
    # Ollama's client returns plain dicts rather than response objects.
    client.chat.return_value = {"message": {"content": "hello from ollama"}}
    client.embeddings.return_value = {"embedding": [0.4, 0.5, 0.6]}
    module = MagicMock()
    module.Client.return_value = client
    return module, client
| 77 | |
| 78 | |
| 79 | # ── AnthropicProvider ───────────────────────────────────────────────────────── |
| 80 | |
| 81 | |
class TestAnthropicProvider:
    """Tests for AnthropicProvider against a mocked ``anthropic`` SDK."""

    def _reload(self):
        # Re-import navegador.llm so its lazy SDK guard re-runs under the
        # currently patched sys.modules. Extracted to match the _reload
        # helper style used by the other test classes in this file.
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_raises_import_error_when_sdk_missing(self):
        with _block_import("anthropic"):
            llm_mod = self._reload()
            with pytest.raises(ImportError, match="pip install anthropic"):
                llm_mod.AnthropicProvider()

    def test_name_is_anthropic(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            p = self._reload().AnthropicProvider()
            assert p.name == "anthropic"

    def test_default_model(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            p = self._reload().AnthropicProvider()
            assert p.model == "claude-3-5-haiku-20241022"

    def test_custom_model(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            p = self._reload().AnthropicProvider(model="claude-opus-4")
            assert p.model == "claude-opus-4"

    def test_complete_returns_text(self):
        fake_mod, client = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            p = self._reload().AnthropicProvider()
            assert p.complete("say hello") == "hello from anthropic"
            client.messages.create.assert_called_once()

    def test_complete_passes_max_tokens(self):
        fake_mod, client = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            p = self._reload().AnthropicProvider()
            p.complete("hi", max_tokens=512)
            _, kwargs = client.messages.create.call_args
            assert kwargs["max_tokens"] == 512

    def test_embed_raises_not_implemented(self):
        # Anthropic has no embeddings endpoint, so embed() must refuse.
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            p = self._reload().AnthropicProvider()
            with pytest.raises(NotImplementedError):
                p.embed("text")
| 164 | |
| 165 | |
| 166 | # ── OpenAIProvider ──────────────────────────────────────────────────────────── |
| 167 | |
| 168 | |
class TestOpenAIProvider:
    """Tests for OpenAIProvider against a mocked ``openai`` SDK."""

    def _reload(self):
        # Re-import navegador.llm so its lazy SDK guard re-runs under the
        # currently patched sys.modules (same helper style as the other
        # test classes in this file).
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_raises_import_error_when_sdk_missing(self):
        with _block_import("openai"):
            llm_mod = self._reload()
            with pytest.raises(ImportError, match="pip install openai"):
                llm_mod.OpenAIProvider()

    def test_name_is_openai(self):
        fake_mod, _ = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            p = self._reload().OpenAIProvider()
            assert p.name == "openai"

    def test_default_model(self):
        fake_mod, _ = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            p = self._reload().OpenAIProvider()
            assert p.model == "gpt-4o-mini"

    def test_custom_model(self):
        fake_mod, _ = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            p = self._reload().OpenAIProvider(model="gpt-4o")
            assert p.model == "gpt-4o"

    def test_complete_returns_text(self):
        fake_mod, client = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            p = self._reload().OpenAIProvider()
            assert p.complete("say hello") == "hello from openai"
            client.chat.completions.create.assert_called_once()

    def test_embed_returns_list_of_floats(self):
        fake_mod, client = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            p = self._reload().OpenAIProvider()
            assert p.embed("hello world") == [0.1, 0.2, 0.3]
            client.embeddings.create.assert_called_once()
| 238 | |
| 239 | |
| 240 | # ── OllamaProvider ──────────────────────────────────────────────────────────── |
| 241 | |
| 242 | |
class TestOllamaProvider:
    """Tests for OllamaProvider against a mocked ``ollama`` SDK."""

    def _reload(self):
        # Re-import navegador.llm so its lazy SDK guard re-runs under the
        # currently patched sys.modules (same helper style as the other
        # test classes in this file).
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_raises_import_error_when_sdk_missing(self):
        with _block_import("ollama"):
            llm_mod = self._reload()
            with pytest.raises(ImportError, match="pip install ollama"):
                llm_mod.OllamaProvider()

    def test_name_is_ollama(self):
        fake_mod, _ = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            p = self._reload().OllamaProvider()
            assert p.name == "ollama"

    def test_default_model(self):
        fake_mod, _ = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            p = self._reload().OllamaProvider()
            assert p.model == "llama3.2"

    def test_custom_model(self):
        fake_mod, _ = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            p = self._reload().OllamaProvider(model="mistral")
            assert p.model == "mistral"

    def test_complete_returns_text(self):
        fake_mod, client = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            p = self._reload().OllamaProvider()
            assert p.complete("say hello") == "hello from ollama"
            client.chat.assert_called_once()

    def test_embed_returns_list_of_floats(self):
        fake_mod, client = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            p = self._reload().OllamaProvider()
            assert p.embed("hello world") == [0.4, 0.5, 0.6]
            client.embeddings.assert_called_once()
| 312 | |
| 313 | |
| 314 | # ── discover_providers ──────────────────────────────────────────────────────── |
| 315 | |
| 316 | |
class TestDiscoverProviders:
    """Tests for discover_providers() under various installed-SDK mixes."""

    def _reload(self):
        # Re-import navegador.llm so discovery re-runs under the currently
        # patched sys.modules. (The old signature took an unused `modules`
        # dict parameter; every caller passed a throwaway {} — removed.)
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_all_available(self):
        fake_a, _ = _fake_anthropic_module()
        fake_o, _ = _fake_openai_module()
        fake_ol, _ = _fake_ollama_module()
        with patch.dict(
            sys.modules,
            {"anthropic": fake_a, "openai": fake_o, "ollama": fake_ol},
        ):
            result = self._reload().discover_providers()
            assert result == ["anthropic", "openai", "ollama"]

    def test_only_openai_available(self):
        fake_o, _ = _fake_openai_module()
        with (
            _block_import("anthropic"),
            patch.dict(sys.modules, {"openai": fake_o}),
            _block_import("ollama"),
        ):
            assert self._reload().discover_providers() == ["openai"]

    def test_none_available(self):
        with _block_import("anthropic"), _block_import("openai"), _block_import("ollama"):
            assert self._reload().discover_providers() == []

    def test_preserves_priority_order(self):
        # With openai missing, the remaining providers must keep their
        # priority order: anthropic before ollama.
        fake_a, _ = _fake_anthropic_module()
        fake_ol, _ = _fake_ollama_module()
        with (
            patch.dict(sys.modules, {"anthropic": fake_a, "ollama": fake_ol}),
            _block_import("openai"),
        ):
            assert self._reload().discover_providers() == ["anthropic", "ollama"]
| 365 | |
| 366 | |
| 367 | # ── get_provider ────────────────────────────────────────────────────────────── |
| 368 | |
| 369 | |
class TestGetProvider:
    """Behaviour of the get_provider() factory."""

    def _reload(self):
        # Re-import navegador.llm so the SDK guards re-run under the patches.
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_returns_anthropic_provider(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            provider = self._reload().get_provider("anthropic")
            assert provider.name == "anthropic"

    def test_returns_openai_provider(self):
        fake_mod, _ = _fake_openai_module()
        with patch.dict(sys.modules, {"openai": fake_mod}):
            provider = self._reload().get_provider("openai")
            assert provider.name == "openai"

    def test_returns_ollama_provider(self):
        fake_mod, _ = _fake_ollama_module()
        with patch.dict(sys.modules, {"ollama": fake_mod}):
            provider = self._reload().get_provider("ollama")
            assert provider.name == "ollama"

    def test_passes_model_argument(self):
        fake_mod, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_mod}):
            provider = self._reload().get_provider("anthropic", model="claude-opus-4")
            assert provider.model == "claude-opus-4"

    def test_unknown_provider_raises_value_error(self):
        llm_mod = self._reload()
        with pytest.raises(ValueError, match="Unknown LLM provider"):
            llm_mod.get_provider("grok")

    def test_unknown_provider_message_includes_valid_options(self):
        # The error text should name the valid choices, e.g. "anthropic".
        llm_mod = self._reload()
        with pytest.raises(ValueError, match="anthropic"):
            llm_mod.get_provider("nonexistent")
| 424 | |
| 425 | |
| 426 | # ── auto_provider ───────────────────────────────────────────────────────────── |
| 427 | |
| 428 | |
class TestAutoProvider:
    """Behaviour of auto_provider(): priority order and failure mode."""

    def _reload(self):
        # Re-import navegador.llm so auto-detection re-runs under the patches.
        import importlib

        import navegador.llm as llm_mod

        importlib.reload(llm_mod)
        return llm_mod

    def test_prefers_anthropic_when_all_available(self):
        fake_a, _ = _fake_anthropic_module()
        fake_o, _ = _fake_openai_module()
        fake_ol, _ = _fake_ollama_module()
        patched = {"anthropic": fake_a, "openai": fake_o, "ollama": fake_ol}
        with patch.dict(sys.modules, patched):
            provider = self._reload().auto_provider()
            assert provider.name == "anthropic"

    def test_falls_back_to_openai_when_anthropic_missing(self):
        fake_o, _ = _fake_openai_module()
        fake_ol, _ = _fake_ollama_module()
        with (
            _block_import("anthropic"),
            patch.dict(sys.modules, {"openai": fake_o, "ollama": fake_ol}),
        ):
            provider = self._reload().auto_provider()
            assert provider.name == "openai"

    def test_falls_back_to_ollama_when_anthropic_and_openai_missing(self):
        fake_ol, _ = _fake_ollama_module()
        with (
            _block_import("anthropic"),
            _block_import("openai"),
            patch.dict(sys.modules, {"ollama": fake_ol}),
        ):
            provider = self._reload().auto_provider()
            assert provider.name == "ollama"

    def test_raises_runtime_error_when_no_sdk_available(self):
        with _block_import("anthropic"), _block_import("openai"), _block_import("ollama"):
            llm_mod = self._reload()
            with pytest.raises(RuntimeError, match="No LLM SDK is installed"):
                llm_mod.auto_provider()

    def test_runtime_error_message_includes_install_hints(self):
        # The failure message should tell the user how to fix it.
        with _block_import("anthropic"), _block_import("openai"), _block_import("ollama"):
            llm_mod = self._reload()
            with pytest.raises(RuntimeError, match="pip install"):
                llm_mod.auto_provider()

    def test_passes_model_to_provider(self):
        fake_a, _ = _fake_anthropic_module()
        with patch.dict(sys.modules, {"anthropic": fake_a}):
            provider = self._reload().auto_provider(model="claude-opus-4")
            assert provider.model == "claude-opus-4"