PlanOpticon
Fix Ollama provider handling None token counts in usage tracking
Commit: ab8aa49d396db8b7642c7f04cb4847dde7bd4dda181317fc559b230f10090552
Parent: a0146a58f34f111…
1 file changed, +8 -4
--- video_processor/providers/ollama_provider.py
+++ video_processor/providers/ollama_provider.py
@@ -86,12 +86,14 @@
             messages=messages,
             max_tokens=max_tokens,
             temperature=temperature,
         )
         self._last_usage = {
-            "input_tokens": getattr(response.usage, "prompt_tokens", 0) if response.usage else 0,
-            "output_tokens": getattr(response.usage, "completion_tokens", 0)
+            "input_tokens": (getattr(response.usage, "prompt_tokens", 0) or 0)
+            if response.usage
+            else 0,
+            "output_tokens": (getattr(response.usage, "completion_tokens", 0) or 0)
             if response.usage
             else 0,
         }
         return response.choices[0].message.content or ""
 
@@ -123,12 +125,14 @@
                 }
             ],
             max_tokens=max_tokens,
         )
         self._last_usage = {
-            "input_tokens": getattr(response.usage, "prompt_tokens", 0) if response.usage else 0,
-            "output_tokens": getattr(response.usage, "completion_tokens", 0)
+            "input_tokens": (getattr(response.usage, "prompt_tokens", 0) or 0)
+            if response.usage
+            else 0,
+            "output_tokens": (getattr(response.usage, "completion_tokens", 0) or 0)
             if response.usage
             else 0,
         }
         return response.choices[0].message.content or ""
 
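The subtle part of this fix is that getattr's default only fires when the attribute is missing, not when it is present with a value of None. As the commit title notes, token counts from Ollama can come back as None, so the old expression returned None and stored it in _last_usage, where it would break any later arithmetic on the counts. Below is a minimal sketch of the failure mode, not part of the commit: it assumes an OpenAI-style usage object with prompt_tokens and completion_tokens fields, and uses SimpleNamespace as a hypothetical stand-in for the real response type.

from types import SimpleNamespace

# Stand-in for response.usage; the field names mirror the diff above.
# Ollama reported no prompt token count here, so the field is None.
usage = SimpleNamespace(prompt_tokens=None, completion_tokens=42)

# Old expression: the attribute exists, so getattr never falls back to
# its default of 0, and None leaks into the usage dict.
old_input = getattr(usage, "prompt_tokens", 0) if usage else 0
assert old_input is None  # a later `total += old_input` raises TypeError

# New expression: "or 0" coerces None to 0 before it is stored.
new_input = (getattr(usage, "prompt_tokens", 0) or 0) if usage else 0
assert new_input == 0

The "or 0" coercion is lossless here, since it only maps None (or a legitimate count of 0) to 0, and the surrounding "if response.usage else 0" still covers the case where the usage block is absent altogether.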