PlanOpticon

Merge pull request #19 from ConflictHQ/fix-ollama-none-tokens: Fix Ollama None token counts in usage tracking

noreply 2026-02-16 22:53 trunk merge
Commit fd032773d6bc2c1342e6baab115c9dd633090e0e9a4bf70d954547e7c988947d
--- video_processor/providers/ollama_provider.py
+++ video_processor/providers/ollama_provider.py
@@ -86,12 +86,14 @@
             messages=messages,
             max_tokens=max_tokens,
             temperature=temperature,
         )
         self._last_usage = {
-            "input_tokens": getattr(response.usage, "prompt_tokens", 0) if response.usage else 0,
-            "output_tokens": getattr(response.usage, "completion_tokens", 0)
+            "input_tokens": (getattr(response.usage, "prompt_tokens", 0) or 0)
+            if response.usage
+            else 0,
+            "output_tokens": (getattr(response.usage, "completion_tokens", 0) or 0)
             if response.usage
             else 0,
         }
         return response.choices[0].message.content or ""
9799
@@ -123,12 +125,14 @@
                 }
             ],
             max_tokens=max_tokens,
         )
         self._last_usage = {
-            "input_tokens": getattr(response.usage, "prompt_tokens", 0) if response.usage else 0,
-            "output_tokens": getattr(response.usage, "completion_tokens", 0)
+            "input_tokens": (getattr(response.usage, "prompt_tokens", 0) or 0)
+            if response.usage
+            else 0,
+            "output_tokens": (getattr(response.usage, "completion_tokens", 0) or 0)
             if response.usage
             else 0,
         }
         return response.choices[0].message.content or ""
--- video_processor/providers/ollama_provider.py
+++ video_processor/providers/ollama_provider.py
@@ -86,12 +86,14 @@
86 messages=messages,
87 max_tokens=max_tokens,
88 temperature=temperature,
89 )
90 self._last_usage = {
91 "input_tokens": getattr(response.usage, "prompt_tokens", 0) if response.usage else 0,
92 "output_tokens": getattr(response.usage, "completion_tokens", 0)
 
 
93 if response.usage
94 else 0,
95 }
96 return response.choices[0].message.content or ""
97
@@ -123,12 +125,14 @@
123 }
124 ],
125 max_tokens=max_tokens,
126 )
127 self._last_usage = {
128 "input_tokens": getattr(response.usage, "prompt_tokens", 0) if response.usage else 0,
129 "output_tokens": getattr(response.usage, "completion_tokens", 0)
 
 
130 if response.usage
131 else 0,
132 }
133 return response.choices[0].message.content or ""
134
135
--- video_processor/providers/ollama_provider.py
+++ video_processor/providers/ollama_provider.py
@@ -86,12 +86,14 @@
86 messages=messages,
87 max_tokens=max_tokens,
88 temperature=temperature,
89 )
90 self._last_usage = {
91 "input_tokens": (getattr(response.usage, "prompt_tokens", 0) or 0)
92 if response.usage
93 else 0,
94 "output_tokens": (getattr(response.usage, "completion_tokens", 0) or 0)
95 if response.usage
96 else 0,
97 }
98 return response.choices[0].message.content or ""
99
@@ -123,12 +125,14 @@
125 }
126 ],
127 max_tokens=max_tokens,
128 )
129 self._last_usage = {
130 "input_tokens": (getattr(response.usage, "prompt_tokens", 0) or 0)
131 if response.usage
132 else 0,
133 "output_tokens": (getattr(response.usage, "completion_tokens", 0) or 0)
134 if response.usage
135 else 0,
136 }
137 return response.choices[0].message.content or ""
138
139

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button