PlanOpticon

fix(anthropic): extract system messages into top-level parameter. The Anthropic Messages API requires system content as a top-level `system` parameter, not as a message with role "system". This change extracts system messages from the messages array before calling the API.

lmata 2026-03-07 23:41 trunk
Commit 6fcb9f3c95d3b6b5782fe323771f4526271c61c45dc2d41e34448ebb59fc13dd
--- video_processor/providers/anthropic_provider.py
+++ video_processor/providers/anthropic_provider.py
@@ -32,16 +32,30 @@
3232
max_tokens: int = 4096,
3333
temperature: float = 0.7,
3434
model: Optional[str] = None,
3535
) -> str:
3636
model = model or "claude-sonnet-4-5-20250929"
37
- response = self.client.messages.create(
38
- model=model,
39
- messages=messages,
40
- max_tokens=max_tokens,
41
- temperature=temperature,
42
- )
37
+
38
+ # Anthropic requires system messages as a top-level parameter
39
+ system_parts = []
40
+ chat_messages = []
41
+ for msg in messages:
42
+ if msg.get("role") == "system":
43
+ system_parts.append(msg["content"])
44
+ else:
45
+ chat_messages.append(msg)
46
+
47
+ kwargs = {
48
+ "model": model,
49
+ "messages": chat_messages,
50
+ "max_tokens": max_tokens,
51
+ "temperature": temperature,
52
+ }
53
+ if system_parts:
54
+ kwargs["system"] = "\n\n".join(system_parts)
55
+
56
+ response = self.client.messages.create(**kwargs)
4357
self._last_usage = {
4458
"input_tokens": getattr(response.usage, "input_tokens", 0),
4559
"output_tokens": getattr(response.usage, "output_tokens", 0),
4660
}
4761
return response.content[0].text
4862
--- video_processor/providers/anthropic_provider.py
+++ video_processor/providers/anthropic_provider.py
@@ -32,16 +32,30 @@
32 max_tokens: int = 4096,
33 temperature: float = 0.7,
34 model: Optional[str] = None,
35 ) -> str:
36 model = model or "claude-sonnet-4-5-20250929"
37 response = self.client.messages.create(
38 model=model,
39 messages=messages,
40 max_tokens=max_tokens,
41 temperature=temperature,
42 )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43 self._last_usage = {
44 "input_tokens": getattr(response.usage, "input_tokens", 0),
45 "output_tokens": getattr(response.usage, "output_tokens", 0),
46 }
47 return response.content[0].text
48
--- video_processor/providers/anthropic_provider.py
+++ video_processor/providers/anthropic_provider.py
@@ -32,16 +32,30 @@
32 max_tokens: int = 4096,
33 temperature: float = 0.7,
34 model: Optional[str] = None,
35 ) -> str:
36 model = model or "claude-sonnet-4-5-20250929"
37
38 # Anthropic requires system messages as a top-level parameter
39 system_parts = []
40 chat_messages = []
41 for msg in messages:
42 if msg.get("role") == "system":
43 system_parts.append(msg["content"])
44 else:
45 chat_messages.append(msg)
46
47 kwargs = {
48 "model": model,
49 "messages": chat_messages,
50 "max_tokens": max_tokens,
51 "temperature": temperature,
52 }
53 if system_parts:
54 kwargs["system"] = "\n\n".join(system_parts)
55
56 response = self.client.messages.create(**kwargs)
57 self._last_usage = {
58 "input_tokens": getattr(response.usage, "input_tokens", 0),
59 "output_tokens": getattr(response.usage, "output_tokens", 0),
60 }
61 return response.content[0].text
62

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button