PlanOpticon
fix(anthropic): extract system messages into top-level parameter. The Anthropic Messages API requires system content to be passed as a top-level `system` parameter, not as a message with role "system". This change extracts any system messages from the messages array, joins them, and passes the result as the `system` parameter before calling the API.
Commit
6fcb9f3c95d3b6b5782fe323771f4526271c61c45dc2d41e34448ebb59fc13dd
Parent
96f5e6e5f807c0f…
1 file changed
+20
-6
| --- video_processor/providers/anthropic_provider.py | ||
| +++ video_processor/providers/anthropic_provider.py | ||
| @@ -32,16 +32,30 @@ | ||
| 32 | 32 | max_tokens: int = 4096, |
| 33 | 33 | temperature: float = 0.7, |
| 34 | 34 | model: Optional[str] = None, |
| 35 | 35 | ) -> str: |
| 36 | 36 | model = model or "claude-sonnet-4-5-20250929" |
| 37 | - response = self.client.messages.create( | |
| 38 | - model=model, | |
| 39 | - messages=messages, | |
| 40 | - max_tokens=max_tokens, | |
| 41 | - temperature=temperature, | |
| 42 | - ) | |
| 37 | + | |
| 38 | + # Anthropic requires system messages as a top-level parameter | |
| 39 | + system_parts = [] | |
| 40 | + chat_messages = [] | |
| 41 | + for msg in messages: | |
| 42 | + if msg.get("role") == "system": | |
| 43 | + system_parts.append(msg["content"]) | |
| 44 | + else: | |
| 45 | + chat_messages.append(msg) | |
| 46 | + | |
| 47 | + kwargs = { | |
| 48 | + "model": model, | |
| 49 | + "messages": chat_messages, | |
| 50 | + "max_tokens": max_tokens, | |
| 51 | + "temperature": temperature, | |
| 52 | + } | |
| 53 | + if system_parts: | |
| 54 | + kwargs["system"] = "\n\n".join(system_parts) | |
| 55 | + | |
| 56 | + response = self.client.messages.create(**kwargs) | |
| 43 | 57 | self._last_usage = { |
| 44 | 58 | "input_tokens": getattr(response.usage, "input_tokens", 0), |
| 45 | 59 | "output_tokens": getattr(response.usage, "output_tokens", 0), |
| 46 | 60 | } |
| 47 | 61 | return response.content[0].text |
| 48 | 62 |
| --- video_processor/providers/anthropic_provider.py | |
| +++ video_processor/providers/anthropic_provider.py | |
| @@ -32,16 +32,30 @@ | |
| 32 | max_tokens: int = 4096, |
| 33 | temperature: float = 0.7, |
| 34 | model: Optional[str] = None, |
| 35 | ) -> str: |
| 36 | model = model or "claude-sonnet-4-5-20250929" |
| 37 | response = self.client.messages.create( |
| 38 | model=model, |
| 39 | messages=messages, |
| 40 | max_tokens=max_tokens, |
| 41 | temperature=temperature, |
| 42 | ) |
| 43 | self._last_usage = { |
| 44 | "input_tokens": getattr(response.usage, "input_tokens", 0), |
| 45 | "output_tokens": getattr(response.usage, "output_tokens", 0), |
| 46 | } |
| 47 | return response.content[0].text |
| 48 |
| --- video_processor/providers/anthropic_provider.py | |
| +++ video_processor/providers/anthropic_provider.py | |
| @@ -32,16 +32,30 @@ | |
| 32 | max_tokens: int = 4096, |
| 33 | temperature: float = 0.7, |
| 34 | model: Optional[str] = None, |
| 35 | ) -> str: |
| 36 | model = model or "claude-sonnet-4-5-20250929" |
| 37 | |
| 38 | # Anthropic requires system messages as a top-level parameter |
| 39 | system_parts = [] |
| 40 | chat_messages = [] |
| 41 | for msg in messages: |
| 42 | if msg.get("role") == "system": |
| 43 | system_parts.append(msg["content"]) |
| 44 | else: |
| 45 | chat_messages.append(msg) |
| 46 | |
| 47 | kwargs = { |
| 48 | "model": model, |
| 49 | "messages": chat_messages, |
| 50 | "max_tokens": max_tokens, |
| 51 | "temperature": temperature, |
| 52 | } |
| 53 | if system_parts: |
| 54 | kwargs["system"] = "\n\n".join(system_parts) |
| 55 | |
| 56 | response = self.client.messages.create(**kwargs) |
| 57 | self._last_usage = { |
| 58 | "input_tokens": getattr(response.usage, "input_tokens", 0), |
| 59 | "output_tokens": getattr(response.usage, "output_tokens", 0), |
| 60 | } |
| 61 | return response.content[0].text |
| 62 |