PlanOpticon

fix(companion): use cheap model defaults and PlanOpticon-aware system prompt. Chat preferences now default to haiku/gpt-4o-mini/gemini-flash instead of sonnet/gpt-4o. The agent system prompt now includes all built-in slash commands and CLI commands so the LLM gives PlanOpticon-specific advice. Auth error messages now show which environment variables to set.

lmata 2026-03-07 23:55 trunk
Commit 750b7d8defba0390fa626a38c56f9dae15e547ec8c2a8929c8b76458db943797
--- video_processor/agent/agent_loop.py
+++ video_processor/agent/agent_loop.py
@@ -118,16 +118,40 @@
118118
artifacts_summary = "\n\nGenerated artifacts:\n" + "\n".join(
119119
f"- {a.name} ({a.artifact_type})" for a in self.context.artifacts
120120
)
121121
122122
system_msg = (
123
- "You are PlanOpticon, a planning agent that helps users create project plans "
124
- "from extracted knowledge. You have access to a knowledge graph built from "
125
- "videos and documents."
123
+ "You are PlanOpticon, an AI planning companion built into the PlanOpticon CLI. "
124
+ "PlanOpticon is a video analysis and knowledge extraction tool that processes "
125
+ "recordings into structured knowledge graphs.\n\n"
126
+ "You are running inside the interactive companion REPL. The user can use these "
127
+ "built-in commands (suggest them when relevant):\n"
128
+ " /status - Show workspace status (loaded KG, videos, docs)\n"
129
+ " /entities [--type T] - List knowledge graph entities\n"
130
+ " /search TERM - Search entities by name\n"
131
+ " /neighbors ENTITY - Show entity relationships\n"
132
+ " /export FORMAT - Export KG (markdown, obsidian, notion, csv)\n"
133
+ " /analyze PATH - Analyze a video or document\n"
134
+ " /ingest PATH - Ingest a file into the knowledge graph\n"
135
+ " /auth SERVICE - Authenticate with a service "
136
+ "(zoom, google, microsoft, notion, dropbox, github)\n"
137
+ " /provider [NAME] - List or switch LLM provider\n"
138
+ " /model [NAME] - Show or switch chat model\n"
139
+ " /plan - Generate a project plan\n"
140
+ " /prd - Generate a PRD\n"
141
+ " /tasks - Generate a task breakdown\n\n"
142
+ "PlanOpticon CLI commands the user can run outside the REPL:\n"
143
+ " planopticon auth zoom|google|microsoft - Authenticate with cloud services\n"
144
+ " planopticon recordings zoom-list|teams-list|meet-list - List cloud recordings\n"
145
+ " planopticon analyze -i VIDEO - Analyze a video file\n"
146
+ " planopticon query - Query the knowledge graph\n"
147
+ " planopticon export FORMAT PATH - Export knowledge graph\n\n"
126148
f"{kb_summary}{artifacts_summary}\n\n"
127
- "Help the user plan their project. Ask clarifying questions to gather "
128
- "requirements. When ready, suggest using specific skills to generate artifacts."
149
+ "Help the user with their planning tasks. When they ask about capabilities, "
150
+ "refer them to the appropriate built-in commands. Ask clarifying questions "
151
+ "to gather requirements. When ready, suggest using specific skills or commands "
152
+ "to generate artifacts."
129153
)
130154
131155
messages = [{"role": "system", "content": system_msg}] + self.context.conversation_history
132156
133157
response = self.context.provider_manager.chat(messages, max_tokens=2048, temperature=0.5)
134158
--- video_processor/agent/agent_loop.py
+++ video_processor/agent/agent_loop.py
@@ -118,16 +118,40 @@
118 artifacts_summary = "\n\nGenerated artifacts:\n" + "\n".join(
119 f"- {a.name} ({a.artifact_type})" for a in self.context.artifacts
120 )
121
122 system_msg = (
123 "You are PlanOpticon, a planning agent that helps users create project plans "
124 "from extracted knowledge. You have access to a knowledge graph built from "
125 "videos and documents."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126 f"{kb_summary}{artifacts_summary}\n\n"
127 "Help the user plan their project. Ask clarifying questions to gather "
128 "requirements. When ready, suggest using specific skills to generate artifacts."
 
 
129 )
130
131 messages = [{"role": "system", "content": system_msg}] + self.context.conversation_history
132
133 response = self.context.provider_manager.chat(messages, max_tokens=2048, temperature=0.5)
134
--- video_processor/agent/agent_loop.py
+++ video_processor/agent/agent_loop.py
@@ -118,16 +118,40 @@
118 artifacts_summary = "\n\nGenerated artifacts:\n" + "\n".join(
119 f"- {a.name} ({a.artifact_type})" for a in self.context.artifacts
120 )
121
122 system_msg = (
123 "You are PlanOpticon, an AI planning companion built into the PlanOpticon CLI. "
124 "PlanOpticon is a video analysis and knowledge extraction tool that processes "
125 "recordings into structured knowledge graphs.\n\n"
126 "You are running inside the interactive companion REPL. The user can use these "
127 "built-in commands (suggest them when relevant):\n"
128 " /status - Show workspace status (loaded KG, videos, docs)\n"
129 " /entities [--type T] - List knowledge graph entities\n"
130 " /search TERM - Search entities by name\n"
131 " /neighbors ENTITY - Show entity relationships\n"
132 " /export FORMAT - Export KG (markdown, obsidian, notion, csv)\n"
133 " /analyze PATH - Analyze a video or document\n"
134 " /ingest PATH - Ingest a file into the knowledge graph\n"
135 " /auth SERVICE - Authenticate with a service "
136 "(zoom, google, microsoft, notion, dropbox, github)\n"
137 " /provider [NAME] - List or switch LLM provider\n"
138 " /model [NAME] - Show or switch chat model\n"
139 " /plan - Generate a project plan\n"
140 " /prd - Generate a PRD\n"
141 " /tasks - Generate a task breakdown\n\n"
142 "PlanOpticon CLI commands the user can run outside the REPL:\n"
143 " planopticon auth zoom|google|microsoft - Authenticate with cloud services\n"
144 " planopticon recordings zoom-list|teams-list|meet-list - List cloud recordings\n"
145 " planopticon analyze -i VIDEO - Analyze a video file\n"
146 " planopticon query - Query the knowledge graph\n"
147 " planopticon export FORMAT PATH - Export knowledge graph\n\n"
148 f"{kb_summary}{artifacts_summary}\n\n"
149 "Help the user with their planning tasks. When they ask about capabilities, "
150 "refer them to the appropriate built-in commands. Ask clarifying questions "
151 "to gather requirements. When ready, suggest using specific skills or commands "
152 "to generate artifacts."
153 )
154
155 messages = [{"role": "system", "content": system_msg}] + self.context.conversation_history
156
157 response = self.context.provider_manager.chat(messages, max_tokens=2048, temperature=0.5)
158
--- video_processor/auth.py
+++ video_processor/auth.py
@@ -163,13 +163,23 @@
163163
success=True,
164164
access_token=api_key,
165165
method="api_key",
166166
)
167167
168
+ # Build a helpful error message
169
+ hints = []
170
+ if self.config.supports_oauth and self.config.client_id_env:
171
+ hints.append(f"Set {self.config.client_id_env} for OAuth")
172
+ if self.config.client_secret_env:
173
+ hints.append(f"and {self.config.client_secret_env}")
174
+ if self.config.api_key_env:
175
+ hints.append(f"or set {self.config.api_key_env} for API key access")
176
+ hint_str = (" (" + " ".join(hints) + ")") if hints else ""
177
+
168178
return AuthResult(
169179
success=False,
170
- error=f"No auth method available for {self.config.service}",
180
+ error=f"No auth method available for {self.config.service}.{hint_str}",
171181
)
172182
173183
def get_token(self) -> Optional[str]:
174184
"""Convenience: authenticate and return just the token."""
175185
result = self.authenticate()
176186
--- video_processor/auth.py
+++ video_processor/auth.py
@@ -163,13 +163,23 @@
163 success=True,
164 access_token=api_key,
165 method="api_key",
166 )
167
 
 
 
 
 
 
 
 
 
 
168 return AuthResult(
169 success=False,
170 error=f"No auth method available for {self.config.service}",
171 )
172
173 def get_token(self) -> Optional[str]:
174 """Convenience: authenticate and return just the token."""
175 result = self.authenticate()
176
--- video_processor/auth.py
+++ video_processor/auth.py
@@ -163,13 +163,23 @@
163 success=True,
164 access_token=api_key,
165 method="api_key",
166 )
167
168 # Build a helpful error message
169 hints = []
170 if self.config.supports_oauth and self.config.client_id_env:
171 hints.append(f"Set {self.config.client_id_env} for OAuth")
172 if self.config.client_secret_env:
173 hints.append(f"and {self.config.client_secret_env}")
174 if self.config.api_key_env:
175 hints.append(f"or set {self.config.api_key_env} for API key access")
176 hint_str = (" (" + " ".join(hints) + ")") if hints else ""
177
178 return AuthResult(
179 success=False,
180 error=f"No auth method available for {self.config.service}.{hint_str}",
181 )
182
183 def get_token(self) -> Optional[str]:
184 """Convenience: authenticate and return just the token."""
185 result = self.authenticate()
186
--- video_processor/providers/manager.py
+++ video_processor/providers/manager.py
@@ -31,17 +31,17 @@
3131
3232
3333
# Default model preference rankings (tried in order)
3434
_VISION_PREFERENCES = [
3535
("gemini", "gemini-2.5-flash"),
36
- ("openai", "gpt-4o"),
37
- ("anthropic", "claude-sonnet-4-5-20250929"),
36
+ ("openai", "gpt-4o-mini"),
37
+ ("anthropic", "claude-haiku-4-5-20251001"),
3838
]
3939
4040
_CHAT_PREFERENCES = [
41
- ("anthropic", "claude-sonnet-4-5-20250929"),
42
- ("openai", "gpt-4o"),
41
+ ("anthropic", "claude-haiku-4-5-20251001"),
42
+ ("openai", "gpt-4o-mini"),
4343
("gemini", "gemini-2.5-flash"),
4444
]
4545
4646
_TRANSCRIPTION_PREFERENCES = [
4747
("openai", "whisper-1"),
4848
--- video_processor/providers/manager.py
+++ video_processor/providers/manager.py
@@ -31,17 +31,17 @@
31
32
33 # Default model preference rankings (tried in order)
34 _VISION_PREFERENCES = [
35 ("gemini", "gemini-2.5-flash"),
36 ("openai", "gpt-4o"),
37 ("anthropic", "claude-sonnet-4-5-20250929"),
38 ]
39
40 _CHAT_PREFERENCES = [
41 ("anthropic", "claude-sonnet-4-5-20250929"),
42 ("openai", "gpt-4o"),
43 ("gemini", "gemini-2.5-flash"),
44 ]
45
46 _TRANSCRIPTION_PREFERENCES = [
47 ("openai", "whisper-1"),
48
--- video_processor/providers/manager.py
+++ video_processor/providers/manager.py
@@ -31,17 +31,17 @@
31
32
33 # Default model preference rankings (tried in order)
34 _VISION_PREFERENCES = [
35 ("gemini", "gemini-2.5-flash"),
36 ("openai", "gpt-4o-mini"),
37 ("anthropic", "claude-haiku-4-5-20251001"),
38 ]
39
40 _CHAT_PREFERENCES = [
41 ("anthropic", "claude-haiku-4-5-20251001"),
42 ("openai", "gpt-4o-mini"),
43 ("gemini", "gemini-2.5-flash"),
44 ]
45
46 _TRANSCRIPTION_PREFERENCES = [
47 ("openai", "whisper-1"),
48

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button