PlanOpticon
feat(companion): add /provider and /model commands for runtime switching

Switch LLM providers and models mid-session without restarting. /provider lists available providers with key status; /provider NAME switches. /model shows or switches the chat model.
Commit: 9a500cfc9e68ed1cc014c1a9ad0eaf9d69e340460e4a7ea625b6db9986bfb585
Parent: 3d5f08e1507c6b1…
2 files changed, +109 −2
  tests/test_companion.py           +32 −0
  video_processor/cli/companion.py  +77 −2
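A minimal sketch of driving the new commands from Python, mirroring the tests in this commit (the import path is an assumption based on the changed CLI module; everything else matches the diff below):

# Sketch only: import path assumed from video_processor/cli/companion.py.
from video_processor.cli.companion import CompanionREPL

repl = CompanionREPL()
print(repl.handle_input("/provider"))         # list providers with key status
print(repl.handle_input("/provider openai"))  # switch; init needs OPENAI_API_KEY
print(repl.handle_input("/model"))            # show the current chat model
print(repl.handle_input("/model gpt-4o"))     # switch the chat model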
--- tests/test_companion.py
+++ tests/test_companion.py
@@ -110,5 +110,37 @@
 
     def test_neighbors_no_kg(self):
         repl = CompanionREPL()
         output = repl.handle_input("/neighbors Alice")
         assert "No knowledge graph loaded" in output
+
+
+class TestProviderCommand:
+    def test_provider_list(self):
+        repl = CompanionREPL()
+        output = repl.handle_input("/provider")
+        assert "Available providers" in output
+        assert "openai" in output
+        assert "anthropic" in output
+
+    def test_provider_switch(self):
+        repl = CompanionREPL()
+        output = repl.handle_input("/provider openai")
+        # Will fail to init without key, but shouldn't crash
+        assert "openai" in output.lower()
+
+    def test_model_show(self):
+        repl = CompanionREPL()
+        output = repl.handle_input("/model")
+        assert "Current model" in output
+
+    def test_model_switch(self):
+        repl = CompanionREPL()
+        output = repl.handle_input("/model gpt-4o")
+        # Will fail without provider, but shouldn't crash
+        assert "gpt-4o" in output
+
+    def test_help_includes_provider(self):
+        repl = CompanionREPL()
+        output = repl.handle_input("/help")
+        assert "/provider" in output
+        assert "/model" in output
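These tests assert only on the REPL's text output, so they should pass without any API keys configured (the inline comments note that provider init is expected to fail gracefully); pytest tests/test_companion.py::TestProviderCommand runs just this class.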
--- video_processor/cli/companion.py
+++ video_processor/cli/companion.py
@@ -133,12 +133,16 @@
         if self._docs:
             names = ", ".join(d.name for d in self._docs[:3])
             suffix = f" (+{len(self._docs) - 3} more)" if len(self._docs) > 3 else ""
             lines.append(f" Docs: {names}{suffix}")
 
-        provider_label = "active" if self.provider_manager else "none"
-        lines.append(f" LLM provider: {provider_label}")
+        if self.provider_manager:
+            prov = getattr(self.provider_manager, "provider", self._provider_name)
+            model = self._chat_model or "default"
+            lines.append(f" LLM provider: {prov} (model: {model})")
+        else:
+            lines.append(" LLM provider: none")
         lines.append("")
         lines.append(" Type /help for commands, or ask a question.")
         lines.append("")
         return "\n".join(lines)
 
@@ -154,10 +158,12 @@
             " /search TERM       Search entities by name",
             " /neighbors ENTITY  Show entity relationships",
             " /export FORMAT     Export KG (markdown, obsidian, notion, csv)",
             " /analyze PATH      Analyze a video/doc",
             " /ingest PATH       Ingest a file into the KG",
+            " /provider [NAME]   List or switch LLM provider",
+            " /model [NAME]      Show or switch chat model",
             " /run SKILL         Run a skill by name",
             " /plan              Run project_plan skill",
             " /prd               Run PRD skill",
             " /tasks             Run task_breakdown skill",
             " /quit, /exit       Exit companion",
@@ -281,10 +287,75 @@
             artifact = skill.execute(self.agent.context)
             return f"--- {artifact.name} ({artifact.artifact_type}) ---\n{artifact.content}"
         except Exception as exc:
             return f"Skill execution failed: {exc}"
 
+    def _cmd_provider(self, args: str) -> str:
+        """List available providers or switch to a specific one."""
+        args = args.strip().lower()
+        if not args or args == "list":
+            lines = ["Available providers:"]
+            known = [
+                "openai",
+                "anthropic",
+                "gemini",
+                "ollama",
+                "azure",
+                "together",
+                "fireworks",
+                "cerebras",
+                "xai",
+            ]
+            import os
+
+            key_map = {
+                "openai": "OPENAI_API_KEY",
+                "anthropic": "ANTHROPIC_API_KEY",
+                "gemini": "GEMINI_API_KEY",
+                "azure": "AZURE_OPENAI_API_KEY",
+                "together": "TOGETHER_API_KEY",
+                "fireworks": "FIREWORKS_API_KEY",
+                "cerebras": "CEREBRAS_API_KEY",
+                "xai": "XAI_API_KEY",
+            }
+            current = getattr(self.provider_manager, "provider", self._provider_name)
+            for name in known:
+                env = key_map.get(name)
+                has_key = bool(os.environ.get(env, "")) if env else None
+                if name == "ollama":
+                    status = "local"
+                elif has_key:
+                    status = "ready"
+                else:
+                    status = "no key"
+                active = " (active)" if name == current else ""
+                lines.append(f" {name}: {status}{active}")
+            lines.append(f"\nCurrent: {current or 'none'}")
+            return "\n".join(lines)
+
+        # Switch provider
+        self._provider_name = args
+        self._chat_model = None
+        self._init_provider()
+        self._init_agent()
+        if self.provider_manager:
+            return f"Switched to provider: {args}"
+        return f"Failed to initialise provider: {args}"
+
+    def _cmd_model(self, args: str) -> str:
+        """Switch the chat model."""
+        args = args.strip()
+        if not args:
+            current = self._chat_model or "default"
+            return f"Current model: {current}\nUsage: /model MODEL_NAME"
+        self._chat_model = args
+        self._init_provider()
+        self._init_agent()
+        if self.provider_manager:
+            return f"Switched to model: {args}"
+        return f"Failed to initialise with model: {args}"
+
     def _cmd_chat(self, message: str) -> str:
         if not self.provider_manager or not self.agent:
             return (
                 "Chat requires an LLM provider. Set one of:\n"
                 " OPENAI_API_KEY\n"
@@ -330,10 +401,14 @@
             return self._cmd_export(args)
         if cmd == "/analyze":
             return self._cmd_analyze(args)
         if cmd == "/ingest":
             return self._cmd_ingest(args)
+        if cmd == "/provider":
+            return self._cmd_provider(args)
+        if cmd == "/model":
+            return self._cmd_model(args)
         if cmd == "/run":
             return self._cmd_run_skill(args)
         if cmd == "/plan":
             return self._cmd_run_skill("project_plan")
         if cmd == "/prd":
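The listing mode of _cmd_provider boils down to a per-provider environment-variable check. A standalone sketch of that check, assuming nothing beyond the standard library (provider names and key variables are copied from the hunk above; the wrapper script itself is illustrative):

import os

# Provider -> API-key variable mapping, as in the commit; ollama runs
# locally and needs no key, so it has no entry here.
KEY_MAP = {
    "openai": "OPENAI_API_KEY",
    "anthropic": "ANTHROPIC_API_KEY",
    "gemini": "GEMINI_API_KEY",
    "azure": "AZURE_OPENAI_API_KEY",
    "together": "TOGETHER_API_KEY",
    "fireworks": "FIREWORKS_API_KEY",
    "cerebras": "CEREBRAS_API_KEY",
    "xai": "XAI_API_KEY",
}
KNOWN = ["openai", "anthropic", "gemini", "ollama", "azure",
         "together", "fireworks", "cerebras", "xai"]

for name in KNOWN:
    env = KEY_MAP.get(name)
    if name == "ollama":
        status = "local"                  # local server, no key required
    elif env and os.environ.get(env):
        status = "ready"                  # key present in the environment
    else:
        status = "no key"
    print(f" {name}: {status}")

One design choice worth noting: switching providers clears _chat_model before re-initialising, so a model name tied to the previous provider (e.g. gpt-4o on openai) cannot leak into the new one; /model then sets it explicitly.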