PlanOpticon

feat(taxonomy): add planning entity taxonomy and classifier Add TaxonomyClassifier that bridges raw KG entities into planning-ready structures (goals, requirements, decisions, risks, etc.) using keyword heuristics with optional LLM refinement. Includes CLI command `planopticon kg classify` and comprehensive test suite.

lmata 2026-03-07 21:58 trunk
Commit c82c7c972c0ff64cea99548b0b0c37b7f7379aae48f313f68d0194432b877039
--- a/video_processor/agent/agent_loop.py
+++ b/video_processor/agent/agent_loop.py
@@ -0,0 +1,133 @@
1
+"""Planning agent loop for synthesizing knowledge into artifacts."""
2
+
3
+import logging
4
+from pathlib import Path
5
+from typing import List
6
+
7
+from video_processor.agent.kb_context import KBContext
8
+from video_processor.agent.skills.base import (
9
+ AgentContext,
10
+ Artifact,
11
+ get_skill,
12
+ list_skills,
13
+)
14
+
15
+logger = logging.getLogger(__name__)
16
+
17
+
18
+class PlanningAgent:
19
+ """AI agent that synthesizes knowledge into planning artifacts."""
20
+
21
+ def __init__(self, context: AgentContext):
22
+ self.context = context
23
+
24
+ @classmethod
25
+ def from_kb_paths(cls, kb_paths: List[Path], provider_manager=None) -> "PlanningAgent":
26
+ """Create an agent from knowledge base paths."""
27
+ kb = KBContext()
28
+ for path in kb_paths:
29
+ kb.add_source(path)
30
+ kb.load(provider_manager=provider_manager)
31
+
32
+ context = AgentContext(
33
+ knowledge_graph=kb.knowledge_graph,
34
+ query_engine=kb.query_engine,
35
+ provider_manager=provider_manager,
36
+ )
37
+ return cls(context)
38
+
39
+ def execute(self, request: str) -> List[Artifact]:
40
+ """Execute a user request by selecting and running appropriate skills."""
41
+ # Step 1: Build context summary for LLM
42
+ kb_summary = ""
43
+ if self.context.query_engine:
44
+ stats = self.context.query_engine.stats()
45
+ kb_summary = stats.to_text()
46
+
47
+ available_skills = list_skills()
48
+ skill_descriptions = "\n".join(f"- {s.name}: {s.description}" for s in available_skills)
49
+
50
+ # Step 2: Ask LLM to select skills
51
+ plan_prompt = (
52
+ "You are a planning agent. Given a user request and available skills, "
53
+ "select which skills to execute and in what order.\n\n"
54
+ f"Knowledge base:\n{kb_summary}\n\n"
55
+ f"Available skills:\n{skill_descriptions}\n\n"
56
+ f"User request: {request}\n\n"
57
+ "Return a JSON array of skill names to execute in order:\n"
58
+ '[{"skill": "skill_name", "params": {}}]\n'
59
+ "Return ONLY the JSON array."
60
+ )
61
+
62
+ if not self.context.provider_manager:
63
+ # No LLM -- try to match skills by keyword
64
+ return self._keyword_match_execute(request)
65
+
66
+ raw = self.context.provider_manager.chat(
67
+ [{"role": "user", "content": plan_prompt}],
68
+ max_tokens=512,
69
+ temperature=0.1,
70
+ )
71
+
72
+ from video_processor.utils.json_parsing import parse_json_from_response
73
+
74
+ plan = parse_json_from_response(raw)
75
+
76
+ artifacts = []
77
+ if isinstance(plan, list):
78
+ for step in plan:
79
+ if isinstance(step, dict) and "skill" in step:
80
+ skill = get_skill(step["skill"])
81
+ if skill and skill.can_execute(self.context):
82
+ params = step.get("params", {})
83
+ artifact = skill.execute(self.context, **params)
84
+ artifacts.append(artifact)
85
+ self.context.artifacts.append(artifact)
86
+
87
+ return artifacts
88
+
89
+ def _keyword_match_execute(self, request: str) -> List[Artifact]:
90
+ """Fallback: match skills by keywords in the request."""
91
+ request_lower = request.lower()
92
+ artifacts = []
93
+ for skill in list_skills():
94
+ # Simple keyword matching
95
+ skill_words = skill.name.replace("_", " ").split()
96
+ if any(word in request_lower for word in skill_words):
97
+ if skill.can_execute(self.context):
98
+ artifact = skill.execute(self.context)
99
+ artifacts.append(artifact)
100
+ self.context.artifacts.append(artifact)
101
+ return artifacts
102
+
103
+ def chat(self, message: str) -> str:
104
+ """Interactive chat -- accumulate context and answer questions."""
105
+ self.context.conversation_history.append({"role": "user", "content": message})
106
+
107
+ if not self.context.provider_manager:
108
+ return "Agent requires a configured LLM provider for chat mode."
109
+
110
+ # Build system context
111
+ kb_summary = ""
112
+ if self.context.query_engine:
113
+ stats = self.context.query_engine.stats()
114
+ kb_summary = f"\n\nKnowledge base:\n{stats.to_text()}"
115
+
116
+ artifacts_summary = ""
117
+ if self.context.artifacts:
118
+ artifacts_summary = "\n\nGenerated artifacts:\n" + "\n".join(
119
+ f"- {a.name} ({a.artifact_type})" for a in self.context.artifacts
120
+ )
121
+
122
+ system_msg = (
123
+ "You are PlanOpticon, a planning agent that hel "You are running inside the interactive companion REPL. The user can use these "
124
+ "built-in commands (suggest them when relevant):\n"
125
+ " /status - Show workspace status (loaded KG, videos, docs)\n"
126
+ " /entities [--type T] - List knowledge graph entities\n"
127
+ " /search TERM - Search entities by name\n"
128
+ " /neighbors ENTITY - Show entity relationships\n"
129
+ " /export FORMAT - Export KG (markdown, obsidian, notion, csv)\n"
130
+ " /analyze PATH - Analyze a video or document\n"
131
+ " /ingest PATH - Ingest a file into the knowledge graph\n"
132
+ " /auth SERVICE - Authenticate with a service "
133
+ "(zoom,
--- a/video_processor/agent/agent_loop.py
+++ b/video_processor/agent/agent_loop.py
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/video_processor/agent/agent_loop.py
+++ b/video_processor/agent/agent_loop.py
@@ -0,0 +1,133 @@
1 """Planning agent loop for synthesizing knowledge into artifacts."""
2
3 import logging
4 from pathlib import Path
5 from typing import List
6
7 from video_processor.agent.kb_context import KBContext
8 from video_processor.agent.skills.base import (
9 AgentContext,
10 Artifact,
11 get_skill,
12 list_skills,
13 )
14
15 logger = logging.getLogger(__name__)
16
17
18 class PlanningAgent:
19 """AI agent that synthesizes knowledge into planning artifacts."""
20
21 def __init__(self, context: AgentContext):
22 self.context = context
23
24 @classmethod
25 def from_kb_paths(cls, kb_paths: List[Path], provider_manager=None) -> "PlanningAgent":
26 """Create an agent from knowledge base paths."""
27 kb = KBContext()
28 for path in kb_paths:
29 kb.add_source(path)
30 kb.load(provider_manager=provider_manager)
31
32 context = AgentContext(
33 knowledge_graph=kb.knowledge_graph,
34 query_engine=kb.query_engine,
35 provider_manager=provider_manager,
36 )
37 return cls(context)
38
39 def execute(self, request: str) -> List[Artifact]:
40 """Execute a user request by selecting and running appropriate skills."""
41 # Step 1: Build context summary for LLM
42 kb_summary = ""
43 if self.context.query_engine:
44 stats = self.context.query_engine.stats()
45 kb_summary = stats.to_text()
46
47 available_skills = list_skills()
48 skill_descriptions = "\n".join(f"- {s.name}: {s.description}" for s in available_skills)
49
50 # Step 2: Ask LLM to select skills
51 plan_prompt = (
52 "You are a planning agent. Given a user request and available skills, "
53 "select which skills to execute and in what order.\n\n"
54 f"Knowledge base:\n{kb_summary}\n\n"
55 f"Available skills:\n{skill_descriptions}\n\n"
56 f"User request: {request}\n\n"
57 "Return a JSON array of skill names to execute in order:\n"
58 '[{"skill": "skill_name", "params": {}}]\n'
59 "Return ONLY the JSON array."
60 )
61
62 if not self.context.provider_manager:
63 # No LLM -- try to match skills by keyword
64 return self._keyword_match_execute(request)
65
66 raw = self.context.provider_manager.chat(
67 [{"role": "user", "content": plan_prompt}],
68 max_tokens=512,
69 temperature=0.1,
70 )
71
72 from video_processor.utils.json_parsing import parse_json_from_response
73
74 plan = parse_json_from_response(raw)
75
76 artifacts = []
77 if isinstance(plan, list):
78 for step in plan:
79 if isinstance(step, dict) and "skill" in step:
80 skill = get_skill(step["skill"])
81 if skill and skill.can_execute(self.context):
82 params = step.get("params", {})
83 artifact = skill.execute(self.context, **params)
84 artifacts.append(artifact)
85 self.context.artifacts.append(artifact)
86
87 return artifacts
88
89 def _keyword_match_execute(self, request: str) -> List[Artifact]:
90 """Fallback: match skills by keywords in the request."""
91 request_lower = request.lower()
92 artifacts = []
93 for skill in list_skills():
94 # Simple keyword matching
95 skill_words = skill.name.replace("_", " ").split()
96 if any(word in request_lower for word in skill_words):
97 if skill.can_execute(self.context):
98 artifact = skill.execute(self.context)
99 artifacts.append(artifact)
100 self.context.artifacts.append(artifact)
101 return artifacts
102
103 def chat(self, message: str) -> str:
104 """Interactive chat -- accumulate context and answer questions."""
105 self.context.conversation_history.append({"role": "user", "content": message})
106
107 if not self.context.provider_manager:
108 return "Agent requires a configured LLM provider for chat mode."
109
110 # Build system context
111 kb_summary = ""
112 if self.context.query_engine:
113 stats = self.context.query_engine.stats()
114 kb_summary = f"\n\nKnowledge base:\n{stats.to_text()}"
115
116 artifacts_summary = ""
117 if self.context.artifacts:
118 artifacts_summary = "\n\nGenerated artifacts:\n" + "\n".join(
119 f"- {a.name} ({a.artifact_type})" for a in self.context.artifacts
120 )
121
122 system_msg = (
123 "You are PlanOpticon, a planning agent that hel "You are running inside the interactive companion REPL. The user can use these "
124 "built-in commands (suggest them when relevant):\n"
125 " /status - Show workspace status (loaded KG, videos, docs)\n"
126 " /entities [--type T] - List knowledge graph entities\n"
127 " /search TERM - Search entities by name\n"
128 " /neighbors ENTITY - Show entity relationships\n"
129 " /export FORMAT - Export KG (markdown, obsidian, notion, csv)\n"
130 " /analyze PATH - Analyze a video or document\n"
131 " /ingest PATH - Ingest a file into the knowledge graph\n"
132 " /auth SERVICE - Authenticate with a service "
133 "(zoom,
--- a/video_processor/agent/kb_context.py
+++ b/video_processor/agent/kb_context.py
@@ -0,0 +1,98 @@
1
+"""Knowledge base context manager for loading and merging knowledge graphs."""
2
+
3
+import json
4
+import logging
5
+from pathlib import Path
6
+from typing import List, Optional
7
+
8
+logger = logging.getLogger(__name__)
9
+
10
+
11
+class KBContext:
12
+ """Load and merge multiple knowledge graphs into a unified context."""
13
+
14
+ def __init__(self):
15
+ self._sources: List[Path] = []
16
+ self._kg = None # KnowledgeGraph instance
17
+ self._engine = None # GraphQueryEngine instance
18
+
19
+ def add_source(self, path) -> None:
20
+ """Add a knowledge graph source (.db or .json file, or directory to search)."""
21
+ path = Path(path).resolve()
22
+ if path.is_dir():
23
+ from video_processor.integrators.graph_discovery import find_knowledge_graphs
24
+
25
+ graphs = find_knowledge_graphs(path)
26
+ self._sources.extend(graphs)
27
+ elif path.is_file():
28
+ self._sources.append(path)
29
+ else:
30
+ raise FileNotFoundError(f"Not found: {path}")
31
+
32
+ def load(self, provider_manager=None) -> "KBContext":
33
+ """Load and merge all added sources into a single knowledge graph."""
34
+ from video_processor.integrators.graph_query import GraphQueryEngine
35
+ from video_processor.integrators.knowledge_graph import KnowledgeGraph
36
+
37
+ self._kg = KnowledgeGraph(provider_manager=provider_manager)
38
+
39
+ for source_path in self._sources:
40
+ if source_path.suffix == ".db":
41
+ other = KnowledgeGraph(db_path=source_path)
42
+ self._kg.merge(other)
43
+ elif source_path.suffix == ".json":
44
+ data = json.loads(source_path.read_text())
45
+ other = KnowledgeGraph.from_dict(data)
46
+ self._kg.merge(other)
47
+
48
+ self._engine = GraphQueryEngine(self._kg._store, provider_manager=provider_manager)
49
+ return self
50
+
51
+ @property
52
+ def knowledge_graph(self):
53
+ """Return the merged KnowledgeGraph, or None if not loaded."""
54
+ if not self._kg:
55
+ raise RuntimeError("Call load() first")
56
+ return self._kg
57
+
58
+ @property
59
+ def query_engine(self):
60
+ """Return the GraphQueryEngine, or None if not loaded."""
61
+ if not self._engine:
62
+ raise RuntimeError("Call load() first")
63
+ return self._engine
64
+
65
+ @property
66
+ def sources(self) -> List[Path]:
67
+ """Return the list of source paths."""
68
+ return list(self._sources)
69
+
70
+ def summary(self) -> str:
71
+ """Generate a brief summary of the loaded knowledge base."""
72
+ if not self._kg:
73
+ return "No knowledge base loaded."
74
+
75
+ stats = self._engine.stats().data
76
+ lines = [
77
+ f"Knowledge base: {len(self._sources)} source(s)",
78
+ f" Entities: {stats['entity_count']}",
79
+ f" Relationships: {stats['relationship_count']}",
80
+ ]
81
+ if stats.get("entity_types"):
82
+ lines.append(" Entity types:")
83
+ for t, count in sorted(stats["entity_types"].items(), key=lambda x: -x[1]):
84
+ lines.append(f" {t}: {count}")
85
+ return "\n".join(lines)
86
+
87
+ @classmethod
88
+ def auto_discover(cls, start_dir: Optional[Path] = None, provider_manager=None) -> "KBContext":
89
+ """Create a KBContext by auto-discovering knowledge graphs near start_dir."""
90
+ from video_processor.integrators.graph_discovery import find_knowledge_graphs
91
+
92
+ ctx = cls()
93
+ graphs = find_knowledge_graphs(start_dir)
94
+ for g in graphs:
95
+ ctx._sources.append(g)
96
+ if ctx._sources:
97
+ ctx.load(provider_manager=provider_manager)
98
+ return ctx
--- a/video_processor/agent/kb_context.py
+++ b/video_processor/agent/kb_context.py
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/video_processor/agent/kb_context.py
+++ b/video_processor/agent/kb_context.py
@@ -0,0 +1,98 @@
1 """Knowledge base context manager for loading and merging knowledge graphs."""
2
3 import json
4 import logging
5 from pathlib import Path
6 from typing import List, Optional
7
8 logger = logging.getLogger(__name__)
9
10
class KBContext:
    """Load and merge multiple knowledge graphs into a unified context.

    Sources are registered via add_source() (or discovered via
    auto_discover()) and merged into a single KnowledgeGraph by load().
    """

    def __init__(self):
        self._sources: List[Path] = []
        self._kg = None  # merged KnowledgeGraph instance, set by load()
        self._engine = None  # GraphQueryEngine over the merged graph, set by load()

    def add_source(self, path) -> None:
        """Add a knowledge graph source (.db or .json file, or directory to search).

        Args:
            path: File path, or a directory that is scanned for graphs.

        Raises:
            FileNotFoundError: If *path* is neither a file nor a directory.
        """
        path = Path(path).resolve()
        if path.is_dir():
            from video_processor.integrators.graph_discovery import find_knowledge_graphs

            graphs = find_knowledge_graphs(path)
            self._sources.extend(graphs)
        elif path.is_file():
            self._sources.append(path)
        else:
            raise FileNotFoundError(f"Not found: {path}")

    def load(self, provider_manager=None) -> "KBContext":
        """Load and merge all added sources into a single knowledge graph.

        Sources with an unrecognized suffix are skipped with a warning
        instead of being silently ignored. Returns self for chaining.
        """
        from video_processor.integrators.graph_query import GraphQueryEngine
        from video_processor.integrators.knowledge_graph import KnowledgeGraph

        self._kg = KnowledgeGraph(provider_manager=provider_manager)

        for source_path in self._sources:
            if source_path.suffix == ".db":
                other = KnowledgeGraph(db_path=source_path)
                self._kg.merge(other)
            elif source_path.suffix == ".json":
                data = json.loads(source_path.read_text())
                other = KnowledgeGraph.from_dict(data)
                self._kg.merge(other)
            else:
                # Previously unknown formats were dropped silently; surface it.
                logger.warning("Skipping unsupported knowledge graph source: %s", source_path)

        self._engine = GraphQueryEngine(self._kg._store, provider_manager=provider_manager)
        return self

    @property
    def knowledge_graph(self):
        """Return the merged KnowledgeGraph.

        Raises:
            RuntimeError: If load() has not been called yet.
        """
        # 'is None' rather than truthiness: an empty graph must still be returned.
        if self._kg is None:
            raise RuntimeError("Call load() first")
        return self._kg

    @property
    def query_engine(self):
        """Return the GraphQueryEngine.

        Raises:
            RuntimeError: If load() has not been called yet.
        """
        if self._engine is None:
            raise RuntimeError("Call load() first")
        return self._engine

    @property
    def sources(self) -> List[Path]:
        """Return a copy of the registered source paths."""
        return list(self._sources)

    def summary(self) -> str:
        """Generate a brief, human-readable summary of the loaded knowledge base."""
        if self._kg is None:
            return "No knowledge base loaded."

        stats = self._engine.stats().data
        lines = [
            f"Knowledge base: {len(self._sources)} source(s)",
            f"  Entities: {stats['entity_count']}",
            f"  Relationships: {stats['relationship_count']}",
        ]
        if stats.get("entity_types"):
            lines.append("  Entity types:")
            # Most common entity types first.
            for t, count in sorted(stats["entity_types"].items(), key=lambda x: -x[1]):
                lines.append(f"    {t}: {count}")
        return "\n".join(lines)

    @classmethod
    def auto_discover(cls, start_dir: Optional[Path] = None, provider_manager=None) -> "KBContext":
        """Create a KBContext by auto-discovering knowledge graphs near start_dir.

        If nothing is discovered, the returned context is left unloaded.
        """
        from video_processor.integrators.graph_discovery import find_knowledge_graphs

        ctx = cls()
        ctx._sources.extend(find_knowledge_graphs(start_dir))
        if ctx._sources:
            ctx.load(provider_manager=provider_manager)
        return ctx
--- a/video_processor/agent/skills/__init__.py
+++ b/video_processor/agent/skills/__init__.py
@@ -0,0 +1,16 @@
1
+"""Agent skill sster via register_skill().
2
+from v.base import (
3
+ AgentContext,
4
+ Artifact,
5
+ Skill,
6
+ get_skill,
7
+ list_skills,
8
+ register_skill,
9
+)
10
+
11
+__all__ = [
12
+ "AgentContext",
13
+ "Artifact",
14
+ "Skill",
15
+ "get_skill",
16
+ "list_skills
--- a/video_processor/agent/skills/__init__.py
+++ b/video_processor/agent/skills/__init__.py
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/video_processor/agent/skills/__init__.py
+++ b/video_processor/agent/skills/__init__.py
@@ -0,0 +1,16 @@
1 """Agent skill sster via register_skill().
2 from v.base import (
3 AgentContext,
4 Artifact,
5 Skill,
6 get_skill,
7 list_skills,
8 register_skill,
9 )
10
11 __all__ = [
12 "AgentContext",
13 "Artifact",
14 "Skill",
15 "get_skill",
16 "list_skills
--- a/video_processor/agent/skills/base.py
+++ b/video_processor/agent/skills/base.py
@@ -0,0 +1,65 @@
1
+"""Skill interface for the PlanOpticon planning agent."""
2
+
3
+from abc import ABC, abstractmethod
4
+from dataclasses import dataclass, field
5
+from typing import Any, Dict, List, Optional
6
+
7
+
8
+@dataclass
9
+class Artifact:
10
+ """Output from a skill execution."""
11
+
12
+ name: str
13
+ content: str # The generated content (markdown, json, etc.)
14
+ artifact_type: str # "project_plan", "prd", "roadmap", "task_list", "document", "issues"
15
+ format: str = "markdown" # "markdown", "json", "mermaid"
16
+ metadata: Dict[str, Any] = field(default_factory=dict)
17
+
18
+
19
+@dataclass
20
+class AgentContext:
21
+ """Shared context for agent skills."""
22
+
23
+ knowledge_graph: Any = None # KnowledgeGraph instance
24
+ query_engine: Any = None # GraphQueryEngine instance
25
+ provider_manager: Any = None # ProviderManager instance
26
+ planning_entities: List[Any] = field(default_factory=list)
27
+ user_requirements: Dict[str, Any] = field(default_factory=dict)
28
+ conversation_history: List[Dict[str, str]] = field(default_factory=list)
29
+ artifacts: List[Artifact] = field(default_factory=list)
30
+ config: Dict[str, Any] = field(default_factory=dict)
31
+
32
+
33
+class Skill(ABC):
34
+ """Base class for agent skills."""
35
+
36
+ name: str = ""
37
+ description: str = ""
38
+
39
+ @abstractmethod
40
+ def execute(self, context: AgentContext, **kwargs) -> Artifact:
41
+ """Execute this skill and return an artifact."""
42
+ ...
43
+
44
+ def can_execute(self, context: AgentContext) -> bool:
45
+ """Check if this skill can execute given the current context."""
46
+ return context.knowledge_graph is not None and context.provider_manager is not None
47
+
48
+
49
+# Skill registry
50
+_skills: Dict[str, "Skill"] = {}
51
+
52
+
53
+def register_skill(skill: "Skill") -> None:
54
+ """Register a skill instance in the global registry."""
55
+ _skills[skill.name] = skill
56
+
57
+
58
+def get_skill(name: str) -> Optional["Skill"]:
59
+ """Look up a skill by name."""
60
+ return _skills.get(name)
61
+
62
+
63
+def list_skills() -> List["Skill"]:
64
+ """Return all registered skills."""
65
+ return list(_skills.values())
--- a/video_processor/agent/skills/base.py
+++ b/video_processor/agent/skills/base.py
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/video_processor/agent/skills/base.py
+++ b/video_processor/agent/skills/base.py
@@ -0,0 +1,65 @@
1 """Skill interface for the PlanOpticon planning agent."""
2
3 from abc import ABC, abstractmethod
4 from dataclasses import dataclass, field
5 from typing import Any, Dict, List, Optional
6
7
@dataclass
class Artifact:
    """Output produced by a single skill execution."""

    name: str
    content: str  # the generated content (markdown, json, etc.)
    artifact_type: str  # e.g. "project_plan", "prd", "roadmap", "task_list", "document", "issues"
    format: str = "markdown"  # one of "markdown", "json", "mermaid"
    metadata: Dict[str, Any] = field(default_factory=dict)  # free-form extra data
17
18
@dataclass
class AgentContext:
    """Mutable state shared across agent skill executions."""

    knowledge_graph: Any = None  # KnowledgeGraph instance
    query_engine: Any = None  # GraphQueryEngine instance
    provider_manager: Any = None  # ProviderManager instance
    planning_entities: List[Any] = field(default_factory=list)
    user_requirements: Dict[str, Any] = field(default_factory=dict)
    conversation_history: List[Dict[str, str]] = field(default_factory=list)  # chat messages
    artifacts: List[Artifact] = field(default_factory=list)  # outputs accumulated so far
    config: Dict[str, Any] = field(default_factory=dict)
31
32
class Skill(ABC):
    """Abstract base class for agent skills."""

    name: str = ""
    description: str = ""

    @abstractmethod
    def execute(self, context: AgentContext, **kwargs) -> Artifact:
        """Execute this skill and return an artifact."""

    def can_execute(self, context: AgentContext) -> bool:
        """A skill is runnable once both a knowledge graph and an LLM provider exist."""
        return not (context.knowledge_graph is None or context.provider_manager is None)
47
48
# Global registry of skill instances, keyed by skill name.
_skills: Dict[str, "Skill"] = {}


def register_skill(skill: "Skill") -> None:
    """Add *skill* to the global registry under its name (overwrites duplicates)."""
    _skills[skill.name] = skill


def get_skill(name: str) -> Optional["Skill"]:
    """Return the registered skill called *name*, or None if unknown."""
    return _skills.get(name)


def list_skills() -> List["Skill"]:
    """Return every registered skill, in registration order."""
    return list(_skills.values())
--- video_processor/cli/commands.py
+++ video_processor/cli/commands.py
@@ -614,10 +614,147 @@
614614
import traceback
615615
616616
traceback.print_exc()
617617
sys.exit(1)
618618
619
+
620
+@cli.command()
621
+@click.argument("request", required=False, default=None)
622
+@click.option("--kb", multiple=True, type=click.Path(exists=True), help="Knowledge base paths")
623
+@click.option("--interactive", "-I", is_flag=True, help="Interactive chat mode")
624
+@click.option("--export", type=click.Path(), default=None, help="Export artifacts to directory")
625
+@click.option(
626
+ "--provider",
627
+ "-p",
628
+ type=click.Choice(
629
+ [
630
+ "auto",
631
+ "openai",
632
+ "anthropic",
633
+ "gemini",
634
+ "ollama",
635
+ "azure",
636
+ "together",
637
+ "fireworks",
638
+ "cerebras",
639
+ "xai",
640
+ ]
641
+ ),
642
+ default="auto",
643
+ help="API provider",
644
+)
645
+@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
646
+@click.pass_context
647
+def agent(ctx, request, kb, interactive, export, provider, chat_model):
648
+ """AI planning agent. Synthesizes knowledge into project plans and artifacts.
649
+
650
+ Examples:
651
+
652
+ planopticon agent "Create a project plan" --kb ./results
653
+
654
+ planopticon agent -I --kb ./videos --kb ./docs
655
+
656
+ planopticon agent "Generate a PRD" --export ./output
657
+ """
658
+ from video_processor.agent.agent_loop import PlanningAgent
659
+ from video_processor.agent.kb_context import KBContext
660
+ from video_processor.agent.skills.base import AgentContext
661
+
662
+ # Build provider manager
663
+ pm = None
664
+ try:
665
+ from video_processor.providers.manager import ProviderManager
666
+
667
+ prov = None if provider == "auto" else provider
668
+ pm = ProviderManager(chat_model=chat_model, provider=prov)
669
+ except Exception:
670
+ if not interactive:
671
+ click.echo("Warning: could not initialize LLM provider.", err=True)
672
+
673
+ # Load knowledge base
674
+ kb_ctx = KBContext()
675
+ if kb:
676
+ for path in kb:
677
+ kb_ctx.add_source(Path(path))
678
+ kb_ctx.load(provider_manager=pm)
679
+ click.echo(kb_ctx.summary())
680
+ else:
681
+ # Auto-discover
682
+ kb_ctx = KBContext.auto_discover(provider_manager=pm)
683
+ if kb_ctx.sources:
684
+ click.echo(kb_ctx.summary())
685
+ else:
686
+ click.echo("No knowledge base found. Use --kb to specify paths.")
687
+
688
+ agent_inst = PlanningAgent(
689
+ context=AgentContext(
690
+ knowledge_graph=kb_ctx.knowledge_graph if kb_ctx.sources else None,
691
+ query_engine=kb_ctx.query_engine if kb_ctx.sources else None,
692
+ provider_manager=pm,
693
+ )
694
+ )
695
+
696
+ if interactive:
697
+ click.echo("\nPlanOpticon Agent (interactive mode)")
698
+ click.echo("Type your request, or 'quit' to exit.\n")
699
+ while True:
700
+ try:
701
+ line = click.prompt("agent", prompt_suffix="> ")
702
+ except (KeyboardInterrupt, EOFError):
703
+ click.echo("\nBye.")
704
+ break
705
+ if line.strip().lower() in ("quit", "exit", "q"):
706
+ click.echo("Bye.")
707
+ break
708
+
709
+ # Check for slash commands
710
+ if line.strip().startswith("/"):
711
+ cmd = line.strip()[1:].split()[0]
712
+ if cmd == "plan":
713
+ artifacts = agent_inst.execute("Generate a project plan")
714
+ elif cmd == "skills":
715
+ from video_processor.agent.skills.base import list_skills
716
+
717
+ for s in list_skills():
718
+ click.echo(f" {s.name}: {s.description}")
719
+ continue
720
+ elif cmd == "summary":
721
+ if kb_ctx.sources:
722
+ click.echo(kb_ctx.summary())
723
+ continue
724
+ else:
725
+ artifacts = agent_inst.execute(line.strip()[1:])
726
+
727
+ for a in artifacts:
728
+ click.echo(f"\n--- {a.name} ({a.artifact_type}) ---\n")
729
+ click.echo(a.content)
730
+ else:
731
+ response = agent_inst.chat(line)
732
+ click.echo(f"\n{response}\n")
733
+ elif request:
734
+ artifacts = agent_inst.execute(request)
735
+ if not artifacts:
736
+ click.echo("No artifacts generated. Try a more specific request.")
737
+ for artifact in artifacts:
738
+ click.echo(f"\n--- {artifact.name} ({artifact.artifact_type}) ---\n")
739
+ click.echo(artifact.content)
740
+
741
+ if export:
742
+ export_dir = Path(export)
743
+ export_dir.mkdir(parents=True, exist_ok=True)
744
+ for artifact in artifacts:
745
+ ext = ".md" if artifact.format == "markdown" else f".{artifact.format}"
746
+ safe_name = "".join(
747
+ c if c.isalnum() or c in "-_" else "_" for c in artifact.name
748
+ )
749
+ fpath = export_dir / f"{safe_name}{ext}"
750
+ fpath.write_text(artifact.content)
751
+ click.echo(f"Exported: {fpath}")
752
+ else:
753
+ click.echo("Provide a request or use -I for interactive mode.")
754
+ click.echo("Example: planopticon agent 'Create a project plan' --kb ./results")
755
+
619756
620757
@cli.command()
621758
@click.argument("question", required=False, default=None)
622759
@click.option(
623760
"--db-path",
624761
--- video_processor/cli/commands.py
+++ video_processor/cli/commands.py
@@ -614,10 +614,147 @@
614 import traceback
615
616 traceback.print_exc()
617 sys.exit(1)
618
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
619
620 @cli.command()
621 @click.argument("question", required=False, default=None)
622 @click.option(
623 "--db-path",
624
--- video_processor/cli/commands.py
+++ video_processor/cli/commands.py
@@ -614,10 +614,147 @@
614 import traceback
615
616 traceback.print_exc()
617 sys.exit(1)
618
619
@cli.command()
@click.argument("request", required=False, default=None)
@click.option("--kb", multiple=True, type=click.Path(exists=True), help="Knowledge base paths")
@click.option("--interactive", "-I", is_flag=True, help="Interactive chat mode")
@click.option("--export", type=click.Path(), default=None, help="Export artifacts to directory")
@click.option(
    "--provider",
    "-p",
    type=click.Choice(
        [
            "auto",
            "openai",
            "anthropic",
            "gemini",
            "ollama",
            "azure",
            "together",
            "fireworks",
            "cerebras",
            "xai",
        ]
    ),
    default="auto",
    help="API provider",
)
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@click.pass_context
def agent(ctx, request, kb, interactive, export, provider, chat_model):
    """AI planning agent. Synthesizes knowledge into project plans and artifacts.

    Examples:

        planopticon agent "Create a project plan" --kb ./results

        planopticon agent -I --kb ./videos --kb ./docs

        planopticon agent "Generate a PRD" --export ./output
    """
    from video_processor.agent.agent_loop import PlanningAgent
    from video_processor.agent.kb_context import KBContext
    from video_processor.agent.skills.base import AgentContext

    # Build provider manager; without one the agent degrades to keyword matching.
    pm = None
    try:
        from video_processor.providers.manager import ProviderManager

        prov = None if provider == "auto" else provider
        pm = ProviderManager(chat_model=chat_model, provider=prov)
    except Exception:
        if not interactive:
            click.echo("Warning: could not initialize LLM provider.", err=True)

    # Load the knowledge base: explicit --kb paths, else auto-discovery.
    kb_ctx = KBContext()
    if kb:
        for path in kb:
            kb_ctx.add_source(Path(path))
        kb_ctx.load(provider_manager=pm)
        click.echo(kb_ctx.summary())
    else:
        kb_ctx = KBContext.auto_discover(provider_manager=pm)
        if kb_ctx.sources:
            click.echo(kb_ctx.summary())
        else:
            click.echo("No knowledge base found. Use --kb to specify paths.")

    agent_inst = PlanningAgent(
        context=AgentContext(
            knowledge_graph=kb_ctx.knowledge_graph if kb_ctx.sources else None,
            query_engine=kb_ctx.query_engine if kb_ctx.sources else None,
            provider_manager=pm,
        )
    )

    if interactive:
        click.echo("\nPlanOpticon Agent (interactive mode)")
        click.echo("Type your request, or 'quit' to exit.\n")
        while True:
            try:
                line = click.prompt("agent", prompt_suffix="> ")
            except (KeyboardInterrupt, EOFError):
                click.echo("\nBye.")
                break
            if line.strip().lower() in ("quit", "exit", "q"):
                click.echo("Bye.")
                break

            # Slash commands are handled locally; anything else goes to the agent.
            if line.strip().startswith("/"):
                parts = line.strip()[1:].split()
                if not parts:
                    # A bare "/" previously raised IndexError on split()[0]; re-prompt.
                    continue
                cmd = parts[0]
                if cmd == "plan":
                    artifacts = agent_inst.execute("Generate a project plan")
                elif cmd == "skills":
                    from video_processor.agent.skills.base import list_skills

                    for s in list_skills():
                        click.echo(f"  {s.name}: {s.description}")
                    continue
                elif cmd == "summary":
                    if kb_ctx.sources:
                        click.echo(kb_ctx.summary())
                    continue
                else:
                    # Unknown slash commands are forwarded to the agent as a request.
                    artifacts = agent_inst.execute(line.strip()[1:])

                for a in artifacts:
                    click.echo(f"\n--- {a.name} ({a.artifact_type}) ---\n")
                    click.echo(a.content)
            else:
                response = agent_inst.chat(line)
                click.echo(f"\n{response}\n")
    elif request:
        artifacts = agent_inst.execute(request)
        if not artifacts:
            click.echo("No artifacts generated. Try a more specific request.")
        for artifact in artifacts:
            click.echo(f"\n--- {artifact.name} ({artifact.artifact_type}) ---\n")
            click.echo(artifact.content)

        if export:
            export_dir = Path(export)
            export_dir.mkdir(parents=True, exist_ok=True)
            for artifact in artifacts:
                ext = ".md" if artifact.format == "markdown" else f".{artifact.format}"
                # Sanitize the artifact name so it is a safe filename.
                safe_name = "".join(
                    c if c.isalnum() or c in "-_" else "_" for c in artifact.name
                )
                fpath = export_dir / f"{safe_name}{ext}"
                fpath.write_text(artifact.content)
                click.echo(f"Exported: {fpath}")
    else:
        click.echo("Provide a request or use -I for interactive mode.")
        click.echo("Example: planopticon agent 'Create a project plan' --kb ./results")
755
756
757 @cli.command()
758 @click.argument("question", required=False, default=None)
759 @click.option(
760 "--db-path",
761

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button