Agent API Reference
video_processor.agent.agent_loop
Planning agent loop for synthesizing knowledge into artifacts.
PlanningAgent
AI agent that synthesizes knowledge into planning artifacts.
Source code in video_processor/agent/agent_loop.py
class PlanningAgent:
    """AI agent that synthesizes knowledge into planning artifacts.

    Uses the configured LLM provider to pick which registered skills to
    run for a request (``execute``) and to answer questions interactively
    (``chat``). Falls back to keyword matching when no provider is set.
    """

    def __init__(self, context: AgentContext):
        # Shared mutable state: knowledge graph, query engine, provider,
        # conversation history, and accumulated artifacts.
        self.context = context

    @classmethod
    def from_kb_paths(cls, kb_paths: List[Path], provider_manager=None) -> "PlanningAgent":
        """Create an agent from knowledge base paths.

        Loads and merges every path via KBContext, then wraps the result
        in an AgentContext.
        """
        kb = KBContext()
        for path in kb_paths:
            kb.add_source(path)
        kb.load(provider_manager=provider_manager)
        context = AgentContext(
            knowledge_graph=kb.knowledge_graph,
            query_engine=kb.query_engine,
            provider_manager=provider_manager,
        )
        return cls(context)

    def execute(self, request: str) -> List[Artifact]:
        """Execute a user request by selecting and running appropriate skills.

        Returns the artifacts produced by the selected skills (possibly
        empty if the LLM plan is unusable or no skill can run).
        """
        # Step 1: Build context summary for LLM
        kb_summary = ""
        if self.context.query_engine:
            stats = self.context.query_engine.stats()
            kb_summary = stats.to_text()
        available_skills = list_skills()
        skill_descriptions = "\n".join(f"- {s.name}: {s.description}" for s in available_skills)
        # Step 2: Ask LLM to select skills
        plan_prompt = (
            "You are a planning agent. Given a user request and available skills, "
            "select which skills to execute and in what order.\n\n"
            f"Knowledge base:\n{kb_summary}\n\n"
            f"Available skills:\n{skill_descriptions}\n\n"
            f"User request: {request}\n\n"
            "Return a JSON array of skill names to execute in order:\n"
            '[{"skill": "skill_name", "params": {}}]\n'
            "Return ONLY the JSON array."
        )
        if not self.context.provider_manager:
            # No LLM -- try to match skills by keyword
            return self._keyword_match_execute(request)
        # Low temperature: we want a deterministic, parseable plan.
        raw = self.context.provider_manager.chat(
            [{"role": "user", "content": plan_prompt}],
            max_tokens=512,
            temperature=0.1,
        )
        from video_processor.utils.json_parsing import parse_json_from_response
        plan = parse_json_from_response(raw)
        artifacts: List[Artifact] = []
        if isinstance(plan, list):
            # Malformed steps, unknown skills, and skills whose
            # can_execute() check fails are silently skipped.
            for step in plan:
                if isinstance(step, dict) and "skill" in step:
                    skill = get_skill(step["skill"])
                    if skill and skill.can_execute(self.context):
                        params = step.get("params", {})
                        artifact = skill.execute(self.context, **params)
                        artifacts.append(artifact)
                        # Also record on the shared context so later
                        # chat() calls can reference prior artifacts.
                        self.context.artifacts.append(artifact)
        return artifacts

    def _keyword_match_execute(self, request: str) -> List[Artifact]:
        """Fallback: match skills by keywords in the request.

        A skill runs when any word of its (underscore-split) name appears
        in the lowercased request and its can_execute() check passes.
        """
        request_lower = request.lower()
        artifacts: List[Artifact] = []
        for skill in list_skills():
            # Simple keyword matching
            skill_words = skill.name.replace("_", " ").split()
            if any(word in request_lower for word in skill_words):
                if skill.can_execute(self.context):
                    artifact = skill.execute(self.context)
                    artifacts.append(artifact)
                    self.context.artifacts.append(artifact)
        return artifacts

    def chat(self, message: str) -> str:
        """Interactive chat -- accumulate context and answer questions.

        Appends both the user message and the assistant reply to the
        shared conversation history so context persists across calls.
        """
        self.context.conversation_history.append({"role": "user", "content": message})
        if not self.context.provider_manager:
            # Unlike execute(), chat has no keyword fallback.
            return "Agent requires a configured LLM provider for chat mode."
        # Build system context
        kb_summary = ""
        if self.context.query_engine:
            stats = self.context.query_engine.stats()
            kb_summary = f"\n\nKnowledge base:\n{stats.to_text()}"
        artifacts_summary = ""
        if self.context.artifacts:
            artifacts_summary = "\n\nGenerated artifacts:\n" + "\n".join(
                f"- {a.name} ({a.artifact_type})" for a in self.context.artifacts
            )
        system_msg = (
            "You are PlanOpticon, an AI planning companion built into the PlanOpticon CLI. "
            "PlanOpticon is a video analysis and knowledge extraction tool that processes "
            "recordings into structured knowledge graphs.\n\n"
            "You are running inside the interactive companion REPL. The user can use these "
            "built-in commands (suggest them when relevant):\n"
            " /status - Show workspace status (loaded KG, videos, docs)\n"
            " /entities [--type T] - List knowledge graph entities\n"
            " /search TERM - Search entities by name\n"
            " /neighbors ENTITY - Show entity relationships\n"
            " /export FORMAT - Export KG (markdown, obsidian, notion, csv)\n"
            " /analyze PATH - Analyze a video or document\n"
            " /ingest PATH - Ingest a file into the knowledge graph\n"
            " /auth SERVICE - Authenticate with a service "
            "(zoom, google, microsoft, notion, dropbox, github)\n"
            " /provider [NAME] - List or switch LLM provider\n"
            " /model [NAME] - Show or switch chat model\n"
            " /plan - Generate a project plan\n"
            " /prd - Generate a PRD\n"
            " /tasks - Generate a task breakdown\n\n"
            "PlanOpticon CLI commands the user can run outside the REPL:\n"
            " planopticon auth zoom|google|microsoft - Authenticate with cloud services\n"
            " planopticon recordings zoom-list|teams-list|meet-list - List cloud recordings\n"
            " planopticon analyze -i VIDEO - Analyze a video file\n"
            " planopticon query - Query the knowledge graph\n"
            " planopticon export FORMAT PATH - Export knowledge graph\n\n"
            f"{kb_summary}{artifacts_summary}\n\n"
            "Help the user with their planning tasks. When they ask about capabilities, "
            "refer them to the appropriate built-in commands. Ask clarifying questions "
            "to gather requirements. When ready, suggest using specific skills or commands "
            "to generate artifacts."
        )
        # System message first, then the full running history.
        messages = [{"role": "system", "content": system_msg}] + self.context.conversation_history
        response = self.context.provider_manager.chat(messages, max_tokens=2048, temperature=0.5)
        self.context.conversation_history.append({"role": "assistant", "content": response})
        return response
|
chat(message)
Interactive chat -- accumulate context and answer questions.
Source code in video_processor/agent/agent_loop.py
def chat(self, message: str) -> str:
    """Interactive chat -- accumulate context and answer questions.

    Appends both the user message and the assistant reply to the shared
    conversation history so context persists across calls. Requires a
    configured provider_manager; returns a static error string otherwise.
    """
    self.context.conversation_history.append({"role": "user", "content": message})
    if not self.context.provider_manager:
        # Unlike execute(), chat has no keyword fallback.
        return "Agent requires a configured LLM provider for chat mode."
    # Build system context
    kb_summary = ""
    if self.context.query_engine:
        stats = self.context.query_engine.stats()
        kb_summary = f"\n\nKnowledge base:\n{stats.to_text()}"
    artifacts_summary = ""
    if self.context.artifacts:
        artifacts_summary = "\n\nGenerated artifacts:\n" + "\n".join(
            f"- {a.name} ({a.artifact_type})" for a in self.context.artifacts
        )
    system_msg = (
        "You are PlanOpticon, an AI planning companion built into the PlanOpticon CLI. "
        "PlanOpticon is a video analysis and knowledge extraction tool that processes "
        "recordings into structured knowledge graphs.\n\n"
        "You are running inside the interactive companion REPL. The user can use these "
        "built-in commands (suggest them when relevant):\n"
        " /status - Show workspace status (loaded KG, videos, docs)\n"
        " /entities [--type T] - List knowledge graph entities\n"
        " /search TERM - Search entities by name\n"
        " /neighbors ENTITY - Show entity relationships\n"
        " /export FORMAT - Export KG (markdown, obsidian, notion, csv)\n"
        " /analyze PATH - Analyze a video or document\n"
        " /ingest PATH - Ingest a file into the knowledge graph\n"
        " /auth SERVICE - Authenticate with a service "
        "(zoom, google, microsoft, notion, dropbox, github)\n"
        " /provider [NAME] - List or switch LLM provider\n"
        " /model [NAME] - Show or switch chat model\n"
        " /plan - Generate a project plan\n"
        " /prd - Generate a PRD\n"
        " /tasks - Generate a task breakdown\n\n"
        "PlanOpticon CLI commands the user can run outside the REPL:\n"
        " planopticon auth zoom|google|microsoft - Authenticate with cloud services\n"
        " planopticon recordings zoom-list|teams-list|meet-list - List cloud recordings\n"
        " planopticon analyze -i VIDEO - Analyze a video file\n"
        " planopticon query - Query the knowledge graph\n"
        " planopticon export FORMAT PATH - Export knowledge graph\n\n"
        f"{kb_summary}{artifacts_summary}\n\n"
        "Help the user with their planning tasks. When they ask about capabilities, "
        "refer them to the appropriate built-in commands. Ask clarifying questions "
        "to gather requirements. When ready, suggest using specific skills or commands "
        "to generate artifacts."
    )
    # System message first, then the full running history.
    messages = [{"role": "system", "content": system_msg}] + self.context.conversation_history
    response = self.context.provider_manager.chat(messages, max_tokens=2048, temperature=0.5)
    self.context.conversation_history.append({"role": "assistant", "content": response})
    return response
|
execute(request)
Execute a user request by selecting and running appropriate skills.
Source code in video_processor/agent/agent_loop.py
def execute(self, request: str) -> List[Artifact]:
    """Execute a user request by selecting and running appropriate skills.

    Asks the LLM for a JSON plan of ``{"skill": name, "params": {}}``
    steps; without a provider, falls back to keyword matching. Returns
    the artifacts produced (possibly empty).
    """
    # Step 1: Build context summary for LLM
    kb_summary = ""
    if self.context.query_engine:
        stats = self.context.query_engine.stats()
        kb_summary = stats.to_text()
    available_skills = list_skills()
    skill_descriptions = "\n".join(f"- {s.name}: {s.description}" for s in available_skills)
    # Step 2: Ask LLM to select skills
    plan_prompt = (
        "You are a planning agent. Given a user request and available skills, "
        "select which skills to execute and in what order.\n\n"
        f"Knowledge base:\n{kb_summary}\n\n"
        f"Available skills:\n{skill_descriptions}\n\n"
        f"User request: {request}\n\n"
        "Return a JSON array of skill names to execute in order:\n"
        '[{"skill": "skill_name", "params": {}}]\n'
        "Return ONLY the JSON array."
    )
    if not self.context.provider_manager:
        # No LLM -- try to match skills by keyword
        return self._keyword_match_execute(request)
    # Low temperature: we want a deterministic, parseable plan.
    raw = self.context.provider_manager.chat(
        [{"role": "user", "content": plan_prompt}],
        max_tokens=512,
        temperature=0.1,
    )
    from video_processor.utils.json_parsing import parse_json_from_response
    plan = parse_json_from_response(raw)
    artifacts: List[Artifact] = []
    if isinstance(plan, list):
        # Malformed steps, unknown skills, and skills whose can_execute()
        # check fails are silently skipped.
        for step in plan:
            if isinstance(step, dict) and "skill" in step:
                skill = get_skill(step["skill"])
                if skill and skill.can_execute(self.context):
                    params = step.get("params", {})
                    artifact = skill.execute(self.context, **params)
                    artifacts.append(artifact)
                    # Record on the shared context so later chat() calls
                    # can reference prior artifacts.
                    self.context.artifacts.append(artifact)
    return artifacts
|
from_kb_paths(kb_paths, provider_manager=None)
classmethod
Create an agent from knowledge base paths.
Source code in video_processor/agent/agent_loop.py
@classmethod
def from_kb_paths(cls, kb_paths: List[Path], provider_manager=None) -> "PlanningAgent":
    """Create an agent from knowledge base paths.

    Each path is registered as a KBContext source; the merged graph and
    query engine become the agent's shared context.
    """
    knowledge = KBContext()
    for kb_path in kb_paths:
        knowledge.add_source(kb_path)
    knowledge.load(provider_manager=provider_manager)
    return cls(
        AgentContext(
            knowledge_graph=knowledge.knowledge_graph,
            query_engine=knowledge.query_engine,
            provider_manager=provider_manager,
        )
    )
|
video_processor.agent.skills.base
Skill interface for the PlanOpticon planning agent.
AgentContext
dataclass
Shared context for agent skills.
Source code in video_processor/agent/skills/base.py
@dataclass
class AgentContext:
    """Shared context for agent skills.

    Accumulates conversation history and generated artifacts across a
    single agent session; passed to every skill execution.
    """

    knowledge_graph: Any = None  # KnowledgeGraph instance
    query_engine: Any = None  # GraphQueryEngine instance
    provider_manager: Any = None  # ProviderManager instance
    planning_entities: List[Any] = field(default_factory=list)  # extracted planning entities
    user_requirements: Dict[str, Any] = field(default_factory=dict)  # user-specified requirements/constraints
    conversation_history: List[Dict[str, str]] = field(default_factory=list)  # {"role", "content"} chat messages
    artifacts: List[Artifact] = field(default_factory=list)  # artifacts produced so far this session
    config: Dict[str, Any] = field(default_factory=dict)  # additional configuration
|
Artifact
dataclass
Output from a skill execution.
Source code in video_processor/agent/skills/base.py
@dataclass
class Artifact:
    """Output from a skill execution."""

    name: str  # human-readable artifact name
    content: str  # The generated content (markdown, json, etc.)
    artifact_type: str  # "project_plan", "prd", "roadmap", "task_list", "document", "issues"
    format: str = "markdown"  # "markdown", "json", "mermaid"
    metadata: Dict[str, Any] = field(default_factory=dict)  # additional metadata
|
Skill
Bases: ABC
Base class for agent skills.
Source code in video_processor/agent/skills/base.py
class Skill(ABC):
    """Base class for agent skills.

    Subclasses set ``name`` and ``description``, implement execute(),
    and become discoverable once passed to register_skill().
    """

    name: str = ""  # skill identifier used for registry lookup
    description: str = ""  # human-readable description shown to the LLM planner

    @abstractmethod
    def execute(self, context: AgentContext, **kwargs) -> Artifact:
        """Execute this skill and return an artifact."""
        ...

    def can_execute(self, context: AgentContext) -> bool:
        """Check if this skill can execute given the current context.

        Default policy requires both a knowledge graph and an LLM
        provider; subclasses may override for other requirements.
        """
        return context.knowledge_graph is not None and context.provider_manager is not None
|
can_execute(context)
Check if this skill can execute given the current context.
Source code in video_processor/agent/skills/base.py
def can_execute(self, context: AgentContext) -> bool:
    """Check if this skill can execute given the current context.

    Default policy: both a knowledge graph and an LLM provider must be
    present on the context.
    """
    if context.knowledge_graph is None:
        return False
    return context.provider_manager is not None
|
execute(context, **kwargs)
abstractmethod
Execute this skill and return an artifact.
Source code in video_processor/agent/skills/base.py
@abstractmethod
def execute(self, context: AgentContext, **kwargs) -> Artifact:
    """Execute this skill and return an artifact.

    Receives the shared AgentContext plus any keyword parameters the
    planner selected for this step.
    """
    ...
|
get_skill(name)
Look up a skill by name.
Source code in video_processor/agent/skills/base.py
def get_skill(name: str) -> Optional["Skill"]:
    """Look up a skill by name.

    Returns the registered Skill instance, or None when no skill is
    registered under that name.
    """
    try:
        return _skills[name]
    except KeyError:
        return None
|
list_skills()
Return all registered skills.
Source code in video_processor/agent/skills/base.py
def list_skills() -> List["Skill"]:
    """Return all registered skills.

    A fresh list is returned so callers cannot mutate the registry view.
    """
    return [*_skills.values()]
|
register_skill(skill)
Register a skill instance in the global registry.
Source code in video_processor/agent/skills/base.py
def register_skill(skill: "Skill") -> None:
    """Register a skill instance in the global registry.

    A later registration under the same name replaces the earlier one.
    """
    _skills.update({skill.name: skill})
|
video_processor.agent.kb_context
Knowledge base context manager for loading and merging knowledge graphs.
KBContext
Load and merge multiple knowledge graphs into a unified context.
Source code in video_processor/agent/kb_context.py
class KBContext:
    """Load and merge multiple knowledge graphs into a unified context."""

    def __init__(self):
        self._sources: List[Path] = []  # resolved paths queued for load()
        self._kg = None  # merged KnowledgeGraph instance
        self._engine = None  # GraphQueryEngine over the merged graph

    def add_source(self, path) -> None:
        """Add a knowledge graph source (.db or .json file, or directory to search).

        Raises:
            FileNotFoundError: if ``path`` does not exist.
        """
        path = Path(path).resolve()
        if path.is_dir():
            # Discover graph files inside the directory.
            from video_processor.integrators.graph_discovery import find_knowledge_graphs
            graphs = find_knowledge_graphs(path)
            self._sources.extend(graphs)
        elif path.is_file():
            self._sources.append(path)
        else:
            raise FileNotFoundError(f"Not found: {path}")

    def load(self, provider_manager=None) -> "KBContext":
        """Load and merge all added sources into a single knowledge graph.

        Sources with unrecognized suffixes are silently skipped.
        Returns self for method chaining.
        """
        from video_processor.integrators.graph_query import GraphQueryEngine
        from video_processor.integrators.knowledge_graph import KnowledgeGraph

        self._kg = KnowledgeGraph(provider_manager=provider_manager)
        for source_path in self._sources:
            if source_path.suffix == ".db":
                other = KnowledgeGraph(db_path=source_path)
                self._kg.merge(other)
            elif source_path.suffix == ".json":
                data = json.loads(source_path.read_text())
                other = KnowledgeGraph.from_dict(data)
                self._kg.merge(other)
        self._engine = GraphQueryEngine(self._kg._store, provider_manager=provider_manager)
        return self

    @property
    def knowledge_graph(self):
        """Return the merged KnowledgeGraph.

        Raises:
            RuntimeError: if load() has not been called yet.
        """
        # `is None`, not truthiness: an empty-but-loaded graph could be
        # falsy and would otherwise be misreported as "not loaded".
        if self._kg is None:
            raise RuntimeError("Call load() first")
        return self._kg

    @property
    def query_engine(self):
        """Return the GraphQueryEngine.

        Raises:
            RuntimeError: if load() has not been called yet.
        """
        if self._engine is None:
            raise RuntimeError("Call load() first")
        return self._engine

    @property
    def sources(self) -> List[Path]:
        """Return a copy of the list of source paths."""
        return list(self._sources)

    def summary(self) -> str:
        """Generate a brief summary of the loaded knowledge base.

        Returns a multi-line string with source/entity/relationship
        counts, or a placeholder when load() has not been called.
        """
        # Guard on the engine as well: it is the object actually used below.
        if self._kg is None or self._engine is None:
            return "No knowledge base loaded."
        stats = self._engine.stats().data
        lines = [
            f"Knowledge base: {len(self._sources)} source(s)",
            f"  Entities: {stats['entity_count']}",
            f"  Relationships: {stats['relationship_count']}",
        ]
        if stats.get("entity_types"):
            lines.append("  Entity types:")
            # Most frequent entity types first.
            for t, count in sorted(stats["entity_types"].items(), key=lambda x: -x[1]):
                lines.append(f"    {t}: {count}")
        return "\n".join(lines)

    @classmethod
    def auto_discover(cls, start_dir: Optional[Path] = None, provider_manager=None) -> "KBContext":
        """Create a KBContext by auto-discovering knowledge graphs near start_dir.

        If no graphs are found, the context is returned without calling
        load() (its properties will raise RuntimeError).
        """
        from video_processor.integrators.graph_discovery import find_knowledge_graphs

        ctx = cls()
        ctx._sources.extend(find_knowledge_graphs(start_dir))
        if ctx._sources:
            ctx.load(provider_manager=provider_manager)
        return ctx
|
knowledge_graph
property
Return the merged KnowledgeGraph; raises RuntimeError if load() has not been called.
query_engine
property
Return the GraphQueryEngine; raises RuntimeError if load() has not been called.
sources
property
Return the list of source paths.
add_source(path)
Add a knowledge graph source (.db or .json file, or directory to search).
Source code in video_processor/agent/kb_context.py
def add_source(self, path) -> None:
    """Add a knowledge graph source (.db or .json file, or directory to search)."""
    resolved = Path(path).resolve()
    if resolved.is_file():
        self._sources.append(resolved)
        return
    if resolved.is_dir():
        # Discover graph files inside the directory.
        from video_processor.integrators.graph_discovery import find_knowledge_graphs
        self._sources.extend(find_knowledge_graphs(resolved))
        return
    raise FileNotFoundError(f"Not found: {resolved}")
|
auto_discover(start_dir=None, provider_manager=None)
classmethod
Create a KBContext by auto-discovering knowledge graphs near start_dir.
Source code in video_processor/agent/kb_context.py
@classmethod
def auto_discover(cls, start_dir: Optional[Path] = None, provider_manager=None) -> "KBContext":
    """Create a KBContext by auto-discovering knowledge graphs near start_dir.

    If no graphs are found, the context is returned without calling
    load().
    """
    from video_processor.integrators.graph_discovery import find_knowledge_graphs

    ctx = cls()
    ctx._sources.extend(find_knowledge_graphs(start_dir))
    if ctx._sources:
        ctx.load(provider_manager=provider_manager)
    return ctx
|
load(provider_manager=None)
Load and merge all added sources into a single knowledge graph.
Source code in video_processor/agent/kb_context.py
def load(self, provider_manager=None) -> "KBContext":
    """Load and merge all added sources into a single knowledge graph.

    Returns self for method chaining. Sources whose suffix is neither
    ".db" nor ".json" are skipped.
    """
    from video_processor.integrators.graph_query import GraphQueryEngine
    from video_processor.integrators.knowledge_graph import KnowledgeGraph

    self._kg = KnowledgeGraph(provider_manager=provider_manager)
    # Dispatch per file suffix instead of an if/elif chain.
    loaders = {
        ".db": lambda p: KnowledgeGraph(db_path=p),
        ".json": lambda p: KnowledgeGraph.from_dict(json.loads(p.read_text())),
    }
    for source_path in self._sources:
        loader = loaders.get(source_path.suffix)
        if loader is not None:
            self._kg.merge(loader(source_path))
    self._engine = GraphQueryEngine(self._kg._store, provider_manager=provider_manager)
    return self
|
summary()
Generate a brief summary of the loaded knowledge base.
Source code in video_processor/agent/kb_context.py
def summary(self) -> str:
    """Generate a brief summary of the loaded knowledge base.

    Returns a multi-line string with source, entity, and relationship
    counts plus per-type entity counts (most frequent first), or a
    placeholder message when load() has not been called yet.
    """
    # Guard on the engine as well: it is the object actually used below,
    # and `is None` avoids misreporting an empty (falsy) graph as
    # "not loaded".
    if self._kg is None or self._engine is None:
        return "No knowledge base loaded."
    stats = self._engine.stats().data
    lines = [
        f"Knowledge base: {len(self._sources)} source(s)",
        f"  Entities: {stats['entity_count']}",
        f"  Relationships: {stats['relationship_count']}",
    ]
    if stats.get("entity_types"):
        lines.append("  Entity types:")
        # Most frequent entity types first.
        for t, count in sorted(stats["entity_types"].items(), key=lambda x: -x[1]):
            lines.append(f"    {t}: {count}")
    return "\n".join(lines)
|
Overview
The agent module implements a planning agent that synthesizes knowledge from processed video content into actionable artifacts such as project plans, PRDs, task breakdowns, and roadmaps. The agent operates on knowledge graphs loaded via KBContext and uses a skill-based architecture for extensibility.
Key components:
PlanningAgent -- orchestrates skill selection and execution based on user requests
AgentContext -- shared state passed between skills during execution
Skill (ABC) -- base class for pluggable agent capabilities
Artifact -- output produced by skill execution
KBContext -- loads and merges multiple knowledge graph sources
PlanningAgent
from video_processor.agent.agent_loop import PlanningAgent
AI agent that synthesizes knowledge into planning artifacts. Uses an LLM to select which skills to execute for a given request, or falls back to keyword matching when no LLM is available.
Constructor
def __init__(self, context: AgentContext)
| Parameter |
Type |
Description |
context |
AgentContext |
Shared context containing knowledge graph, query engine, and provider |
from_kb_paths()
@classmethod
def from_kb_paths(
cls,
kb_paths: List[Path],
provider_manager=None,
) -> PlanningAgent
Factory method that creates an agent from one or more knowledge base file paths. Handles loading and merging knowledge graphs automatically.
Parameters:
| Parameter |
Type |
Default |
Description |
kb_paths |
List[Path] |
required |
Paths to .db or .json knowledge graph files, or directories to search |
provider_manager |
ProviderManager |
None |
LLM provider for agent operations |
Returns: PlanningAgent -- configured agent with loaded knowledge base.
from pathlib import Path
from video_processor.agent.agent_loop import PlanningAgent
from video_processor.providers.manager import ProviderManager
agent = PlanningAgent.from_kb_paths(
kb_paths=[Path("results/knowledge_graph.db")],
provider_manager=ProviderManager(),
)
execute()
def execute(self, request: str) -> List[Artifact]
Execute a user request by selecting and running appropriate skills.
Process:
- Build a context summary from the knowledge base statistics
- Format available skills with their descriptions
- Ask the LLM to select skills and parameters (or use keyword matching as fallback)
- Execute selected skills in order, accumulating artifacts
Parameters:
| Parameter |
Type |
Description |
request |
str |
Natural language request (e.g., "Generate a project plan") |
Returns: List[Artifact] -- generated artifacts from skill execution.
LLM mode: The LLM receives the knowledge base summary, available skills, and user request, then returns a JSON array of {"skill": "name", "params": {}} objects to execute.
Keyword fallback: Without an LLM, skills are matched by splitting the skill name into words and checking if any appear in the request text.
artifacts = agent.execute("Create a PRD and task breakdown")
for artifact in artifacts:
print(f"--- {artifact.name} ({artifact.artifact_type}) ---")
print(artifact.content[:500])
chat()
def chat(self, message: str) -> str
Interactive chat mode. Maintains conversation history and provides contextual responses about the loaded knowledge base.
Parameters:
| Parameter |
Type |
Description |
message |
str |
User message |
Returns: str -- assistant response.
The chat mode provides the LLM with:
- Knowledge base statistics (entity counts, relationship counts)
- List of previously generated artifacts
- Full conversation history
- Available REPL commands (e.g.,
/entities, /search, /plan, /export)
Requires a configured provider_manager. Returns a static error message if no LLM is available.
response = agent.chat("What technologies were discussed in the meetings?")
print(response)
response = agent.chat("Which of those have the most dependencies?")
print(response)
AgentContext
from video_processor.agent.skills.base import AgentContext
Shared state dataclass passed to all skills during execution. Accumulates artifacts and conversation history across the agent session.
| Field |
Type |
Default |
Description |
knowledge_graph |
Any |
None |
KnowledgeGraph instance |
query_engine |
Any |
None |
GraphQueryEngine instance for querying the KG |
provider_manager |
Any |
None |
ProviderManager instance for LLM calls |
planning_entities |
List[Any] |
[] |
Extracted PlanningEntity instances |
user_requirements |
Dict[str, Any] |
{} |
User-specified requirements and constraints |
conversation_history |
List[Dict[str, str]] |
[] |
Chat message history (role, content dicts) |
artifacts |
List[Artifact] |
[] |
Previously generated artifacts |
config |
Dict[str, Any] |
{} |
Additional configuration |
from video_processor.agent.skills.base import AgentContext
context = AgentContext(
knowledge_graph=kg,
query_engine=engine,
provider_manager=pm,
config={"output_format": "markdown"},
)
Skill (ABC)
from video_processor.agent.skills.base import Skill
Base class for agent skills. Each skill represents a discrete capability that produces an artifact from the agent context.
Class attributes:
| Attribute |
Type |
Description |
name |
str |
Skill identifier (e.g., "project_plan", "prd") |
description |
str |
Human-readable description shown to the LLM for skill selection |
execute()
@abstractmethod
def execute(self, context: AgentContext, **kwargs) -> Artifact
Execute this skill and return an artifact. Receives the shared agent context and any parameters selected by the LLM planner.
can_execute()
def can_execute(self, context: AgentContext) -> bool
Check if this skill can execute given the current context. The default implementation requires both knowledge_graph and provider_manager to be set. Override for skills with different requirements.
Returns: bool
Implementing a custom skill
from video_processor.agent.skills.base import Skill, Artifact, AgentContext, register_skill


class SummarySkill(Skill):
    # Identifier the planner uses to select this skill.
    name = "summary"
    # Shown to the LLM when it decides which skills to run.
    description = "Generate a concise summary of the knowledge base"

    def execute(self, context: AgentContext, **kwargs) -> Artifact:
        """Summarize the loaded knowledge base via the configured LLM."""
        stats = context.query_engine.stats()
        prompt = f"Summarize this knowledge base:\n{stats.to_text()}"
        content = context.provider_manager.chat(
            [{"role": "user", "content": prompt}]
        )
        return Artifact(
            name="Knowledge Base Summary",
            content=content,
            artifact_type="document",
            format="markdown",
        )

    def can_execute(self, context: AgentContext) -> bool:
        # This skill queries the engine directly, so require it (plus an
        # LLM provider) instead of the default knowledge_graph check.
        return context.query_engine is not None and context.provider_manager is not None


# Register the skill so the agent can discover it
register_skill(SummarySkill())
Artifact
from video_processor.agent.skills.base import Artifact
Dataclass representing the output of a skill execution.
| Field |
Type |
Default |
Description |
name |
str |
required |
Human-readable artifact name |
content |
str |
required |
Generated content (Markdown, JSON, Mermaid, etc.) |
artifact_type |
str |
required |
Type: "project_plan", "prd", "roadmap", "task_list", "document", "issues" |
format |
str |
"markdown" |
Content format: "markdown", "json", "mermaid" |
metadata |
Dict[str, Any] |
{} |
Additional metadata |
Skill Registry Functions
register_skill()
def register_skill(skill: Skill) -> None
Register a skill instance in the global registry. Skills must be registered before the agent can discover and execute them.
get_skill()
def get_skill(name: str) -> Optional[Skill]
Look up a registered skill by name.
Returns: Optional[Skill] -- the skill instance, or None if not found.
list_skills()
def list_skills() -> List[Skill]
Return all registered skill instances.
KBContext
from video_processor.agent.kb_context import KBContext
Loads and merges multiple knowledge graph sources into a unified context for agent consumption. Supports both FalkorDB (.db) and JSON (.json) formats, and can auto-discover graphs in a directory tree.
Constructor
Creates an empty context. Use add_source() to add knowledge graph paths, then load() to initialize.
add_source()
def add_source(self, path) -> None
Add a knowledge graph source.
Parameters:
| Parameter |
Type |
Description |
path |
str \| Path |
Path to a .db file, .json file, or directory to search for knowledge graphs |
If path is a directory, it is searched recursively for knowledge graph files using find_knowledge_graphs().
Raises: FileNotFoundError if the path does not exist.
load()
def load(self, provider_manager=None) -> KBContext
Load and merge all added sources into a single knowledge graph and query engine.
Parameters:
| Parameter |
Type |
Default |
Description |
provider_manager |
ProviderManager |
None |
LLM provider for the knowledge graph and query engine |
Returns: KBContext -- self, for method chaining.
Properties
| Property |
Type |
Description |
knowledge_graph |
KnowledgeGraph |
The merged knowledge graph (raises RuntimeError if not loaded) |
query_engine |
GraphQueryEngine |
Query engine for the merged graph (raises RuntimeError if not loaded) |
sources |
List[Path] |
List of resolved source paths |
summary()
Generate a brief text summary of the loaded knowledge base, including entity counts by type and relationship counts.
Returns: str -- multi-line summary text.
auto_discover()
@classmethod
def auto_discover(
cls,
start_dir: Optional[Path] = None,
provider_manager=None,
) -> KBContext
Factory method that creates a KBContext by auto-discovering knowledge graphs near start_dir (defaults to current directory).
Returns: KBContext -- loaded context; if no graphs are found, the context is returned unloaded (zero sources, load() not called).
Usage examples
from pathlib import Path
from video_processor.agent.kb_context import KBContext

# Manual source management
kb = KBContext()
kb.add_source(Path("project_a/knowledge_graph.db"))
kb.add_source(Path("project_b/results/"))  # searches directory
# pm: a configured ProviderManager instance (created elsewhere)
kb.load(provider_manager=pm)
print(kb.summary())
# Knowledge base: 3 source(s)
#   Entities: 142
#   Relationships: 89
#   Entity types:
#     technology: 45
#     person: 23
#     concept: 74

# Auto-discover from current directory
kb = KBContext.auto_discover()

# Use with the agent
from video_processor.agent.agent_loop import PlanningAgent
from video_processor.agent.skills.base import AgentContext

context = AgentContext(
    knowledge_graph=kb.knowledge_graph,
    query_engine=kb.query_engine,
    provider_manager=pm,
)
agent = PlanningAgent(context)