|
0981a08…
|
noreply
|
1 |
"""Planning agent loop for synthesizing knowledge into artifacts.""" |
|
0981a08…
|
noreply
|
2 |
|
|
0981a08…
|
noreply
|
3 |
import logging |
|
0981a08…
|
noreply
|
4 |
from pathlib import Path |
|
0981a08…
|
noreply
|
5 |
from typing import List |
|
0981a08…
|
noreply
|
6 |
|
|
0981a08…
|
noreply
|
7 |
from video_processor.agent.kb_context import KBContext |
|
0981a08…
|
noreply
|
8 |
from video_processor.agent.skills.base import ( |
|
0981a08…
|
noreply
|
9 |
AgentContext, |
|
0981a08…
|
noreply
|
10 |
Artifact, |
|
0981a08…
|
noreply
|
11 |
get_skill, |
|
0981a08…
|
noreply
|
12 |
list_skills, |
|
0981a08…
|
noreply
|
13 |
) |
|
0981a08…
|
noreply
|
14 |
|
|
0981a08…
|
noreply
|
15 |
logger = logging.getLogger(__name__) |
|
0981a08…
|
noreply
|
16 |
|
|
0981a08…
|
noreply
|
17 |
|
|
0981a08…
|
noreply
|
18 |
class PlanningAgent:
    """AI agent that synthesizes knowledge into planning artifacts.

    Holds an :class:`AgentContext` (knowledge graph, query engine, optional
    LLM provider manager, accumulated artifacts and conversation history)
    and exposes two entry points:

    * :meth:`execute` -- one-shot: plan a skill sequence and run it.
    * :meth:`chat` -- interactive: accumulate history and answer questions.
    """

    def __init__(self, context: AgentContext):
        # All mutable state (KB, provider, artifacts, history) lives on the context.
        self.context = context

    @classmethod
    def from_kb_paths(cls, kb_paths: List[Path], provider_manager=None) -> "PlanningAgent":
        """Create an agent from knowledge base paths.

        Args:
            kb_paths: Paths of knowledge base sources to load.
            provider_manager: Optional LLM provider manager; forwarded both to
                the KB load step and to the resulting agent context.

        Returns:
            A ``PlanningAgent`` wrapping the loaded knowledge base.
        """
        kb = KBContext()
        for path in kb_paths:
            kb.add_source(path)
        kb.load(provider_manager=provider_manager)

        context = AgentContext(
            knowledge_graph=kb.knowledge_graph,
            query_engine=kb.query_engine,
            provider_manager=provider_manager,
        )
        return cls(context)

    def execute(self, request: str) -> List[Artifact]:
        """Execute a user request by selecting and running appropriate skills.

        Asks the configured LLM to produce a JSON plan of skill invocations;
        falls back to keyword matching when no provider is configured.

        Args:
            request: Natural-language description of what to produce.

        Returns:
            Artifacts produced, in execution order (possibly empty). Each
            artifact is also appended to ``self.context.artifacts``.
        """
        # Step 1: Build context summary for LLM.
        kb_summary = ""
        if self.context.query_engine:
            stats = self.context.query_engine.stats()
            kb_summary = stats.to_text()

        available_skills = list_skills()
        skill_descriptions = "\n".join(f"- {s.name}: {s.description}" for s in available_skills)

        # Step 2: Ask LLM to select skills.
        plan_prompt = (
            "You are a planning agent. Given a user request and available skills, "
            "select which skills to execute and in what order.\n\n"
            f"Knowledge base:\n{kb_summary}\n\n"
            f"Available skills:\n{skill_descriptions}\n\n"
            f"User request: {request}\n\n"
            "Return a JSON array of skill names to execute in order:\n"
            '[{"skill": "skill_name", "params": {}}]\n'
            "Return ONLY the JSON array."
        )

        if not self.context.provider_manager:
            # No LLM -- try to match skills by keyword
            return self._keyword_match_execute(request)

        raw = self.context.provider_manager.chat(
            [{"role": "user", "content": plan_prompt}],
            max_tokens=512,
            temperature=0.1,
        )

        # Local import -- presumably avoids a module-level cycle; TODO confirm.
        from video_processor.utils.json_parsing import parse_json_from_response

        plan = parse_json_from_response(raw)

        artifacts: List[Artifact] = []
        if not isinstance(plan, list):
            # Preserve the original "return empty" contract, but make the
            # failure visible instead of silent.
            logger.warning("LLM plan is not a JSON array; no skills executed: %r", plan)
            return artifacts

        for step in plan:
            if not (isinstance(step, dict) and "skill" in step):
                logger.warning("Skipping malformed plan step: %r", step)
                continue
            skill = get_skill(step["skill"])
            if skill is None:
                logger.warning("Plan requested unknown skill: %s", step["skill"])
                continue
            if not skill.can_execute(self.context):
                logger.warning("Skill %s cannot execute in current context", step["skill"])
                continue
            params = step.get("params", {})
            if not isinstance(params, dict):
                # Guard against the LLM emitting e.g. a string for "params",
                # which would make the ** splat raise TypeError mid-run.
                logger.warning(
                    "Ignoring non-dict params for skill %s: %r", step["skill"], params
                )
                params = {}
            artifact = skill.execute(self.context, **params)
            artifacts.append(artifact)
            self.context.artifacts.append(artifact)

        return artifacts

    def _keyword_match_execute(self, request: str) -> List[Artifact]:
        """Fallback: match skills by keywords in the request.

        Runs every registered skill whose name (split on underscores) shares
        at least one word with the lowercased request.
        """
        request_lower = request.lower()
        artifacts: List[Artifact] = []
        for skill in list_skills():
            # Simple keyword matching
            skill_words = skill.name.replace("_", " ").split()
            if any(word in request_lower for word in skill_words):
                if skill.can_execute(self.context):
                    artifact = skill.execute(self.context)
                    artifacts.append(artifact)
                    self.context.artifacts.append(artifact)
        return artifacts

    def chat(self, message: str) -> str:
        """Interactive chat -- accumulate context and answer questions.

        Args:
            message: The user's chat message; appended to the conversation
                history before the LLM call.

        Returns:
            The assistant's reply (also appended to history), or a static
            notice when no LLM provider is configured.
        """
        self.context.conversation_history.append({"role": "user", "content": message})

        if not self.context.provider_manager:
            return "Agent requires a configured LLM provider for chat mode."

        # Build system context
        kb_summary = ""
        if self.context.query_engine:
            stats = self.context.query_engine.stats()
            kb_summary = f"\n\nKnowledge base:\n{stats.to_text()}"

        artifacts_summary = ""
        if self.context.artifacts:
            artifacts_summary = "\n\nGenerated artifacts:\n" + "\n".join(
                f"- {a.name} ({a.artifact_type})" for a in self.context.artifacts
            )

        system_msg = (
            "You are PlanOpticon, an AI planning companion built into the PlanOpticon CLI. "
            "PlanOpticon is a video analysis and knowledge extraction tool that processes "
            "recordings into structured knowledge graphs.\n\n"
            "You are running inside the interactive companion REPL. The user can use these "
            "built-in commands (suggest them when relevant):\n"
            "  /status - Show workspace status (loaded KG, videos, docs)\n"
            "  /entities [--type T] - List knowledge graph entities\n"
            "  /search TERM - Search entities by name\n"
            "  /neighbors ENTITY - Show entity relationships\n"
            "  /export FORMAT - Export KG (markdown, obsidian, notion, csv)\n"
            "  /analyze PATH - Analyze a video or document\n"
            "  /ingest PATH - Ingest a file into the knowledge graph\n"
            "  /auth SERVICE - Authenticate with a service "
            "(zoom, google, microsoft, notion, dropbox, github)\n"
            "  /provider [NAME] - List or switch LLM provider\n"
            "  /model [NAME] - Show or switch chat model\n"
            "  /plan - Generate a project plan\n"
            "  /prd - Generate a PRD\n"
            "  /tasks - Generate a task breakdown\n\n"
            "PlanOpticon CLI commands the user can run outside the REPL:\n"
            "  planopticon auth zoom|google|microsoft - Authenticate with cloud services\n"
            "  planopticon recordings zoom-list|teams-list|meet-list - List cloud recordings\n"
            "  planopticon analyze -i VIDEO - Analyze a video file\n"
            "  planopticon query - Query the knowledge graph\n"
            "  planopticon export FORMAT PATH - Export knowledge graph\n\n"
            f"{kb_summary}{artifacts_summary}\n\n"
            "Help the user with their planning tasks. When they ask about capabilities, "
            "refer them to the appropriate built-in commands. Ask clarifying questions "
            "to gather requirements. When ready, suggest using specific skills or commands "
            "to generate artifacts."
        )

        messages = [{"role": "system", "content": system_msg}] + self.context.conversation_history

        response = self.context.provider_manager.chat(messages, max_tokens=2048, temperature=0.5)
        self.context.conversation_history.append({"role": "assistant", "content": response})
        return response