|
1
|
"""Command-line interface for PlanOpticon.""" |
|
2
|
|
|
3
|
import json |
|
4
|
import logging |
|
5
|
import os |
|
6
|
import sys |
|
7
|
from pathlib import Path |
|
8
|
|
|
9
|
import click |
|
10
|
import colorlog |
|
11
|
from tqdm import tqdm |
|
12
|
|
|
13
|
|
|
14
|
def setup_logging(verbose: bool = False) -> None:
    """Configure the root logger with a single colorized console handler.

    Any pre-existing handlers are removed first, so repeated calls (e.g.
    nested CLI invocations or tests) do not produce duplicated log lines.

    Args:
        verbose: When True, log at DEBUG level; otherwise INFO.
    """
    log_level = logging.DEBUG if verbose else logging.INFO
    formatter = colorlog.ColoredFormatter(
        "%(log_color)s%(asctime)s [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        log_colors={
            "DEBUG": "cyan",
            "INFO": "green",
            "WARNING": "yellow",
            "ERROR": "red",
            "CRITICAL": "red,bg_white",
        },
    )
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    root_logger = logging.getLogger()
    root_logger.setLevel(log_level)
    # Iterate over a snapshot: calling removeHandler() while iterating the
    # live `handlers` list mutates it mid-loop and can skip every other
    # handler, leaving stale handlers attached.
    for handler in list(root_logger.handlers):
        root_logger.removeHandler(handler)
    root_logger.addHandler(console_handler)
|
35
|
|
|
36
|
|
|
37
|
@click.group(invoke_without_command=True)
@click.option("--verbose", "-v", is_flag=True, help="Enable verbose output")
@click.option(
    "--chat",
    "-C",
    is_flag=True,
    help="Launch interactive companion REPL",
)
@click.option(
    "--interactive",
    "-I",
    "interactive_flag",
    is_flag=True,
    help="Launch interactive companion REPL",
)
@click.version_option("0.5.0", prog_name="PlanOpticon")
@click.pass_context
def cli(ctx, verbose, chat, interactive_flag):
    """PlanOpticon - Comprehensive Video Analysis & Knowledge Extraction Tool."""
    ctx.ensure_object(dict)
    ctx.obj["verbose"] = verbose
    setup_logging(verbose)

    # When a subcommand was given, the group does nothing extra.
    if ctx.invoked_subcommand is not None:
        return

    # Bare invocation: either drop into the companion REPL (when requested
    # via --chat / --interactive) or show the interactive menu.
    if chat or interactive_flag:
        from video_processor.cli.companion import CompanionREPL

        CompanionREPL().run()
        ctx.exit(0)
    else:
        _interactive_menu(ctx)
|
68
|
|
|
69
|
|
|
70
|
@cli.command("init") |
|
71
|
@click.pass_context |
|
72
|
def init_cmd(ctx): |
|
73
|
"""Interactive setup wizard — configure providers, API keys, and .env.""" |
|
74
|
from video_processor.cli.init_wizard import run_wizard |
|
75
|
|
|
76
|
run_wizard() |
|
77
|
|
|
78
|
|
|
79
|
@cli.command()
@click.pass_context
def doctor(ctx):
    """Check setup health — Python, FFmpeg, API keys, dependencies."""
    # Deferred import: check machinery only loads when the command runs.
    from video_processor.cli.doctor import format_results, run_all_checks

    check_results = run_all_checks()
    click.echo(format_results(check_results))
|
87
|
|
|
88
|
|
|
89
|
@cli.command()
@click.option(
    "--input",
    "-i",
    "input_path",
    required=True,
    type=click.Path(exists=True),
    help="Input video file path",
)
@click.option("--output", "-o", required=True, type=click.Path(), help="Output directory")
@click.option(
    "--depth",
    type=click.Choice(["basic", "standard", "comprehensive"]),
    default="standard",
    help="Processing depth",
)
@click.option(
    "--focus", type=str, help='Comma-separated focus areas (e.g., "diagrams,action-items")'
)
@click.option("--use-gpu", is_flag=True, help="Enable GPU acceleration if available")
@click.option("--sampling-rate", type=float, default=0.5, help="Frame sampling rate")
@click.option("--change-threshold", type=float, default=0.15, help="Visual change threshold")
@click.option(
    "--periodic-capture",
    type=float,
    default=30.0,
    help="Capture a frame every N seconds regardless of change (0 to disable)",
)
@click.option("--title", type=str, help="Title for the analysis report")
@click.option(
    "--provider",
    "-p",
    type=click.Choice(
        [
            "auto",
            "openai",
            "anthropic",
            "gemini",
            "ollama",
            "azure",
            "together",
            "fireworks",
            "cerebras",
            "xai",
        ]
    ),
    default="auto",
    help="API provider",
)
@click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@click.option(
    "--output-format",
    type=click.Choice(["default", "json"]),
    default="default",
    help="Output format: default (files + summary) or json (structured JSON to stdout)",
)
@click.option(
    "--templates-dir",
    type=click.Path(exists=True),
    default=None,
    help="Directory with custom prompt template .txt files",
)
@click.option(
    "--speakers",
    type=str,
    default=None,
    help='Comma-separated speaker names for diarization hints (e.g., "Alice,Bob,Carol")',
)
@click.pass_context
def analyze(
    ctx,
    input_path,
    output,
    depth,
    focus,
    use_gpu,
    sampling_rate,
    change_threshold,
    periodic_capture,
    title,
    provider,
    vision_model,
    chat_model,
    output_format,
    templates_dir,
    speakers,
):
    """Analyze a single video and extract structured knowledge."""
    # NOTE: the --input option is given an explicit destination name
    # ("input_path") so the parameter no longer shadows the `input` builtin.
    # The CLI flags themselves are unchanged.
    from video_processor.pipeline import process_single_video
    from video_processor.providers.manager import ProviderManager

    # Normalize comma-separated CLI values into lists (None/empty handled).
    focus_areas = [a.strip().lower() for a in focus.split(",")] if focus else []
    speaker_hints = [s.strip() for s in speakers.split(",")] if speakers else None
    # "auto" means let ProviderManager choose; pass None through in that case.
    prov = None if provider == "auto" else provider

    pm = ProviderManager(
        vision_model=vision_model,
        chat_model=chat_model,
        provider=prov,
    )

    if templates_dir:
        from video_processor.utils.prompt_templates import PromptTemplate

        # Swap in user-supplied prompt templates for this run.
        pm.prompt_templates = PromptTemplate(templates_dir=templates_dir)

    try:
        manifest = process_single_video(
            input_path=input_path,
            output_dir=output,
            provider_manager=pm,
            depth=depth,
            focus_areas=focus_areas,
            sampling_rate=sampling_rate,
            change_threshold=change_threshold,
            periodic_capture_seconds=periodic_capture,
            use_gpu=use_gpu,
            title=title,
            speaker_hints=speaker_hints,
        )
        if output_format == "json":
            # Machine-readable mode: the full manifest goes to stdout.
            click.echo(json.dumps(manifest.model_dump(), indent=2, default=str))
        else:
            click.echo(pm.usage.format_summary())
            click.echo(f"\n Results: {output}/manifest.json")
    except Exception as e:
        # Top-level CLI boundary: broad catch is intentional here — report
        # the failure (in the requested format), then exit non-zero.
        logging.error(f"Error: {e}")
        if output_format == "json":
            click.echo(json.dumps({"error": str(e)}))
        else:
            click.echo(pm.usage.format_summary())
        if ctx.obj["verbose"]:
            import traceback

            traceback.print_exc()
        sys.exit(1)
|
221
|
|
|
222
|
|
|
223
|
@cli.command()
@click.option(
    "--input-dir", "-i", type=click.Path(), default=None, help="Local directory of videos"
)
@click.option("--output", "-o", required=True, type=click.Path(), help="Output directory")
@click.option(
    "--depth",
    type=click.Choice(["basic", "standard", "comprehensive"]),
    default="standard",
    help="Processing depth",
)
@click.option(
    "--pattern",
    type=str,
    default="*.mp4,*.mkv,*.avi,*.mov,*.webm",
    help="File glob patterns (comma-separated)",
)
@click.option("--title", type=str, default="Batch Processing Results", help="Batch title")
@click.option(
    "--provider",
    "-p",
    type=click.Choice(
        [
            "auto",
            "openai",
            "anthropic",
            "gemini",
            "ollama",
            "azure",
            "together",
            "fireworks",
            "cerebras",
            "xai",
        ]
    ),
    default="auto",
    help="API provider",
)
@click.option("--vision-model", type=str, default=None, help="Override model for vision tasks")
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@click.option(
    "--source",
    type=click.Choice(["local", "gdrive", "dropbox"]),
    default="local",
    help="Video source (local directory, Google Drive, or Dropbox)",
)
@click.option("--folder-id", type=str, default=None, help="Google Drive folder ID")
@click.option("--folder-path", type=str, default=None, help="Cloud folder path")
@click.option(
    "--recursive/--no-recursive", default=True, help="Recurse into subfolders (default: recursive)"
)
@click.pass_context
def batch(
    ctx,
    input_dir,
    output,
    depth,
    pattern,
    title,
    provider,
    vision_model,
    chat_model,
    source,
    folder_id,
    folder_path,
    recursive,
):
    """Process a folder of videos in batch.

    Videos come from a local directory or are first downloaded from a cloud
    source (Google Drive / Dropbox) into ``<output>/_downloads``. Each video
    is processed independently; per-video knowledge graphs are merged into a
    single batch-level graph, a batch summary is generated, and a batch
    manifest ties everything together.
    """
    from video_processor.integrators.knowledge_graph import KnowledgeGraph
    from video_processor.integrators.plan_generator import PlanGenerator
    from video_processor.models import BatchManifest, BatchVideoEntry
    from video_processor.output_structure import (
        create_batch_output_dirs,
        write_batch_manifest,
    )
    from video_processor.pipeline import process_single_video
    from video_processor.providers.manager import ProviderManager

    # "auto" means let ProviderManager pick; pass None through in that case.
    prov = None if provider == "auto" else provider
    pm = ProviderManager(vision_model=vision_model, chat_model=chat_model, provider=prov)
    patterns = [p.strip() for p in pattern.split(",")]

    # Handle cloud sources: download everything first, then process as local.
    if source != "local":
        download_dir = Path(output) / "_downloads"
        download_dir.mkdir(parents=True, exist_ok=True)

        if source == "gdrive":
            from video_processor.sources.google_drive import GoogleDriveSource

            cloud = GoogleDriveSource()
            if not cloud.authenticate():
                logging.error("Google Drive authentication failed")
                sys.exit(1)
            cloud_files = cloud.list_videos(
                folder_id=folder_id, folder_path=folder_path, patterns=patterns, recursive=recursive
            )
            cloud.download_all(cloud_files, download_dir)
        elif source == "dropbox":
            from video_processor.sources.dropbox_source import DropboxSource

            cloud = DropboxSource()
            if not cloud.authenticate():
                logging.error("Dropbox authentication failed")
                sys.exit(1)
            # NOTE(review): unlike gdrive, no `recursive` is passed here —
            # presumably DropboxSource handles recursion itself; confirm.
            cloud_files = cloud.list_videos(folder_path=folder_path, patterns=patterns)
            cloud.download_all(cloud_files, download_dir)
        else:
            # Defensive: click.Choice should make this unreachable.
            logging.error(f"Unknown source: {source}")
            sys.exit(1)

        # From here on, the downloaded copies are treated as the local input.
        input_dir = download_dir
    else:
        if not input_dir:
            logging.error("--input-dir is required for local source")
            sys.exit(1)
        input_dir = Path(input_dir)

    # Find videos (rglob for recursive, glob for flat)
    videos = []
    glob_fn = input_dir.rglob if recursive else input_dir.glob
    for pat in patterns:
        videos.extend(sorted(glob_fn(pat)))
    # De-duplicate (a file can match several patterns) and fix the order.
    videos = sorted(set(videos))

    if not videos:
        logging.error(f"No videos found in {input_dir} matching {pattern}")
        sys.exit(1)

    logging.info(f"Found {len(videos)} videos to process")

    dirs = create_batch_output_dirs(output, title)
    manifests = []
    entries = []
    # One merged, batch-wide knowledge graph accumulates per-video graphs.
    merged_kg_db = Path(output) / "knowledge_graph.db"
    merged_kg = KnowledgeGraph(db_path=merged_kg_db)

    for idx, video_path in enumerate(tqdm(videos, desc="Batch processing", unit="video")):
        video_name = video_path.stem
        video_output = dirs["videos"] / video_name
        logging.info(f"Processing video {idx + 1}/{len(videos)}: {video_path.name}")

        # Entry is created up front so a failure still produces a record.
        entry = BatchVideoEntry(
            video_name=video_name,
            manifest_path=f"videos/{video_name}/manifest.json",
        )

        try:
            manifest = process_single_video(
                input_path=video_path,
                output_dir=video_output,
                provider_manager=pm,
                depth=depth,
                title=f"Analysis of {video_name}",
            )
            entry.status = "completed"
            entry.diagrams_count = len(manifest.diagrams)
            entry.action_items_count = len(manifest.action_items)
            entry.key_points_count = len(manifest.key_points)
            entry.duration_seconds = manifest.video.duration_seconds
            manifests.append(manifest)

            # Merge knowledge graph (prefer .db, fall back to .json)
            kg_db = video_output / "results" / "knowledge_graph.db"
            kg_json = video_output / "results" / "knowledge_graph.json"
            if kg_db.exists():
                video_kg = KnowledgeGraph(db_path=kg_db)
                merged_kg.merge(video_kg)
            elif kg_json.exists():
                kg_data = json.loads(kg_json.read_text())
                video_kg = KnowledgeGraph.from_dict(kg_data)
                merged_kg.merge(video_kg)

        except Exception as e:
            # Per-video failures are recorded but do not abort the batch.
            logging.error(f"Failed to process {video_path.name}: {e}")
            entry.status = "failed"
            entry.error = str(e)
            if ctx.obj["verbose"]:
                import traceback

                traceback.print_exc()

        entries.append(entry)

    # Save merged knowledge graph (SQLite is primary, JSON is export)
    merged_kg.save(Path(output) / "knowledge_graph.json")

    # Generate batch summary
    plan_gen = PlanGenerator(provider_manager=pm, knowledge_graph=merged_kg)
    summary_path = Path(output) / "batch_summary.md"
    plan_gen.generate_batch_summary(
        manifests=manifests,
        kg=merged_kg,
        title=title,
        output_path=summary_path,
    )

    # Write batch manifest
    batch_manifest = BatchManifest(
        title=title,
        total_videos=len(videos),
        completed_videos=sum(1 for e in entries if e.status == "completed"),
        failed_videos=sum(1 for e in entries if e.status == "failed"),
        total_diagrams=sum(e.diagrams_count for e in entries),
        total_action_items=sum(e.action_items_count for e in entries),
        total_key_points=sum(e.key_points_count for e in entries),
        videos=entries,
        batch_summary_md="batch_summary.md",
        merged_knowledge_graph_json="knowledge_graph.json",
        merged_knowledge_graph_db="knowledge_graph.db",
    )
    write_batch_manifest(batch_manifest, output)
    click.echo(pm.usage.format_summary())
    click.echo(
        f"\n Batch complete: {batch_manifest.completed_videos}"
        f"/{batch_manifest.total_videos} succeeded"
    )
    click.echo(f" Results: {output}/batch_manifest.json")
|
441
|
|
|
442
|
|
|
443
|
@cli.command()
@click.argument("input_path", type=click.Path(exists=True))
@click.option(
    "--output", "-o", type=click.Path(), default=None, help="Output directory for knowledge graph"
)
@click.option(
    "--db-path", type=click.Path(), default=None, help="Existing knowledge_graph.db to add to"
)
@click.option("--recursive/--no-recursive", "-r", default=True, help="Recurse into subdirectories")
@click.option(
    "--provider",
    "-p",
    type=click.Choice(
        [
            "auto",
            "openai",
            "anthropic",
            "gemini",
            "ollama",
            "azure",
            "together",
            "fireworks",
            "cerebras",
            "xai",
        ]
    ),
    default="auto",
    help="API provider",
)
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@click.pass_context
def ingest(ctx, input_path, output, db_path, recursive, provider, chat_model):
    """Ingest documents into a knowledge graph.

    Supports: .md, .txt, .pdf (with pymupdf or pdfplumber installed)

    Examples:

        planopticon ingest spec.md

        planopticon ingest ./docs/ -o ./output

        planopticon ingest report.pdf --db-path existing.db
    """
    from video_processor.integrators.knowledge_graph import KnowledgeGraph
    from video_processor.processors import list_supported_extensions
    from video_processor.processors.ingest import ingest_directory, ingest_file
    from video_processor.providers.manager import ProviderManager

    input_path = Path(input_path)
    # "auto" means let ProviderManager pick; pass None through in that case.
    prov = None if provider == "auto" else provider
    pm = ProviderManager(chat_model=chat_model, provider=prov)

    # Determine DB path: explicit --db-path wins, then --output, then CWD.
    if db_path:
        kg_path = Path(db_path)
    elif output:
        out_dir = Path(output)
        out_dir.mkdir(parents=True, exist_ok=True)
        kg_path = out_dir / "knowledge_graph.db"
    else:
        kg_path = Path.cwd() / "knowledge_graph.db"

    # Covers the --db-path case, where the parent may not exist yet.
    kg_path.parent.mkdir(parents=True, exist_ok=True)

    click.echo(f"Knowledge graph: {kg_path}")
    kg = KnowledgeGraph(provider_manager=pm, db_path=kg_path)

    total_files = 0
    total_chunks = 0

    try:
        if input_path.is_file():
            # Single-file ingest: per-file chunk count is the total.
            count = ingest_file(input_path, kg)
            total_files = 1
            total_chunks = count
            click.echo(f" {input_path.name}: {count} chunks")
        elif input_path.is_dir():
            # Directory ingest: results maps file path -> chunk count.
            results = ingest_directory(input_path, kg, recursive=recursive)
            total_files = len(results)
            total_chunks = sum(results.values())
            for fpath, count in results.items():
                click.echo(f" {Path(fpath).name}: {count} chunks")
        else:
            click.echo(f"Error: {input_path} is not a file or directory", err=True)
            sys.exit(1)
    except ValueError as e:
        # Unsupported file type — show the extensions that would work.
        click.echo(f"Error: {e}", err=True)
        click.echo(f"Supported extensions: {', '.join(list_supported_extensions())}")
        sys.exit(1)
    except ImportError as e:
        # Optional dependency (e.g. a PDF backend) is missing.
        click.echo(f"Error: {e}", err=True)
        sys.exit(1)

    # Save both .db and .json
    # NOTE(review): save() is presumably format-dispatching on suffix —
    # confirm against KnowledgeGraph.save.
    kg.save(kg_path)
    json_path = kg_path.with_suffix(".json")
    kg.save(json_path)

    # NOTE(review): reaches into the private `_store` for counts; a public
    # accessor on KnowledgeGraph would be preferable if one exists.
    entity_count = kg._store.get_entity_count()
    rel_count = kg._store.get_relationship_count()

    click.echo("\nIngestion complete:")
    click.echo(f" Files processed: {total_files}")
    click.echo(f" Total chunks: {total_chunks}")
    click.echo(f" Entities extracted: {entity_count}")
    click.echo(f" Relationships: {rel_count}")
    click.echo(f" Knowledge graph: {kg_path}")
|
551
|
|
|
552
|
|
|
553
|
@cli.command("list-models") |
|
554
|
@click.pass_context |
|
555
|
def list_models(ctx): |
|
556
|
"""Discover and display available models from all configured providers.""" |
|
557
|
from video_processor.providers.discovery import discover_available_models |
|
558
|
|
|
559
|
models = discover_available_models(force_refresh=True) |
|
560
|
if not models: |
|
561
|
click.echo( |
|
562
|
"No models discovered. Check that at least one API key is set or Ollama is running:" |
|
563
|
) |
|
564
|
click.echo(" OPENAI_API_KEY, ANTHROPIC_API_KEY, GEMINI_API_KEY, or `ollama serve`") |
|
565
|
return |
|
566
|
|
|
567
|
by_provider: dict[str, list] = {} |
|
568
|
for m in models: |
|
569
|
by_provider.setdefault(m.provider, []).append(m) |
|
570
|
|
|
571
|
for provider, provider_models in sorted(by_provider.items()): |
|
572
|
click.echo(f"\n{provider.upper()} ({len(provider_models)} models)") |
|
573
|
click.echo("-" * 60) |
|
574
|
for m in provider_models: |
|
575
|
caps = ", ".join(m.capabilities) |
|
576
|
click.echo(f" {m.id:<40} [{caps}]") |
|
577
|
|
|
578
|
click.echo(f"\nTotal: {len(models)} models across {len(by_provider)} providers") |
|
579
|
|
|
580
|
|
|
581
|
@cli.command()
@click.option("--cache-dir", type=click.Path(), help="Path to cache directory")
@click.option("--older-than", type=int, help="Clear entries older than N seconds")
@click.option("--all", "clear_all", is_flag=True, help="Clear all cache entries")
@click.pass_context
def clear_cache(ctx, cache_dir, older_than, clear_all):
    """Clear API response cache."""
    # The cache directory comes from --cache-dir or the CACHE_DIR env var.
    env_dir = os.environ.get("CACHE_DIR")
    if not (cache_dir or env_dir):
        logging.error("Cache directory not specified")
        sys.exit(1)

    cache_path = Path(cache_dir or env_dir)
    if not cache_path.exists():
        logging.warning(f"Cache directory does not exist: {cache_path}")
        return

    try:
        from video_processor.utils.api_cache import ApiCache

        # Each subdirectory of the cache dir is one cache namespace.
        namespaces = [entry.name for entry in cache_path.iterdir() if entry.is_dir()]
        if not namespaces:
            logging.info("No cache namespaces found")
            return

        # --all overrides any age limit (None clears everything).
        age_limit = None if clear_all else older_than
        total_cleared = 0
        for namespace in namespaces:
            cleared = ApiCache(cache_path, namespace).clear(age_limit)
            total_cleared += cleared
            logging.info(f"Cleared {cleared} entries from {namespace} cache")

        logging.info(f"Total cleared: {total_cleared} entries")
    except Exception as e:
        logging.error(f"Error clearing cache: {e}")
        if ctx.obj["verbose"]:
            import traceback

            traceback.print_exc()
        sys.exit(1)
|
620
|
|
|
621
|
|
|
622
|
@cli.command("agent-analyze") |
|
623
|
@click.option( |
|
624
|
"--input", "-i", required=True, type=click.Path(exists=True), help="Input video file path" |
|
625
|
) |
|
626
|
@click.option("--output", "-o", required=True, type=click.Path(), help="Output directory") |
|
627
|
@click.option( |
|
628
|
"--depth", |
|
629
|
type=click.Choice(["basic", "standard", "comprehensive"]), |
|
630
|
default="standard", |
|
631
|
help="Initial processing depth (agent may adapt)", |
|
632
|
) |
|
633
|
@click.option("--title", type=str, help="Title for the analysis report") |
|
634
|
@click.option( |
|
635
|
"--provider", |
|
636
|
"-p", |
|
637
|
type=click.Choice( |
|
638
|
[ |
|
639
|
"auto", |
|
640
|
"openai", |
|
641
|
"anthropic", |
|
642
|
"gemini", |
|
643
|
"ollama", |
|
644
|
"azure", |
|
645
|
"together", |
|
646
|
"fireworks", |
|
647
|
"cerebras", |
|
648
|
"xai", |
|
649
|
] |
|
650
|
), |
|
651
|
default="auto", |
|
652
|
help="API provider", |
|
653
|
) |
|
654
|
@click.option("--vision-model", type=str, default=None, help="Override model for vision tasks") |
|
655
|
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks") |
|
656
|
@click.pass_context |
|
657
|
def agent_analyze(ctx, input, output, depth, title, provider, vision_model, chat_model): |
|
658
|
"""Agentic video analysis — adaptive, intelligent processing.""" |
|
659
|
from video_processor.agent.orchestrator import AgentOrchestrator |
|
660
|
from video_processor.output_structure import write_video_manifest |
|
661
|
from video_processor.providers.manager import ProviderManager |
|
662
|
|
|
663
|
prov = None if provider == "auto" else provider |
|
664
|
pm = ProviderManager(vision_model=vision_model, chat_model=chat_model, provider=prov) |
|
665
|
|
|
666
|
agent = AgentOrchestrator(provider_manager=pm) |
|
667
|
|
|
668
|
try: |
|
669
|
manifest = agent.process( |
|
670
|
input_path=input, |
|
671
|
output_dir=output, |
|
672
|
initial_depth=depth, |
|
673
|
title=title, |
|
674
|
) |
|
675
|
write_video_manifest(manifest, output) |
|
676
|
|
|
677
|
if agent.insights: |
|
678
|
logging.info("Agent insights:") |
|
679
|
for insight in agent.insights: |
|
680
|
logging.info(f" - {insight}") |
|
681
|
|
|
682
|
logging.info(f"Results at {output}/manifest.json") |
|
683
|
except Exception as e: |
|
684
|
logging.error(f"Error: {e}") |
|
685
|
if ctx.obj["verbose"]: |
|
686
|
import traceback |
|
687
|
|
|
688
|
traceback.print_exc() |
|
689
|
sys.exit(1) |
|
690
|
|
|
691
|
|
|
692
|
@cli.command()
@click.argument("request", required=False, default=None)
@click.option("--kb", multiple=True, type=click.Path(exists=True), help="Knowledge base paths")
@click.option("--interactive", "-I", is_flag=True, help="Interactive chat mode")
@click.option("--export", type=click.Path(), default=None, help="Export artifacts to directory")
@click.option(
    "--provider",
    "-p",
    type=click.Choice(
        [
            "auto",
            "openai",
            "anthropic",
            "gemini",
            "ollama",
            "azure",
            "together",
            "fireworks",
            "cerebras",
            "xai",
        ]
    ),
    default="auto",
    help="API provider",
)
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks")
@click.pass_context
def agent(ctx, request, kb, interactive, export, provider, chat_model):
    """AI planning agent. Synthesizes knowledge into project plans and artifacts.

    Examples:

        planopticon agent "Create a project plan" --kb ./results

        planopticon agent -I --kb ./videos --kb ./docs

        planopticon agent "Generate a PRD" --export ./output
    """
    # Ensure all skills are registered (import has registration side effects)
    import video_processor.agent.skills  # noqa: F401
    from video_processor.agent.agent_loop import PlanningAgent
    from video_processor.agent.kb_context import KBContext
    from video_processor.agent.skills.base import AgentContext

    # Build provider manager; failure is tolerated (pm stays None) so the
    # command can still run in a degraded, provider-less mode.
    pm = None
    try:
        from video_processor.providers.manager import ProviderManager

        prov = None if provider == "auto" else provider
        pm = ProviderManager(chat_model=chat_model, provider=prov)
    except Exception:
        if not interactive:
            click.echo("Warning: could not initialize LLM provider.", err=True)

    # Load knowledge base: explicit --kb paths win, else auto-discovery.
    kb_ctx = KBContext()
    if kb:
        for path in kb:
            kb_ctx.add_source(Path(path))
        kb_ctx.load(provider_manager=pm)
        click.echo(kb_ctx.summary())
    else:
        # Auto-discover
        kb_ctx = KBContext.auto_discover(provider_manager=pm)
        if kb_ctx.sources:
            click.echo(kb_ctx.summary())
        else:
            click.echo("No knowledge base found. Use --kb to specify paths.")

    # Graph/query context is only wired in when a KB actually loaded.
    agent_inst = PlanningAgent(
        context=AgentContext(
            knowledge_graph=kb_ctx.knowledge_graph if kb_ctx.sources else None,
            query_engine=kb_ctx.query_engine if kb_ctx.sources else None,
            provider_manager=pm,
        )
    )

    if interactive:
        # REPL mode: plain lines go to chat; "/"-prefixed lines are commands.
        click.echo("\nPlanOpticon Agent (interactive mode)")
        click.echo("Type your request, or 'quit' to exit.\n")
        while True:
            try:
                line = click.prompt("agent", prompt_suffix="> ")
            except (KeyboardInterrupt, EOFError):
                # Ctrl-C / Ctrl-D exits the REPL cleanly.
                click.echo("\nBye.")
                break
            if line.strip().lower() in ("quit", "exit", "q"):
                click.echo("Bye.")
                break

            # Check for slash commands
            if line.strip().startswith("/"):
                cmd = line.strip()[1:].split()[0]
                if cmd == "plan":
                    artifacts = agent_inst.execute("Generate a project plan")
                elif cmd == "skills":
                    from video_processor.agent.skills.base import list_skills

                    for s in list_skills():
                        click.echo(f" {s.name}: {s.description}")
                    # Informational command: skip artifact printing below.
                    continue
                elif cmd == "summary":
                    if kb_ctx.sources:
                        click.echo(kb_ctx.summary())
                    # Informational command: skip artifact printing below.
                    continue
                else:
                    # Unknown slash command: treat the rest as a request.
                    artifacts = agent_inst.execute(line.strip()[1:])

                # Reached only for artifact-producing commands (/plan, other).
                for a in artifacts:
                    click.echo(f"\n--- {a.name} ({a.artifact_type}) ---\n")
                    click.echo(a.content)
            else:
                # Plain text: conversational turn.
                response = agent_inst.chat(line)
                click.echo(f"\n{response}\n")
    elif request:
        # One-shot mode: execute the request, print artifacts, maybe export.
        artifacts = agent_inst.execute(request)
        if not artifacts:
            click.echo("No artifacts generated. Try a more specific request.")
        for artifact in artifacts:
            click.echo(f"\n--- {artifact.name} ({artifact.artifact_type}) ---\n")
            click.echo(artifact.content)

        if export:
            from video_processor.agent.skills.artifact_export import export_artifacts

            export_dir = Path(export)
            export_artifacts(artifacts, export_dir)
            click.echo(f"Exported {len(artifacts)} artifacts to {export_dir}/")
            click.echo(f"Manifest: {export_dir / 'manifest.json'}")
    else:
        # Neither -I nor a request: show usage hint.
        click.echo("Provide a request or use -I for interactive mode.")
        click.echo("Example: planopticon agent 'Create a project plan' --kb ./results")
|
825
|
|
|
826
|
|
|
827
|
@cli.command()
@click.argument("question", required=False, default=None)
@click.option(
    "--db-path",
    type=click.Path(),
    default=None,
    help="Path to knowledge_graph.db or .json (auto-detected if omitted)",
)
@click.option(
    "--mode",
    type=click.Choice(["direct", "agentic", "auto"]),
    default="auto",
    help="Query mode: direct (no LLM), agentic (LLM), or auto",
)
@click.option(
    "--format",
    "output_format",
    type=click.Choice(["text", "json", "mermaid"]),
    default="text",
    help="Output format",
)
@click.option("--interactive", "-I", is_flag=True, help="Enter interactive REPL mode")
@click.option(
    "--provider",
    "-p",
    type=click.Choice(
        [
            "auto",
            "openai",
            "anthropic",
            "gemini",
            "ollama",
            "azure",
            "together",
            "fireworks",
            "cerebras",
            "xai",
        ]
    ),
    default="auto",
    help="API provider for agentic mode",
)
@click.option("--chat-model", type=str, default=None, help="Override model for agentic mode")
@click.pass_context
def query(ctx, question, db_path, mode, output_format, interactive, provider, chat_model):
    """Query a knowledge graph. Runs stats if no question given.

    Direct commands recognized in QUESTION: stats, entities, relationships,
    neighbors, path, clusters, sources, provenance, sql.
    Natural language questions use agentic mode.

    Examples:

        planopticon query
        planopticon query stats
        planopticon query "entities --type technology"
        planopticon query "neighbors Alice"
        planopticon query sources
        planopticon query "provenance Alice"
        planopticon query "What was discussed?"
        planopticon query -I
    """
    # Deferred imports keep CLI startup fast when this command isn't used.
    from video_processor.integrators.graph_discovery import find_nearest_graph
    from video_processor.integrators.graph_query import GraphQueryEngine

    # Resolve graph path: explicit --db-path wins, otherwise auto-discover.
    if db_path:
        graph_path = Path(db_path)
        if not graph_path.exists():
            click.echo(f"Error: file not found: {db_path}", err=True)
            sys.exit(1)
    else:
        graph_path = find_nearest_graph()
        if not graph_path:
            click.echo(
                "No knowledge graph found. Run 'planopticon analyze' first to generate one,\n"
                "or use --db-path to specify a file.",
                err=True,
            )
            sys.exit(1)
        # Only announce the path when it was auto-detected.
        click.echo(f"Using: {graph_path}")

    # Build provider manager for agentic mode.  In "auto" mode a failed
    # provider init silently degrades to direct queries; explicit
    # "agentic" mode at least warns the user.
    pm = None
    if mode in ("agentic", "auto"):
        try:
            from video_processor.providers.manager import ProviderManager

            prov = None if provider == "auto" else provider
            pm = ProviderManager(chat_model=chat_model, provider=prov)
        except Exception:
            if mode == "agentic":
                click.echo("Warning: could not initialize LLM provider for agentic mode.", err=True)

    # Create engine — the file extension selects the backing store.
    if graph_path.suffix == ".json":
        engine = GraphQueryEngine.from_json_path(graph_path, provider_manager=pm)
    else:
        engine = GraphQueryEngine.from_db_path(graph_path, provider_manager=pm)

    if interactive:
        _query_repl(engine, output_format)
        return

    # Default to a stats summary when no question was supplied.
    if not question:
        question = "stats"

    result = _execute_query(engine, question, mode)
    _print_result(result, output_format)
|
936
|
|
|
937
|
|
|
938
|
def _execute_query(engine, question, mode): |
|
939
|
"""Parse a question string and execute the appropriate query.""" |
|
940
|
parts = question.strip().split() |
|
941
|
cmd = parts[0].lower() if parts else "" |
|
942
|
|
|
943
|
# Direct commands |
|
944
|
if cmd == "stats": |
|
945
|
return engine.stats() |
|
946
|
|
|
947
|
if cmd == "entities": |
|
948
|
kwargs = _parse_filter_args(parts[1:]) |
|
949
|
return engine.entities( |
|
950
|
name=kwargs.get("name"), |
|
951
|
entity_type=kwargs.get("type"), |
|
952
|
limit=int(kwargs.get("limit", 50)), |
|
953
|
) |
|
954
|
|
|
955
|
if cmd == "relationships": |
|
956
|
kwargs = _parse_filter_args(parts[1:]) |
|
957
|
return engine.relationships( |
|
958
|
source=kwargs.get("source"), |
|
959
|
target=kwargs.get("target"), |
|
960
|
rel_type=kwargs.get("type"), |
|
961
|
limit=int(kwargs.get("limit", 50)), |
|
962
|
) |
|
963
|
|
|
964
|
if cmd == "neighbors": |
|
965
|
entity_name = " ".join(parts[1:]) if len(parts) > 1 else "" |
|
966
|
return engine.neighbors(entity_name) |
|
967
|
|
|
968
|
if cmd == "sources": |
|
969
|
return engine.sources() |
|
970
|
|
|
971
|
if cmd == "provenance": |
|
972
|
entity_name = " ".join(parts[1:]) if len(parts) > 1 else "" |
|
973
|
return engine.provenance(entity_name) |
|
974
|
|
|
975
|
if cmd == "path": |
|
976
|
if len(parts) < 3: |
|
977
|
return engine.stats() |
|
978
|
return engine.shortest_path(start=parts[1], end=parts[2]) |
|
979
|
|
|
980
|
if cmd == "clusters": |
|
981
|
return engine.clusters() |
|
982
|
|
|
983
|
if cmd == "sql": |
|
984
|
sql_query = " ".join(parts[1:]) |
|
985
|
return engine.sql(sql_query) |
|
986
|
|
|
987
|
# Natural language → agentic (or fallback to entity search in direct mode) |
|
988
|
if mode == "direct": |
|
989
|
return engine.entities(name=question) |
|
990
|
return engine.ask(question) |
|
991
|
|
|
992
|
|
|
993
|
def _parse_filter_args(parts): |
|
994
|
"""Parse --key value pairs from a split argument list.""" |
|
995
|
kwargs = {} |
|
996
|
i = 0 |
|
997
|
while i < len(parts): |
|
998
|
if parts[i].startswith("--") and i + 1 < len(parts): |
|
999
|
key = parts[i][2:] |
|
1000
|
kwargs[key] = parts[i + 1] |
|
1001
|
i += 2 |
|
1002
|
else: |
|
1003
|
# Treat as name filter |
|
1004
|
kwargs.setdefault("name", parts[i]) |
|
1005
|
i += 1 |
|
1006
|
return kwargs |
|
1007
|
|
|
1008
|
|
|
1009
|
def _print_result(result, output_format):
    """Print a QueryResult in the requested format (text is the default)."""
    renderers = {
        "json": result.to_json,
        "mermaid": result.to_mermaid,
    }
    render = renderers.get(output_format, result.to_text)
    click.echo(render())
|
1017
|
|
|
1018
|
|
|
1019
|
def _query_repl(engine, output_format):
    """Interactive REPL for querying the knowledge graph.

    Loops until the user types quit/exit/q or sends Ctrl-C / Ctrl-D.
    """
    click.echo("PlanOpticon Knowledge Graph REPL")
    click.echo("Type a query, or 'quit' / 'exit' to leave.\n")
    while True:
        try:
            raw = click.prompt("query", prompt_suffix="> ")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C / Ctrl-D both end the session cleanly.
            click.echo("\nBye.")
            return
        text = raw.strip()
        if not text:
            continue
        if text.lower() in ("quit", "exit", "q"):
            click.echo("Bye.")
            return
        _print_result(_execute_query(engine, text, "auto"), output_format)
        click.echo()
|
1038
|
|
|
1039
|
|
|
1040
|
@cli.command()
@click.argument(
    "service",
    type=click.Choice(
        [
            "google",
            "dropbox",
            "zoom",
            "notion",
            "github",
            "microsoft",
        ]
    ),
)
@click.option("--logout", is_flag=True, help="Clear saved token")
@click.pass_context
def auth(ctx, service, logout):
    """Authenticate with a cloud service via OAuth or API key.

    Uses OAuth when available, falls back to API keys.
    Tokens are saved to ~/.planopticon/ for reuse.

    Examples:

        planopticon auth google

        planopticon auth zoom

        planopticon auth github --logout
    """
    from video_processor.auth import get_auth_manager

    manager = get_auth_manager(service)
    if not manager:
        click.echo(f"Unknown service: {service}", err=True)
        sys.exit(1)

    # Logout is a pure local operation: drop the cached token and stop.
    if logout:
        manager.clear_token()
        click.echo(f"Cleared saved {service} token.")
        return

    result = manager.authenticate()
    if not result.success:
        click.echo(
            f"{service.title()} authentication failed: {result.error}",
            err=True,
        )
        sys.exit(1)
    click.echo(f"{service.title()} authentication successful ({result.method}).")
|
1091
|
|
|
1092
|
|
|
1093
|
@cli.group()
def gws():
    """Google Workspace: fetch docs, sheets, and slides via the gws CLI."""
|
1097
|
|
|
1098
|
|
|
1099
|
@gws.command("list") |
|
1100
|
@click.option("--folder-id", type=str, default=None, help="Drive folder ID to list") |
|
1101
|
@click.option("--query", "-q", type=str, default=None, help="Drive search query") |
|
1102
|
@click.option("--json", "as_json", is_flag=True, help="Output as JSON") |
|
1103
|
def gws_list(folder_id, query, as_json): |
|
1104
|
"""List documents in Google Drive. |
|
1105
|
|
|
1106
|
Examples: |
|
1107
|
|
|
1108
|
planopticon gws list |
|
1109
|
|
|
1110
|
planopticon gws list --folder-id 1abc... |
|
1111
|
|
|
1112
|
planopticon gws list -q "name contains 'PRD'" --json |
|
1113
|
""" |
|
1114
|
from video_processor.sources.gws_source import GWSSource |
|
1115
|
|
|
1116
|
source = GWSSource(folder_id=folder_id, query=query) |
|
1117
|
if not source.authenticate(): |
|
1118
|
click.echo("Error: gws CLI not available or not authenticated.", err=True) |
|
1119
|
click.echo("Install: npm install -g @googleworkspace/cli", err=True) |
|
1120
|
click.echo("Auth: gws auth login", err=True) |
|
1121
|
sys.exit(1) |
|
1122
|
|
|
1123
|
files = source.list_videos(folder_id=folder_id) |
|
1124
|
if as_json: |
|
1125
|
click.echo(json.dumps([f.model_dump() for f in files], indent=2, default=str)) |
|
1126
|
else: |
|
1127
|
if not files: |
|
1128
|
click.echo("No documents found.") |
|
1129
|
return |
|
1130
|
for f in files: |
|
1131
|
size = f"{f.size_bytes / 1024:.0f}KB" if f.size_bytes else "—" |
|
1132
|
click.echo(f" {f.id[:12]}… {size:>8s} {f.mime_type or ''} {f.name}") |
|
1133
|
|
|
1134
|
|
|
1135
|
@gws.command("fetch") |
|
1136
|
@click.argument("doc_ids", nargs=-1) |
|
1137
|
@click.option("--folder-id", type=str, default=None, help="Fetch all docs in a folder") |
|
1138
|
@click.option("-o", "--output", type=click.Path(), default=None, help="Output directory") |
|
1139
|
def gws_fetch(doc_ids, folder_id, output): |
|
1140
|
"""Fetch Google Docs/Sheets/Slides as text files. |
|
1141
|
|
|
1142
|
Examples: |
|
1143
|
|
|
1144
|
planopticon gws fetch DOC_ID1 DOC_ID2 -o ./docs |
|
1145
|
|
|
1146
|
planopticon gws fetch --folder-id 1abc... -o ./docs |
|
1147
|
""" |
|
1148
|
from video_processor.sources.gws_source import GWSSource |
|
1149
|
|
|
1150
|
source = GWSSource(folder_id=folder_id, doc_ids=list(doc_ids)) |
|
1151
|
if not source.authenticate(): |
|
1152
|
click.echo("Error: gws CLI not available or not authenticated.", err=True) |
|
1153
|
sys.exit(1) |
|
1154
|
|
|
1155
|
out_dir = Path(output) if output else Path.cwd() / "gws_docs" |
|
1156
|
out_dir.mkdir(parents=True, exist_ok=True) |
|
1157
|
|
|
1158
|
files = source.list_videos(folder_id=folder_id) |
|
1159
|
if not files: |
|
1160
|
click.echo("No documents found.") |
|
1161
|
return |
|
1162
|
|
|
1163
|
for f in files: |
|
1164
|
safe_name = f.name.replace("/", "_").replace("\\", "_") |
|
1165
|
dest = out_dir / f"{safe_name}.txt" |
|
1166
|
try: |
|
1167
|
source.download(f, dest) |
|
1168
|
click.echo(f" ✓ {f.name} → {dest}") |
|
1169
|
except Exception as e: |
|
1170
|
click.echo(f" ✗ {f.name}: {e}", err=True) |
|
1171
|
|
|
1172
|
click.echo(f"\nFetched {len(files)} document(s) to {out_dir}") |
|
1173
|
|
|
1174
|
|
|
1175
|
@gws.command("ingest") |
|
1176
|
@click.option("--folder-id", type=str, default=None, help="Drive folder ID") |
|
1177
|
@click.option("--doc-id", type=str, multiple=True, help="Specific doc IDs (repeatable)") |
|
1178
|
@click.option("--query", "-q", type=str, default=None, help="Drive search query") |
|
1179
|
@click.option("-o", "--output", type=click.Path(), default=None, help="Output directory") |
|
1180
|
@click.option("--db-path", type=click.Path(), default=None, help="Existing DB to merge into") |
|
1181
|
@click.option( |
|
1182
|
"-p", |
|
1183
|
"--provider", |
|
1184
|
type=click.Choice( |
|
1185
|
[ |
|
1186
|
"auto", |
|
1187
|
"openai", |
|
1188
|
"anthropic", |
|
1189
|
"gemini", |
|
1190
|
"ollama", |
|
1191
|
"azure", |
|
1192
|
"together", |
|
1193
|
"fireworks", |
|
1194
|
"cerebras", |
|
1195
|
"xai", |
|
1196
|
] |
|
1197
|
), |
|
1198
|
default="auto", |
|
1199
|
help="API provider", |
|
1200
|
) |
|
1201
|
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks") |
|
1202
|
@click.pass_context |
|
1203
|
def gws_ingest(ctx, folder_id, doc_id, query, output, db_path, provider, chat_model): |
|
1204
|
"""Fetch Google Workspace docs and ingest into a knowledge graph. |
|
1205
|
|
|
1206
|
Combines gws fetch + planopticon ingest in one step. |
|
1207
|
|
|
1208
|
Examples: |
|
1209
|
|
|
1210
|
planopticon gws ingest --folder-id 1abc... |
|
1211
|
|
|
1212
|
planopticon gws ingest --doc-id DOC1 --doc-id DOC2 -o ./results |
|
1213
|
|
|
1214
|
planopticon gws ingest -q "name contains 'spec'" --db-path existing.db |
|
1215
|
""" |
|
1216
|
import tempfile |
|
1217
|
|
|
1218
|
from video_processor.integrators.knowledge_graph import KnowledgeGraph |
|
1219
|
from video_processor.processors.ingest import ingest_file |
|
1220
|
from video_processor.providers.manager import ProviderManager |
|
1221
|
from video_processor.sources.gws_source import GWSSource |
|
1222
|
|
|
1223
|
source = GWSSource(folder_id=folder_id, doc_ids=list(doc_id), query=query) |
|
1224
|
if not source.authenticate(): |
|
1225
|
click.echo("Error: gws CLI not available or not authenticated.", err=True) |
|
1226
|
click.echo("Install: npm install -g @googleworkspace/cli", err=True) |
|
1227
|
click.echo("Auth: gws auth login", err=True) |
|
1228
|
sys.exit(1) |
|
1229
|
|
|
1230
|
# Fetch docs to temp dir |
|
1231
|
files = source.list_videos(folder_id=folder_id) |
|
1232
|
if not files: |
|
1233
|
click.echo("No documents found.") |
|
1234
|
return |
|
1235
|
|
|
1236
|
click.echo(f"Found {len(files)} document(s), fetching...") |
|
1237
|
|
|
1238
|
with tempfile.TemporaryDirectory() as tmp_dir: |
|
1239
|
tmp_path = Path(tmp_dir) |
|
1240
|
local_files = [] |
|
1241
|
for f in files: |
|
1242
|
safe_name = f.name.replace("/", "_").replace("\\", "_") |
|
1243
|
dest = tmp_path / f"{safe_name}.txt" |
|
1244
|
try: |
|
1245
|
source.download(f, dest) |
|
1246
|
local_files.append(dest) |
|
1247
|
click.echo(f" ✓ {f.name}") |
|
1248
|
except Exception as e: |
|
1249
|
click.echo(f" ✗ {f.name}: {e}", err=True) |
|
1250
|
|
|
1251
|
if not local_files: |
|
1252
|
click.echo("No documents fetched successfully.", err=True) |
|
1253
|
sys.exit(1) |
|
1254
|
|
|
1255
|
# Set up KG |
|
1256
|
prov = None if provider == "auto" else provider |
|
1257
|
pm = ProviderManager(chat_model=chat_model, provider=prov) |
|
1258
|
|
|
1259
|
if db_path: |
|
1260
|
kg_path = Path(db_path) |
|
1261
|
elif output: |
|
1262
|
out_dir = Path(output) |
|
1263
|
out_dir.mkdir(parents=True, exist_ok=True) |
|
1264
|
kg_path = out_dir / "knowledge_graph.db" |
|
1265
|
else: |
|
1266
|
kg_path = Path.cwd() / "knowledge_graph.db" |
|
1267
|
|
|
1268
|
kg_path.parent.mkdir(parents=True, exist_ok=True) |
|
1269
|
kg = KnowledgeGraph(provider_manager=pm, db_path=kg_path) |
|
1270
|
|
|
1271
|
total_chunks = 0 |
|
1272
|
for lf in local_files: |
|
1273
|
try: |
|
1274
|
count = ingest_file(lf, kg) |
|
1275
|
total_chunks += count |
|
1276
|
click.echo(f" Ingested {lf.stem}: {count} chunks") |
|
1277
|
except Exception as e: |
|
1278
|
click.echo(f" Failed to ingest {lf.stem}: {e}", err=True) |
|
1279
|
|
|
1280
|
kg.save(kg_path) |
|
1281
|
kg.save(kg_path.with_suffix(".json")) |
|
1282
|
|
|
1283
|
entity_count = kg._store.get_entity_count() |
|
1284
|
rel_count = kg._store.get_relationship_count() |
|
1285
|
|
|
1286
|
click.echo("\nIngestion complete:") |
|
1287
|
click.echo(f" Documents: {len(local_files)}") |
|
1288
|
click.echo(f" Chunks: {total_chunks}") |
|
1289
|
click.echo(f" Entities: {entity_count}") |
|
1290
|
click.echo(f" Relationships: {rel_count}") |
|
1291
|
click.echo(f" Knowledge graph: {kg_path}") |
|
1292
|
|
|
1293
|
|
|
1294
|
@cli.group()
def m365():
    """Microsoft 365: fetch docs from SharePoint and OneDrive via the m365 CLI."""
|
1298
|
|
|
1299
|
|
|
1300
|
@m365.command("list") |
|
1301
|
@click.option("--web-url", type=str, required=True, help="SharePoint site URL") |
|
1302
|
@click.option("--folder-url", type=str, required=True, help="Server-relative folder URL") |
|
1303
|
@click.option("--recursive", is_flag=True, help="Include subfolders") |
|
1304
|
@click.option("--json", "as_json", is_flag=True, help="Output as JSON") |
|
1305
|
def m365_list(web_url, folder_url, recursive, as_json): |
|
1306
|
"""List documents in SharePoint or OneDrive. |
|
1307
|
|
|
1308
|
Examples: |
|
1309
|
|
|
1310
|
planopticon m365 list --web-url https://contoso.sharepoint.com/sites/proj \\ |
|
1311
|
--folder-url /sites/proj/Shared\\ Documents |
|
1312
|
|
|
1313
|
planopticon m365 list --web-url URL --folder-url FOLDER --recursive --json |
|
1314
|
""" |
|
1315
|
from video_processor.sources.m365_source import M365Source |
|
1316
|
|
|
1317
|
source = M365Source(web_url=web_url, folder_url=folder_url, recursive=recursive) |
|
1318
|
if not source.authenticate(): |
|
1319
|
click.echo("Error: m365 CLI not available or not logged in.", err=True) |
|
1320
|
click.echo("Install: npm install -g @pnp/cli-microsoft365", err=True) |
|
1321
|
click.echo("Auth: m365 login", err=True) |
|
1322
|
sys.exit(1) |
|
1323
|
|
|
1324
|
files = source.list_videos() |
|
1325
|
if as_json: |
|
1326
|
click.echo(json.dumps([f.model_dump() for f in files], indent=2, default=str)) |
|
1327
|
else: |
|
1328
|
if not files: |
|
1329
|
click.echo("No documents found.") |
|
1330
|
return |
|
1331
|
for f in files: |
|
1332
|
size = f"{f.size_bytes / 1024:.0f}KB" if f.size_bytes else "—" |
|
1333
|
click.echo(f" {f.id[:12]}… {size:>8s} {f.name}") |
|
1334
|
|
|
1335
|
|
|
1336
|
@m365.command("fetch") |
|
1337
|
@click.option("--web-url", type=str, required=True, help="SharePoint site URL") |
|
1338
|
@click.option("--folder-url", type=str, default=None, help="Server-relative folder URL") |
|
1339
|
@click.option("--file-id", type=str, multiple=True, help="Specific file IDs (repeatable)") |
|
1340
|
@click.option("-o", "--output", type=click.Path(), default=None, help="Output directory") |
|
1341
|
def m365_fetch(web_url, folder_url, file_id, output): |
|
1342
|
"""Fetch SharePoint/OneDrive documents as local files. |
|
1343
|
|
|
1344
|
Examples: |
|
1345
|
|
|
1346
|
planopticon m365 fetch --web-url URL --folder-url FOLDER -o ./docs |
|
1347
|
|
|
1348
|
planopticon m365 fetch --web-url URL --file-id ID1 --file-id ID2 -o ./docs |
|
1349
|
""" |
|
1350
|
from video_processor.sources.m365_source import M365Source |
|
1351
|
|
|
1352
|
source = M365Source(web_url=web_url, folder_url=folder_url, file_ids=list(file_id)) |
|
1353
|
if not source.authenticate(): |
|
1354
|
click.echo("Error: m365 CLI not available or not logged in.", err=True) |
|
1355
|
sys.exit(1) |
|
1356
|
|
|
1357
|
out_dir = Path(output) if output else Path.cwd() / "m365_docs" |
|
1358
|
out_dir.mkdir(parents=True, exist_ok=True) |
|
1359
|
|
|
1360
|
files = source.list_videos() |
|
1361
|
if not files: |
|
1362
|
click.echo("No documents found.") |
|
1363
|
return |
|
1364
|
|
|
1365
|
for f in files: |
|
1366
|
dest = out_dir / f.name |
|
1367
|
try: |
|
1368
|
source.download(f, dest) |
|
1369
|
click.echo(f" fetched {f.name}") |
|
1370
|
except Exception as e: |
|
1371
|
click.echo(f" failed {f.name}: {e}", err=True) |
|
1372
|
|
|
1373
|
click.echo(f"\nFetched {len(files)} document(s) to {out_dir}") |
|
1374
|
|
|
1375
|
|
|
1376
|
@m365.command("ingest") |
|
1377
|
@click.option("--web-url", type=str, required=True, help="SharePoint site URL") |
|
1378
|
@click.option("--folder-url", type=str, default=None, help="Server-relative folder URL") |
|
1379
|
@click.option("--file-id", type=str, multiple=True, help="Specific file IDs (repeatable)") |
|
1380
|
@click.option("-o", "--output", type=click.Path(), default=None, help="Output directory") |
|
1381
|
@click.option("--db-path", type=click.Path(), default=None, help="Existing DB to merge into") |
|
1382
|
@click.option( |
|
1383
|
"-p", |
|
1384
|
"--provider", |
|
1385
|
type=click.Choice( |
|
1386
|
[ |
|
1387
|
"auto", |
|
1388
|
"openai", |
|
1389
|
"anthropic", |
|
1390
|
"gemini", |
|
1391
|
"ollama", |
|
1392
|
"azure", |
|
1393
|
"together", |
|
1394
|
"fireworks", |
|
1395
|
"cerebras", |
|
1396
|
"xai", |
|
1397
|
] |
|
1398
|
), |
|
1399
|
default="auto", |
|
1400
|
help="API provider", |
|
1401
|
) |
|
1402
|
@click.option("--chat-model", type=str, default=None, help="Override model for LLM/chat tasks") |
|
1403
|
@click.pass_context |
|
1404
|
def m365_ingest(ctx, web_url, folder_url, file_id, output, db_path, provider, chat_model): |
|
1405
|
"""Fetch SharePoint/OneDrive docs and ingest into a knowledge graph. |
|
1406
|
|
|
1407
|
Examples: |
|
1408
|
|
|
1409
|
planopticon m365 ingest --web-url URL --folder-url FOLDER |
|
1410
|
|
|
1411
|
planopticon m365 ingest --web-url URL --file-id ID1 --file-id ID2 -o ./results |
|
1412
|
""" |
|
1413
|
import tempfile |
|
1414
|
|
|
1415
|
from video_processor.integrators.knowledge_graph import KnowledgeGraph |
|
1416
|
from video_processor.processors.ingest import ingest_file |
|
1417
|
from video_processor.providers.manager import ProviderManager |
|
1418
|
from video_processor.sources.m365_source import M365Source |
|
1419
|
|
|
1420
|
source = M365Source(web_url=web_url, folder_url=folder_url, file_ids=list(file_id)) |
|
1421
|
if not source.authenticate(): |
|
1422
|
click.echo("Error: m365 CLI not available or not logged in.", err=True) |
|
1423
|
click.echo("Install: npm install -g @pnp/cli-microsoft365", err=True) |
|
1424
|
click.echo("Auth: m365 login", err=True) |
|
1425
|
sys.exit(1) |
|
1426
|
|
|
1427
|
files = source.list_videos() |
|
1428
|
if not files: |
|
1429
|
click.echo("No documents found.") |
|
1430
|
return |
|
1431
|
|
|
1432
|
click.echo(f"Found {len(files)} document(s), fetching...") |
|
1433
|
|
|
1434
|
with tempfile.TemporaryDirectory() as tmp_dir: |
|
1435
|
tmp_path = Path(tmp_dir) |
|
1436
|
local_files = [] |
|
1437
|
for f in files: |
|
1438
|
dest = tmp_path / f.name |
|
1439
|
try: |
|
1440
|
source.download(f, dest) |
|
1441
|
# Extract text for non-text formats |
|
1442
|
text_dest = tmp_path / f"{Path(f.name).stem}.txt" |
|
1443
|
text = source.download_as_text(f) |
|
1444
|
text_dest.write_text(text, encoding="utf-8") |
|
1445
|
local_files.append(text_dest) |
|
1446
|
click.echo(f" fetched {f.name}") |
|
1447
|
except Exception as e: |
|
1448
|
click.echo(f" failed {f.name}: {e}", err=True) |
|
1449
|
|
|
1450
|
if not local_files: |
|
1451
|
click.echo("No documents fetched successfully.", err=True) |
|
1452
|
sys.exit(1) |
|
1453
|
|
|
1454
|
prov = None if provider == "auto" else provider |
|
1455
|
pm = ProviderManager(chat_model=chat_model, provider=prov) |
|
1456
|
|
|
1457
|
if db_path: |
|
1458
|
kg_path = Path(db_path) |
|
1459
|
elif output: |
|
1460
|
out_dir = Path(output) |
|
1461
|
out_dir.mkdir(parents=True, exist_ok=True) |
|
1462
|
kg_path = out_dir / "knowledge_graph.db" |
|
1463
|
else: |
|
1464
|
kg_path = Path.cwd() / "knowledge_graph.db" |
|
1465
|
|
|
1466
|
kg_path.parent.mkdir(parents=True, exist_ok=True) |
|
1467
|
kg = KnowledgeGraph(provider_manager=pm, db_path=kg_path) |
|
1468
|
|
|
1469
|
total_chunks = 0 |
|
1470
|
for lf in local_files: |
|
1471
|
try: |
|
1472
|
count = ingest_file(lf, kg) |
|
1473
|
total_chunks += count |
|
1474
|
click.echo(f" Ingested {lf.stem}: {count} chunks") |
|
1475
|
except Exception as e: |
|
1476
|
click.echo(f" Failed to ingest {lf.stem}: {e}", err=True) |
|
1477
|
|
|
1478
|
kg.save(kg_path) |
|
1479
|
kg.save(kg_path.with_suffix(".json")) |
|
1480
|
|
|
1481
|
entity_count = kg._store.get_entity_count() |
|
1482
|
rel_count = kg._store.get_relationship_count() |
|
1483
|
|
|
1484
|
click.echo("\nIngestion complete:") |
|
1485
|
click.echo(f" Documents: {len(local_files)}") |
|
1486
|
click.echo(f" Chunks: {total_chunks}") |
|
1487
|
click.echo(f" Entities: {entity_count}") |
|
1488
|
click.echo(f" Relationships: {rel_count}") |
|
1489
|
click.echo(f" Knowledge graph: {kg_path}") |
|
1490
|
|
|
1491
|
|
|
1492
|
@cli.group()
def export():
    """Export knowledge graphs as markdown docs, notes, or CSV."""
|
1496
|
|
|
1497
|
|
|
1498
|
@export.command("markdown") |
|
1499
|
@click.argument("db_path", type=click.Path(exists=True)) |
|
1500
|
@click.option("-o", "--output", type=click.Path(), default=None, help="Output directory") |
|
1501
|
@click.option( |
|
1502
|
"--type", |
|
1503
|
"doc_types", |
|
1504
|
type=click.Choice( |
|
1505
|
[ |
|
1506
|
"summary", |
|
1507
|
"meeting-notes", |
|
1508
|
"glossary", |
|
1509
|
"relationship-map", |
|
1510
|
"status-report", |
|
1511
|
"entity-index", |
|
1512
|
"csv", |
|
1513
|
"all", |
|
1514
|
] |
|
1515
|
), |
|
1516
|
multiple=True, |
|
1517
|
default=("all",), |
|
1518
|
help="Document types to generate (repeatable)", |
|
1519
|
) |
|
1520
|
def export_markdown(db_path, output, doc_types): |
|
1521
|
"""Generate markdown documents from a knowledge graph. |
|
1522
|
|
|
1523
|
No API key needed — pure template-based generation. |
|
1524
|
|
|
1525
|
Examples: |
|
1526
|
|
|
1527
|
planopticon export markdown knowledge_graph.db |
|
1528
|
|
|
1529
|
planopticon export markdown kg.db -o ./docs --type summary --type glossary |
|
1530
|
|
|
1531
|
planopticon export markdown kg.db --type meeting-notes --type csv |
|
1532
|
""" |
|
1533
|
from video_processor.exporters.markdown import generate_all |
|
1534
|
from video_processor.integrators.knowledge_graph import KnowledgeGraph |
|
1535
|
|
|
1536
|
db_path = Path(db_path) |
|
1537
|
out_dir = Path(output) if output else Path.cwd() / "export" |
|
1538
|
|
|
1539
|
kg = KnowledgeGraph(db_path=db_path) |
|
1540
|
kg_data = kg.to_dict() |
|
1541
|
|
|
1542
|
types = None if "all" in doc_types else list(doc_types) |
|
1543
|
created = generate_all(kg_data, out_dir, doc_types=types) |
|
1544
|
|
|
1545
|
click.echo(f"Generated {len(created)} files in {out_dir}/") |
|
1546
|
# Show top-level files (not entity briefs) |
|
1547
|
for p in sorted(created): |
|
1548
|
if p.parent == out_dir: |
|
1549
|
click.echo(f" {p.name}") |
|
1550
|
entity_count = len([p for p in created if p.parent != out_dir]) |
|
1551
|
if entity_count: |
|
1552
|
click.echo(f" entities/ ({entity_count} entity briefs)") |
|
1553
|
|
|
1554
|
|
|
1555
|
@export.command("obsidian") |
|
1556
|
@click.argument("db_path", type=click.Path(exists=True)) |
|
1557
|
@click.option("-o", "--output", type=click.Path(), default=None, help="Output vault directory") |
|
1558
|
def export_obsidian(db_path, output): |
|
1559
|
"""Export knowledge graph as an Obsidian vault with frontmatter and wiki-links. |
|
1560
|
|
|
1561
|
Examples: |
|
1562
|
|
|
1563
|
planopticon export obsidian knowledge_graph.db -o ./my-vault |
|
1564
|
""" |
|
1565
|
from video_processor.agent.skills.notes_export import export_to_obsidian |
|
1566
|
from video_processor.integrators.knowledge_graph import KnowledgeGraph |
|
1567
|
|
|
1568
|
db_path = Path(db_path) |
|
1569
|
out_dir = Path(output) if output else Path.cwd() / "obsidian-vault" |
|
1570
|
|
|
1571
|
kg = KnowledgeGraph(db_path=db_path) |
|
1572
|
kg_data = kg.to_dict() |
|
1573
|
created = export_to_obsidian(kg_data, out_dir) |
|
1574
|
|
|
1575
|
click.echo(f"Exported Obsidian vault: {len(created)} notes in {out_dir}/") |
|
1576
|
|
|
1577
|
|
|
1578
|
@export.command("notion") |
|
1579
|
@click.argument("db_path", type=click.Path(exists=True)) |
|
1580
|
@click.option("-o", "--output", type=click.Path(), default=None, help="Output directory") |
|
1581
|
def export_notion(db_path, output): |
|
1582
|
"""Export knowledge graph as Notion-compatible markdown + CSV database. |
|
1583
|
|
|
1584
|
Examples: |
|
1585
|
|
|
1586
|
planopticon export notion knowledge_graph.db -o ./notion-export |
|
1587
|
""" |
|
1588
|
from video_processor.agent.skills.notes_export import export_to_notion_md |
|
1589
|
from video_processor.integrators.knowledge_graph import KnowledgeGraph |
|
1590
|
|
|
1591
|
db_path = Path(db_path) |
|
1592
|
out_dir = Path(output) if output else Path.cwd() / "notion-export" |
|
1593
|
|
|
1594
|
kg = KnowledgeGraph(db_path=db_path) |
|
1595
|
kg_data = kg.to_dict() |
|
1596
|
created = export_to_notion_md(kg_data, out_dir) |
|
1597
|
|
|
1598
|
click.echo(f"Exported Notion markdown: {len(created)} files in {out_dir}/") |
|
1599
|
|
|
1600
|
|
|
1601
|
@export.command("pdf") |
|
1602
|
@click.argument("db_path", type=click.Path(exists=True)) |
|
1603
|
@click.option("-o", "--output", type=click.Path(), default=None, help="Output PDF file path") |
|
1604
|
@click.option("--title", type=str, default=None, help="Report title") |
|
1605
|
@click.option( |
|
1606
|
"--diagrams", |
|
1607
|
type=click.Path(exists=True), |
|
1608
|
default=None, |
|
1609
|
help="Directory with diagram PNGs to embed", |
|
1610
|
) |
|
1611
|
def export_pdf(db_path, output, title, diagrams): |
|
1612
|
"""Generate a PDF report from a knowledge graph. |
|
1613
|
|
|
1614
|
Requires: pip install reportlab |
|
1615
|
|
|
1616
|
Examples: |
|
1617
|
|
|
1618
|
planopticon export pdf knowledge_graph.db |
|
1619
|
|
|
1620
|
planopticon export pdf kg.db -o report.pdf --title "Q1 Review" |
|
1621
|
|
|
1622
|
planopticon export pdf kg.db --diagrams ./diagrams/ |
|
1623
|
""" |
|
1624
|
from video_processor.exporters.pdf_export import generate_pdf |
|
1625
|
from video_processor.integrators.knowledge_graph import KnowledgeGraph |
|
1626
|
|
|
1627
|
db_path = Path(db_path) |
|
1628
|
out_path = Path(output) if output else Path.cwd() / "export" / "report.pdf" |
|
1629
|
diagrams_path = Path(diagrams) if diagrams else None |
|
1630
|
|
|
1631
|
kg = KnowledgeGraph(db_path=db_path) |
|
1632
|
kg_data = kg.to_dict() |
|
1633
|
|
|
1634
|
result = generate_pdf(kg_data, out_path, title=title, diagrams_dir=diagrams_path) |
|
1635
|
click.echo(f"Generated PDF: {result}") |
|
1636
|
|
|
1637
|
|
|
1638
|
@export.command("pptx") |
|
1639
|
@click.argument("db_path", type=click.Path(exists=True)) |
|
1640
|
@click.option("-o", "--output", type=click.Path(), default=None, help="Output PPTX file path") |
|
1641
|
@click.option("--title", type=str, default=None, help="Presentation title") |
|
1642
|
@click.option( |
|
1643
|
"--diagrams", |
|
1644
|
type=click.Path(exists=True), |
|
1645
|
default=None, |
|
1646
|
help="Directory with diagram PNGs to embed", |
|
1647
|
) |
|
1648
|
def export_pptx(db_path, output, title, diagrams): |
|
1649
|
"""Generate a PPTX slide deck from a knowledge graph. |
|
1650
|
|
|
1651
|
Requires: pip install python-pptx |
|
1652
|
|
|
1653
|
Examples: |
|
1654
|
|
|
1655
|
planopticon export pptx knowledge_graph.db |
|
1656
|
|
|
1657
|
planopticon export pptx kg.db -o slides.pptx --title "Architecture Overview" |
|
1658
|
|
|
1659
|
planopticon export pptx kg.db --diagrams ./diagrams/ |
|
1660
|
""" |
|
1661
|
from video_processor.exporters.pptx_export import generate_pptx |
|
1662
|
from video_processor.integrators.knowledge_graph import KnowledgeGraph |
|
1663
|
|
|
1664
|
db_path = Path(db_path) |
|
1665
|
out_path = Path(output) if output else Path.cwd() / "export" / "presentation.pptx" |
|
1666
|
diagrams_path = Path(diagrams) if diagrams else None |
|
1667
|
|
|
1668
|
kg = KnowledgeGraph(db_path=db_path) |
|
1669
|
kg_data = kg.to_dict() |
|
1670
|
|
|
1671
|
result = generate_pptx(kg_data, out_path, title=title, diagrams_dir=diagrams_path) |
|
1672
|
click.echo(f"Generated PPTX: {result}") |
|
1673
|
|
|
1674
|
|
|
1675
|
@export.command("exchange")
@click.argument("db_path", type=click.Path(exists=True))
@click.option(
    "-o",
    "--output",
    type=click.Path(),
    default=None,
    help="Output JSON file path",
)
@click.option(
    "--name",
    "project_name",
    type=str,
    default="Untitled",
    help="Project name for the exchange payload",
)
@click.option(
    "--description",
    "project_desc",
    type=str,
    default="",
    help="Project description",
)
def export_exchange(db_path, output, project_name, project_desc):
    """Export a knowledge graph as a PlanOpticonExchange JSON file.

    Examples:

        planopticon export exchange knowledge_graph.db

        planopticon export exchange kg.db -o exchange.json --name "My Project"
    """
    # Deferred imports keep CLI startup fast.
    from video_processor.exchange import PlanOpticonExchange
    from video_processor.integrators.knowledge_graph import KnowledgeGraph

    # Build the exchange payload from the stored graph.
    graph = KnowledgeGraph(db_path=Path(db_path))
    payload = PlanOpticonExchange.from_knowledge_graph(
        graph.to_dict(),
        project_name=project_name,
        project_description=project_desc,
    )

    destination = Path(output) if output else Path.cwd() / "exchange.json"
    payload.to_file(destination)

    entity_total = len(payload.entities)
    rel_total = len(payload.relationships)
    click.echo(
        f"Exported PlanOpticonExchange to {destination} "
        f"({entity_total} entities, "
        f"{rel_total} relationships)"
    )
|
@cli.group()
def wiki():
    """Generate and push GitHub wikis from knowledge graphs."""
|
@wiki.command("generate")
@click.argument("db_path", type=click.Path(exists=True))
@click.option("-o", "--output", type=click.Path(), default=None, help="Output directory for wiki")
@click.option("--title", type=str, default="Knowledge Base", help="Wiki title")
def wiki_generate(db_path, output, title):
    """Generate a GitHub wiki from a knowledge graph.

    Examples:

        planopticon wiki generate knowledge_graph.db -o ./wiki

        planopticon wiki generate results/kg.db --title "Project Wiki"
    """
    # Deferred imports keep CLI startup fast.
    from video_processor.agent.skills.wiki_generator import generate_wiki, write_wiki
    from video_processor.integrators.knowledge_graph import KnowledgeGraph

    if output:
        target_dir = Path(output)
    else:
        target_dir = Path.cwd() / "wiki"

    # Build the pages from the graph's dict form, then write them to disk.
    graph = KnowledgeGraph(db_path=Path(db_path))
    page_map = generate_wiki(graph.to_dict(), title=title)
    page_files = write_wiki(page_map, target_dir)

    click.echo(f"Generated {len(page_files)} wiki pages in {target_dir}")
    for page_path in sorted(page_files):
        click.echo(f"  {page_path.name}")
|
@wiki.command("push")
@click.argument("wiki_dir", type=click.Path(exists=True))
@click.argument("repo", type=str)
@click.option("--message", "-m", type=str, default="Update wiki", help="Commit message")
def wiki_push(wiki_dir, repo, message):
    """Push generated wiki pages to a GitHub wiki repo.

    REPO should be in 'owner/repo' format.

    Examples:

        planopticon wiki push ./wiki ConflictHQ/PlanOpticon

        planopticon wiki push ./wiki owner/repo -m "Add entity pages"
    """
    from video_processor.agent.skills.wiki_generator import push_wiki

    # Guard clause: exit non-zero on failure, report success otherwise.
    if not push_wiki(Path(wiki_dir), repo, message=message):
        click.echo("Wiki push failed. Check auth and repo permissions.", err=True)
        sys.exit(1)
    click.echo(f"Wiki pushed to https://github.com/{repo}/wiki")
|
@cli.group()
def recordings():
    """Fetch meeting recordings from Zoom, Teams, and Google Meet."""
|
@recordings.command("zoom-list")
@click.option("--json", "as_json", is_flag=True, help="Output as JSON")
def recordings_zoom_list(as_json):
    """List Zoom cloud recordings.

    Requires ZOOM_CLIENT_ID (and optionally ZOOM_CLIENT_SECRET,
    ZOOM_ACCOUNT_ID) environment variables.

    Examples:

        planopticon recordings zoom-list

        planopticon recordings zoom-list --json
    """
    from video_processor.sources.zoom_source import ZoomSource

    src = ZoomSource()
    if not src.authenticate():
        click.echo("Zoom authentication failed.", err=True)
        sys.exit(1)

    found = src.list_videos()
    if as_json:
        click.echo(json.dumps([rec.__dict__ for rec in found], indent=2, default=str))
        return

    click.echo(f"Found {len(found)} recording(s):")
    for rec in found:
        # Render an approximate size; some recordings report no size.
        if rec.size_bytes:
            size = f"{rec.size_bytes // 1_000_000} MB"
        else:
            size = "unknown"
        click.echo(f"  {rec.name} ({size}) {rec.modified_at or ''}")
|
@recordings.command("teams-list")
@click.option("--user-id", default="me", help="Microsoft user ID")
@click.option("--json", "as_json", is_flag=True, help="Output as JSON")
def recordings_teams_list(user_id, as_json):
    """List Teams meeting recordings via the m365 CLI.

    Requires: npm install -g @pnp/cli-microsoft365 && m365 login

    Examples:

        planopticon recordings teams-list

        planopticon recordings teams-list --json
    """
    from video_processor.sources.teams_recording_source import TeamsRecordingSource

    src = TeamsRecordingSource(user_id=user_id)
    if not src.authenticate():
        click.echo("Teams authentication failed.", err=True)
        sys.exit(1)

    found = src.list_videos()
    if as_json:
        click.echo(json.dumps([rec.__dict__ for rec in found], indent=2, default=str))
        return

    click.echo(f"Found {len(found)} recording(s):")
    for rec in found:
        click.echo(f"  {rec.name} {rec.modified_at or ''}")
|
@recordings.command("meet-list")
@click.option("--folder-id", default=None, help="Drive folder ID")
@click.option("--json", "as_json", is_flag=True, help="Output as JSON")
def recordings_meet_list(folder_id, as_json):
    """List Google Meet recordings in Drive via the gws CLI.

    Requires: npm install -g @googleworkspace/cli && gws auth login

    Examples:

        planopticon recordings meet-list

        planopticon recordings meet-list --folder-id abc123
    """
    from video_processor.sources.meet_recording_source import MeetRecordingSource

    src = MeetRecordingSource(drive_folder_id=folder_id)
    if not src.authenticate():
        click.echo("Google Meet authentication failed.", err=True)
        sys.exit(1)

    found = src.list_videos()
    if as_json:
        click.echo(json.dumps([rec.__dict__ for rec in found], indent=2, default=str))
        return

    click.echo(f"Found {len(found)} recording(s):")
    for rec in found:
        # Render an approximate size; some recordings report no size.
        if rec.size_bytes:
            size = f"{rec.size_bytes // 1_000_000} MB"
        else:
            size = "unknown"
        click.echo(f"  {rec.name} ({size}) {rec.modified_at or ''}")
|
@cli.group()
def kg():
    """Knowledge graph utilities: convert, sync, and inspect."""
|
@kg.command()
@click.argument("source_path", type=click.Path(exists=True))
@click.argument("dest_path", type=click.Path())
def convert(source_path, dest_path):
    """Convert a knowledge graph between formats.

    Supports .db (SQLite) and .json. The output format is inferred from DEST_PATH extension.

    Examples:

        planopticon kg convert results/knowledge_graph.db output.json
        planopticon kg convert knowledge_graph.json knowledge_graph.db
    """
    from video_processor.integrators.graph_store import InMemoryStore, SQLiteStore

    src = Path(source_path)
    dst = Path(dest_path)

    # A same-suffix "conversion" would be a no-op; reject it up front.
    if src.suffix == dst.suffix:
        click.echo(f"Source and destination are the same format ({src.suffix}).", err=True)
        sys.exit(1)

    # Load the source into a store KnowledgeGraph can wrap.
    if src.suffix == ".db":
        store = SQLiteStore(src)
    elif src.suffix == ".json":
        payload = json.loads(src.read_text())
        store = InMemoryStore()
        for node in payload.get("nodes", []):
            entity_name = node.get("name", "")
            descriptions = node.get("descriptions", [])
            if isinstance(descriptions, set):
                descriptions = list(descriptions)
            store.merge_entity(entity_name, node.get("type", "concept"), descriptions)
            for occurrence in node.get("occurrences", []):
                store.add_occurrence(
                    entity_name,
                    occurrence.get("source", ""),
                    occurrence.get("timestamp"),
                    occurrence.get("text"),
                )
        for link in payload.get("relationships", []):
            store.add_relationship(
                link.get("source", ""),
                link.get("target", ""),
                link.get("type", "related_to"),
                content_source=link.get("content_source"),
                timestamp=link.get("timestamp"),
            )
    else:
        click.echo(f"Unsupported source format: {src.suffix}", err=True)
        sys.exit(1)

    # Write the destination; the save format follows the destination suffix.
    from video_processor.integrators.knowledge_graph import KnowledgeGraph

    KnowledgeGraph(store=store).save(dst)

    entity_total = store.get_entity_count()
    link_total = store.get_relationship_count()
    click.echo(
        f"Converted {src} → {dst} ({entity_total} entities, {link_total} relationships)"
    )

    # Only SQLite-backed stores hold a handle worth closing.
    if hasattr(store, "close"):
        store.close()
|
@kg.command()
@click.argument("db_path", type=click.Path(exists=True))
@click.argument("json_path", type=click.Path(), required=False, default=None)
@click.option(
    "--direction",
    type=click.Choice(["db-to-json", "json-to-db", "auto"]),
    default="auto",
    help="Sync direction. 'auto' picks the newer file as source.",
)
def sync(db_path, json_path, direction):
    """Sync a .db and .json knowledge graph, updating the stale one.

    If JSON_PATH is omitted, uses the same name with .json extension.

    Examples:

        planopticon kg sync results/knowledge_graph.db
        planopticon kg sync knowledge_graph.db knowledge_graph.json --direction db-to-json
    """
    db_file = Path(db_path)
    json_file = db_file.with_suffix(".json") if json_path is None else Path(json_path)

    # 'auto' treats the fresher file as the source of truth; a missing
    # counterpart forces the direction toward creating it.
    if direction == "auto":
        if not json_file.exists():
            direction = "db-to-json"
        elif not db_file.exists():
            direction = "json-to-db"
        elif db_file.stat().st_mtime >= json_file.stat().st_mtime:
            direction = "db-to-json"
        else:
            direction = "json-to-db"

    from video_processor.integrators.knowledge_graph import KnowledgeGraph

    if direction == "db-to-json":
        graph = KnowledgeGraph(db_path=db_file)
        graph.save(json_file)
        click.echo(f"Synced {db_file} → {json_file}")
    else:
        graph = KnowledgeGraph.from_dict(json.loads(json_file.read_text()), db_path=db_file)
        # Saving forces the db file to be (re)written.
        graph.save(db_file)
        click.echo(f"Synced {json_file} → {db_file}")

    click.echo(
        f"  {graph._store.get_entity_count()} entities, "
        f"{graph._store.get_relationship_count()} relationships"
    )
|
@kg.command()
@click.argument("path", type=click.Path(exists=True))
def inspect(path):
    """Show summary stats for a knowledge graph file (.db or .json)."""
    from video_processor.integrators.graph_discovery import describe_graph

    graph_file = Path(path)
    summary = describe_graph(graph_file)

    click.echo(f"File: {graph_file}")
    click.echo(f"Store: {summary['store_type']}")
    click.echo(f"Entities: {summary['entity_count']}")
    click.echo(f"Relationships: {summary['relationship_count']}")

    type_counts = summary["entity_types"]
    if type_counts:
        click.echo("Entity types:")
        # Most frequent types first; sort is stable so ties keep dict order.
        for type_name, total in sorted(type_counts.items(), key=lambda kv: kv[1], reverse=True):
            click.echo(f"  {type_name}: {total}")
|
@kg.command()
@click.argument("db_path", type=click.Path(exists=True))
@click.option("--provider", "-p", type=str, default="auto")
@click.option("--chat-model", type=str, default=None)
@click.option(
    "--format",
    "output_format",
    type=click.Choice(["text", "json"]),
    default="text",
)
@click.pass_context
def classify(ctx, db_path, provider, chat_model, output_format):
    """Classify knowledge graph entities into planning taxonomy types.

    Examples:\n
        planopticon kg classify results/knowledge_graph.db\n
        planopticon kg classify results/knowledge_graph.db --format json
    """
    from video_processor.integrators.graph_store import create_store
    from video_processor.integrators.taxonomy import TaxonomyClassifier

    db_path = Path(db_path)
    store = create_store(db_path)
    # BUG FIX: the early `return` in the empty-result branch previously
    # skipped store.close(), leaking the store handle. try/finally closes
    # it on every exit path.
    try:
        entities = store.get_all_entities()
        relationships = store.get_all_relationships()

        # Optional LLM assist; any provider setup failure degrades to
        # heuristic-only classification rather than aborting.
        pm = None
        if provider != "none":
            try:
                from video_processor.providers.manager import ProviderManager

                pm = ProviderManager(provider=provider if provider != "auto" else None)
                if chat_model:
                    pm.chat_model = chat_model
            except Exception:
                pm = None  # fall back to heuristic-only

        classifier = TaxonomyClassifier(provider_manager=pm)
        planning_entities = classifier.classify_entities(entities, relationships)

        if output_format == "json":
            click.echo(
                json.dumps(
                    [pe.model_dump() for pe in planning_entities],
                    indent=2,
                )
            )
        else:
            if not planning_entities:
                click.echo("No entities matched planning taxonomy types.")
                return
            # Text mode groups results by workstream, most readable first.
            workstreams = classifier.organize_by_workstream(planning_entities)
            for group_name, items in sorted(workstreams.items()):
                click.echo(f"\n{group_name.upper()} ({len(items)})")
                for pe in items:
                    priority_str = f" [{pe.priority}]" if pe.priority else ""
                    click.echo(f"  - {pe.name}{priority_str}")
                    if pe.description:
                        click.echo(f"    {pe.description}")
    finally:
        store.close()
|
@kg.command("from-exchange")
@click.argument("exchange_path", type=click.Path(exists=True))
@click.option(
    "-o",
    "--output",
    "db_path",
    type=click.Path(),
    default=None,
    help="Output .db file path",
)
def kg_from_exchange(exchange_path, db_path):
    """Import a PlanOpticonExchange JSON file into a knowledge graph .db.

    Examples:

        planopticon kg from-exchange exchange.json

        planopticon kg from-exchange exchange.json -o project.db
    """
    from video_processor.exchange import PlanOpticonExchange
    from video_processor.integrators.knowledge_graph import KnowledgeGraph

    payload = PlanOpticonExchange.from_file(exchange_path)

    # Re-shape the exchange payload into KnowledgeGraph's dict format.
    graph_dict = {
        "nodes": [entity.model_dump() for entity in payload.entities],
        "relationships": [link.model_dump() for link in payload.relationships],
        "sources": [source.model_dump() for source in payload.sources],
    }

    target = Path(db_path) if db_path else Path.cwd() / "knowledge_graph.db"
    KnowledgeGraph.from_dict(graph_dict, db_path=target).save(target)

    click.echo(
        f"Imported exchange into {target} "
        f"({len(payload.entities)} entities, "
        f"{len(payload.relationships)} relationships)"
    )
|
@cli.command()
@click.option("--kb", multiple=True, type=click.Path(exists=True), help="Knowledge base paths")
@click.option(
    "--provider",
    "-p",
    type=str,
    default="auto",
    help="LLM provider (auto, openai, anthropic, ...)",
)
@click.option("--chat-model", type=str, default=None, help="Chat model override")
@click.pass_context
def companion(ctx, kb, provider, chat_model):
    """Interactive planning companion with workspace awareness.

    Examples:

        planopticon companion

        planopticon companion --kb ./results

        planopticon companion -p anthropic
    """
    from video_processor.cli.companion import CompanionREPL

    # Hand the selected knowledge bases and model settings to the REPL loop.
    CompanionREPL(kb_paths=list(kb), provider=provider, chat_model=chat_model).run()
|
# Provider names offered by the interactive prompts; mirrors the --provider
# choices accepted by the analyze and batch commands. Previously this list
# was duplicated verbatim in two menu branches.
_PROVIDER_CHOICES = [
    "auto",
    "openai",
    "anthropic",
    "gemini",
    "ollama",
    "azure",
    "together",
    "fireworks",
    "cerebras",
    "xai",
]


def _prompt_depth():
    """Prompt for the processing depth shared by the analyze/batch flows."""
    return click.prompt(
        " Processing depth",
        type=click.Choice(["basic", "standard", "comprehensive"]),
        default="standard",
    )


def _prompt_provider():
    """Prompt for the LLM provider shared by the analyze/batch flows."""
    return click.prompt(
        " Provider",
        type=click.Choice(_PROVIDER_CHOICES),
        default="auto",
    )


def _interactive_menu(ctx):
    """Show an interactive menu when planopticon is run with no arguments.

    Reads a numbered selection and dispatches to the matching CLI command
    via ``ctx.invoke`` so each command's normal behavior is reused.
    """
    click.echo()
    # BUG FIX: banner previously said v0.2.0; keep it in sync with
    # click.version_option("0.5.0") declared on the cli group.
    click.echo(" PlanOpticon v0.5.0")
    click.echo(" Comprehensive Video Analysis & Knowledge Extraction")
    click.echo()
    click.echo(" 1. Analyze a video")
    click.echo(" 2. Batch process a folder")
    click.echo(" 3. List available models")
    click.echo(" 4. Authenticate cloud service")
    click.echo(" 5. Clear cache")
    click.echo(" 6. Show help")
    click.echo(" 7. Query knowledge graph")
    click.echo()

    choice = click.prompt(" Select an option", type=click.IntRange(1, 7))

    if choice == 1:
        input_path = click.prompt(" Video file path", type=click.Path(exists=True))
        output_dir = click.prompt(" Output directory", type=click.Path())
        depth = _prompt_depth()
        provider = _prompt_provider()
        ctx.invoke(
            analyze,
            input=input_path,
            output=output_dir,
            depth=depth,
            focus=None,
            use_gpu=False,
            sampling_rate=0.5,
            change_threshold=0.15,
            periodic_capture=30.0,
            title=None,
            provider=provider,
            vision_model=None,
            chat_model=None,
        )

    elif choice == 2:
        input_dir = click.prompt(" Video directory", type=click.Path(exists=True))
        output_dir = click.prompt(" Output directory", type=click.Path())
        depth = _prompt_depth()
        provider = _prompt_provider()
        ctx.invoke(
            batch,
            input_dir=input_dir,
            output=output_dir,
            depth=depth,
            pattern="*.mp4,*.mkv,*.avi,*.mov,*.webm",
            title="Batch Processing Results",
            provider=provider,
            vision_model=None,
            chat_model=None,
            source="local",
            folder_id=None,
            folder_path=None,
            recursive=True,
        )

    elif choice == 3:
        ctx.invoke(list_models)

    elif choice == 4:
        service = click.prompt(
            " Cloud service",
            type=click.Choice(["google", "dropbox"]),
        )
        ctx.invoke(auth, service=service)

    elif choice == 5:
        cache_dir = click.prompt(" Cache directory path", type=click.Path())
        clear_all = click.confirm(" Clear all entries?", default=True)
        ctx.invoke(
            clear_cache,
            cache_dir=cache_dir,
            older_than=None,
            clear_all=clear_all,
        )

    elif choice == 6:
        click.echo()
        click.echo(ctx.get_help())

    elif choice == 7:
        # Drops straight into the interactive query REPL with defaults.
        ctx.invoke(
            query,
            question=None,
            db_path=None,
            mode="auto",
            output_format="text",
            interactive=True,
            provider="auto",
            chat_model=None,
        )
|
def main():
    """Entry point for command-line usage."""
    # Seed click with an empty context object; the cli group populates it.
    cli(obj={})
|
# Support direct execution (`python <file>.py`) as well as the console script.
if __name__ == "__main__":
    main()