PlanOpticon

planopticon / video_processor / pipeline.py
Source Blame History 418 lines
09a0b7a… leo 1 """Core video processing pipeline — the reusable function both CLI commands call."""
09a0b7a… leo 2
0981a08… noreply 3 import hashlib
09a0b7a… leo 4 import json
09a0b7a… leo 5 import logging
0981a08… noreply 6 import mimetypes
09a0b7a… leo 7 import time
09a0b7a… leo 8 from datetime import datetime
09a0b7a… leo 9 from pathlib import Path
09a0b7a… leo 10 from typing import Optional
09a0b7a… leo 11
287a3bb… leo 12 from tqdm import tqdm
287a3bb… leo 13
09a0b7a… leo 14 from video_processor.analyzers.diagram_analyzer import DiagramAnalyzer
09a0b7a… leo 15 from video_processor.extractors.audio_extractor import AudioExtractor
829e24a… leo 16 from video_processor.extractors.frame_extractor import (
829e24a… leo 17 extract_frames,
829e24a… leo 18 filter_people_frames,
829e24a… leo 19 save_frames,
829e24a… leo 20 )
09a0b7a… leo 21 from video_processor.integrators.knowledge_graph import KnowledgeGraph
09a0b7a… leo 22 from video_processor.integrators.plan_generator import PlanGenerator
09a0b7a… leo 23 from video_processor.models import (
09a0b7a… leo 24 ActionItem,
09a0b7a… leo 25 KeyPoint,
09a0b7a… leo 26 ProcessingStats,
0981a08… noreply 27 ProgressCallback,
09a0b7a… leo 28 VideoManifest,
09a0b7a… leo 29 VideoMetadata,
09a0b7a… leo 30 )
09a0b7a… leo 31 from video_processor.output_structure import create_video_output_dirs, write_video_manifest
09a0b7a… leo 32 from video_processor.providers.manager import ProviderManager
09a0b7a… leo 33 from video_processor.utils.export import export_all_formats
09a0b7a… leo 34
09a0b7a… leo 35 logger = logging.getLogger(__name__)
0981a08… noreply 36
0981a08… noreply 37
def _notify(cb: Optional[ProgressCallback], method: str, *args, **kwargs) -> None:
    """Safely invoke a callback method, logging any errors."""
    if cb is None:
        return
    try:
        # Look the handler up first, then call it; both steps are guarded so a
        # misbehaving (or missing) callback method can never abort the pipeline.
        handler = getattr(cb, method)
        handler(*args, **kwargs)
    except Exception as e:
        logger.warning(f"Progress callback {method} failed: {e}")
287a3bb… leo 46
287a3bb… leo 47
def process_single_video(
    input_path: str | Path,
    output_dir: str | Path,
    provider_manager: Optional[ProviderManager] = None,
    depth: str = "standard",
    focus_areas: Optional[list[str]] = None,
    sampling_rate: float = 0.5,
    change_threshold: float = 0.15,
    periodic_capture_seconds: float = 30.0,
    use_gpu: bool = False,
    title: Optional[str] = None,
    progress_callback: Optional[ProgressCallback] = None,
    speaker_hints: Optional[list[str]] = None,
) -> VideoManifest:
    """
    Run the full analysis pipeline for one video and return its manifest.

    Steps: frames -> audio -> transcription -> diagrams -> knowledge graph ->
    key points & action items -> markdown report -> format export.

    Each step is *resumable*: if its output artifact already exists under
    ``output_dir`` (frames on disk, audio wav, transcript.json, diagram JSON,
    knowledge_graph.db, key_points/action_items JSON, analysis.md), the step
    is skipped and the artifact is loaded instead of recomputed.

    Args:
        input_path: Path to the source video file.
        output_dir: Root output directory; per-video subdirectories are
            created via ``create_video_output_dirs``.
        provider_manager: LLM/transcription provider manager; a default
            ``ProviderManager()`` is constructed when omitted.
        depth: Analysis depth. ``"basic"`` skips diagram analysis entirely;
            ``"standard"`` analyzes at most 10 frames, any other value 20.
        focus_areas: Optional focus filter; diagram analysis runs only when
            this is empty or contains ``"diagrams"``.
        sampling_rate: Frame sampling rate forwarded to ``extract_frames``.
        change_threshold: Scene-change threshold forwarded to
            ``extract_frames``.
        periodic_capture_seconds: Force a periodic frame capture at least
            this often, forwarded to ``extract_frames``.
        use_gpu: Enable GPU for frame extraction (note: inverted into the
            extractor's ``disable_gpu`` flag).
        title: Report title; defaults to ``"Analysis of <video stem>"``.
        progress_callback: Optional observer notified (via ``_notify``, so
            callback errors are logged, never raised) at each step's start
            and completion with ``(step_name, step_index, total_steps)``.
        speaker_hints: Optional speaker names forwarded to transcription.

    Returns:
        A populated ``VideoManifest`` with artifact paths (relative to the
        video's output root), processing stats, key points, action items,
        diagrams and screen captures.
    """
    start_time = time.time()
    input_path = Path(input_path)
    output_dir = Path(output_dir)
    # Fall back to a fresh provider manager when the caller doesn't supply one.
    pm = provider_manager or ProviderManager()
    focus_areas = focus_areas or []

    video_name = input_path.stem
    if not title:
        title = f"Analysis of {video_name}"

    # Create the standardized per-video directory structure
    # (frames/, transcript/, diagrams/, captures/, results/, ...).
    dirs = create_video_output_dirs(output_dir, video_name)

    logger.info(f"Processing: {input_path}")
    logger.info(f"Depth: {depth}, Focus: {focus_areas or 'all'}")

    # Step names are shown in the tqdm bar and passed to the progress callback.
    steps = [
        "Extract frames",
        "Extract audio",
        "Transcribe",
        "Analyze visuals",
        "Build knowledge graph",
        "Extract key points",
        "Generate report",
        "Export formats",
    ]
    pipeline_bar = tqdm(steps, desc="Pipeline", unit="step", position=0)

    total_steps = len(steps)

    # --- Step 1: Extract frames ---
    _notify(progress_callback, "on_step_start", steps[0], 1, total_steps)
    pm.usage.start_step("Frame extraction")
    pipeline_bar.set_description("Pipeline: extracting frames")
    # Resume: any frame_*.jpg already on disk means extraction ran before.
    existing_frames = sorted(dirs["frames"].glob("frame_*.jpg"))
    people_removed = 0
    if existing_frames:
        frame_paths = existing_frames
        logger.info(f"Resuming: found {len(frame_paths)} frames on disk, skipping extraction")
    else:
        logger.info("Extracting video frames...")
        frames = extract_frames(
            input_path,
            sampling_rate=sampling_rate,
            change_threshold=change_threshold,
            periodic_capture_seconds=periodic_capture_seconds,
            disable_gpu=not use_gpu,  # extractor API takes the inverted flag
        )
        logger.info(f"Extracted {len(frames)} raw frames")

        # Filter out people/webcam frames before saving
        frames, people_removed = filter_people_frames(frames)
        frame_paths = save_frames(frames, dirs["frames"], "frame")
        logger.info(f"Saved {len(frames)} content frames ({people_removed} people frames filtered)")
    pipeline_bar.update(1)
    _notify(progress_callback, "on_step_complete", steps[0], 1, total_steps)

    # --- Step 2: Extract audio ---
    _notify(progress_callback, "on_step_start", steps[1], 2, total_steps)
    pm.usage.start_step("Audio extraction")
    pipeline_bar.set_description("Pipeline: extracting audio")
    audio_path = dirs["root"] / "audio" / f"{video_name}.wav"
    audio_extractor = AudioExtractor()
    if audio_path.exists():
        logger.info(f"Resuming: found audio at {audio_path}, skipping extraction")
    else:
        logger.info("Extracting audio...")
        audio_path = audio_extractor.extract_audio(input_path, output_path=audio_path)
    # Properties (notably duration) are needed later for the transcript,
    # knowledge-graph metadata and the manifest, so always query them.
    audio_props = audio_extractor.get_audio_properties(audio_path)
    pipeline_bar.update(1)
    _notify(progress_callback, "on_step_complete", steps[1], 2, total_steps)

    # --- Step 3: Transcribe ---
    _notify(progress_callback, "on_step_start", steps[2], 3, total_steps)
    pm.usage.start_step("Transcription")
    pipeline_bar.set_description("Pipeline: transcribing audio")
    transcript_json = dirs["transcript"] / "transcript.json"
    if transcript_json.exists():
        logger.info("Resuming: found transcript on disk, skipping transcription")
        transcript_data = json.loads(transcript_json.read_text())
        transcript_text = transcript_data.get("text", "")
        segments = transcript_data.get("segments", [])
    else:
        logger.info("Transcribing audio...")
        transcription = pm.transcribe_audio(audio_path, speaker_hints=speaker_hints)
        transcript_text = transcription.get("text", "")
        segments = transcription.get("segments", [])

        # Save transcript files (JSON is the resume marker for this step)
        transcript_data = {
            "text": transcript_text,
            "segments": segments,
            # Prefer the provider-reported duration; fall back to the wav's.
            "duration": transcription.get("duration") or audio_props.get("duration"),
            "language": transcription.get("language"),
            "provider": transcription.get("provider"),
            "model": transcription.get("model"),
        }
        transcript_json.write_text(json.dumps(transcript_data, indent=2))

        transcript_txt = dirs["transcript"] / "transcript.txt"
        transcript_txt.write_text(transcript_text)

    # SRT subtitle file is (re)generated on every run, resume or not.
    transcript_srt = dirs["transcript"] / "transcript.srt"
    srt_lines = []
    for i, seg in enumerate(segments):
        start = seg.get("start", 0)
        end = seg.get("end", 0)
        srt_lines.append(str(i + 1))
        srt_lines.append(f"{_format_srt_time(start)} --> {_format_srt_time(end)}")
        srt_lines.append(seg.get("text", "").strip())
        srt_lines.append("")
    transcript_srt.write_text("\n".join(srt_lines))
    pipeline_bar.update(1)
    _notify(progress_callback, "on_step_complete", steps[2], 3, total_steps)

    # --- Step 4: Diagram extraction ---
    _notify(progress_callback, "on_step_start", steps[3], 4, total_steps)
    pm.usage.start_step("Visual analysis")
    pipeline_bar.set_description("Pipeline: analyzing visuals")
    diagrams = []
    screen_captures = []
    existing_diagrams = (
        sorted(dirs["diagrams"].glob("diagram_*.json")) if dirs["diagrams"].exists() else []
    )
    if existing_diagrams:
        # NOTE(review): resuming here reloads diagrams but leaves
        # screen_captures empty even if capture files exist on disk — confirm
        # whether captures should also be reloaded.
        logger.info(f"Resuming: found {len(existing_diagrams)} diagrams on disk, skipping analysis")
        from video_processor.models import DiagramResult

        for dj in existing_diagrams:
            try:
                diagrams.append(DiagramResult.model_validate_json(dj.read_text()))
            except Exception as e:
                logger.warning(f"Failed to load diagram {dj}: {e}")
    elif depth != "basic" and (not focus_areas or "diagrams" in focus_areas):
        logger.info("Analyzing visual elements...")
        analyzer = DiagramAnalyzer(provider_manager=pm)
        max_frames = 10 if depth == "standard" else 20
        # Evenly sample across all frames rather than just taking the first N
        if len(frame_paths) <= max_frames:
            subset = frame_paths
        else:
            step = len(frame_paths) / max_frames
            subset = [frame_paths[int(i * step)] for i in range(max_frames)]
        diagrams, screen_captures = analyzer.process_frames(
            subset, diagrams_dir=dirs["diagrams"], captures_dir=dirs["captures"]
        )
    pipeline_bar.update(1)
    _notify(progress_callback, "on_step_complete", steps[3], 4, total_steps)

    # --- Step 5: Knowledge graph ---
    _notify(progress_callback, "on_step_start", steps[4], 5, total_steps)
    pm.usage.start_step("Knowledge graph")
    pipeline_bar.set_description("Pipeline: building knowledge graph")
    kg_db_path = dirs["results"] / "knowledge_graph.db"
    kg_json_path = dirs["results"] / "knowledge_graph.json"
    # Generate a stable source ID from the input path (same path -> same ID
    # across runs, which is what makes KG resume/idempotency work).
    source_id = hashlib.sha256(str(input_path).encode()).hexdigest()[:12]
    mime_type = mimetypes.guess_type(str(input_path))[0] or "video/mp4"

    if kg_db_path.exists():
        # Resume: the SQLite db already holds the processed graph; just open it.
        logger.info("Resuming: found knowledge graph on disk, loading")
        kg = KnowledgeGraph(provider_manager=pm, db_path=kg_db_path)
    else:
        logger.info("Building knowledge graph...")
        kg = KnowledgeGraph(provider_manager=pm, db_path=kg_db_path)
        kg.register_source(
            {
                "source_id": source_id,
                "source_type": "video",
                "title": title,
                "path": str(input_path),
                "mime_type": mime_type,
                "ingested_at": datetime.now().isoformat(),
                "metadata": {"duration_seconds": audio_props.get("duration")},
            }
        )
        kg.process_transcript(transcript_data)
        if diagrams:
            diagram_dicts = [d.model_dump() for d in diagrams]
            kg.process_diagrams(diagram_dicts)
        if screen_captures:
            capture_dicts = [sc.model_dump() for sc in screen_captures]
            kg.process_screenshots(capture_dicts)
        # Export JSON copy alongside the SQLite db
        kg.save(kg_json_path)
    pipeline_bar.update(1)
    _notify(progress_callback, "on_step_complete", steps[4], 5, total_steps)

    # --- Step 6: Extract key points & action items ---
    _notify(progress_callback, "on_step_start", steps[5], 6, total_steps)
    pm.usage.start_step("Key points & actions")
    pipeline_bar.set_description("Pipeline: extracting key points")
    kp_path = dirs["results"] / "key_points.json"
    ai_path = dirs["results"] / "action_items.json"
    if kp_path.exists() and ai_path.exists():
        logger.info("Resuming: found key points and action items on disk")
        key_points = [KeyPoint(**item) for item in json.loads(kp_path.read_text())]
        action_items = [ActionItem(**item) for item in json.loads(ai_path.read_text())]
    else:
        # Both helpers are best-effort: on LLM/parsing failure they log and
        # return [], so the pipeline keeps going.
        key_points = _extract_key_points(pm, transcript_text)
        action_items = _extract_action_items(pm, transcript_text)

        kp_path.write_text(json.dumps([kp.model_dump() for kp in key_points], indent=2))
        ai_path.write_text(json.dumps([ai.model_dump() for ai in action_items], indent=2))
    pipeline_bar.update(1)
    _notify(progress_callback, "on_step_complete", steps[5], 6, total_steps)

    # --- Step 7: Generate markdown report ---
    _notify(progress_callback, "on_step_start", steps[6], 7, total_steps)
    pm.usage.start_step("Report generation")
    pipeline_bar.set_description("Pipeline: generating report")
    md_path = dirs["results"] / "analysis.md"
    if md_path.exists():
        logger.info("Resuming: found analysis report on disk, skipping generation")
    else:
        logger.info("Generating report...")
        plan_gen = PlanGenerator(provider_manager=pm, knowledge_graph=kg)
        plan_gen.generate_markdown(
            transcript=transcript_data,
            key_points=[kp.model_dump() for kp in key_points],
            diagrams=[d.model_dump() for d in diagrams],
            knowledge_graph=kg.to_dict(),
            video_title=title,
            output_path=md_path,
        )
    pipeline_bar.update(1)
    _notify(progress_callback, "on_step_complete", steps[6], 7, total_steps)

    # --- Build manifest ---
    # All artifact paths below are relative to the per-video output root.
    elapsed = time.time() - start_time
    manifest = VideoManifest(
        video=VideoMetadata(
            title=title,
            source_path=str(input_path),
            duration_seconds=audio_props.get("duration"),
        ),
        stats=ProcessingStats(
            # NOTE(review): this records the time at the END of processing,
            # not the start (the wall-clock start is `start_time` above) —
            # confirm whether `datetime.fromtimestamp(start_time)` was meant.
            start_time=datetime.now().isoformat(),
            duration_seconds=elapsed,
            frames_extracted=len(frame_paths),
            people_frames_filtered=people_removed,
            diagrams_detected=len(diagrams),
            screen_captures=len(screen_captures),
            transcript_duration_seconds=audio_props.get("duration"),
            models_used=pm.get_models_used(),
        ),
        transcript_json="transcript/transcript.json",
        transcript_txt="transcript/transcript.txt",
        transcript_srt="transcript/transcript.srt",
        analysis_md="results/analysis.md",
        knowledge_graph_json="results/knowledge_graph.json",
        knowledge_graph_db="results/knowledge_graph.db",
        key_points_json="results/key_points.json",
        action_items_json="results/action_items.json",
        key_points=key_points,
        action_items=action_items,
        diagrams=diagrams,
        screen_captures=screen_captures,
        frame_paths=[f"frames/{Path(p).name}" for p in frame_paths],
    )

    # --- Step 8: Export all formats ---
    _notify(progress_callback, "on_step_start", steps[7], 8, total_steps)
    pm.usage.start_step("Export formats")
    pipeline_bar.set_description("Pipeline: exporting formats")
    # export_all_formats may enrich the manifest with export paths.
    manifest = export_all_formats(output_dir, manifest)

    pm.usage.end_step()
    pipeline_bar.update(1)
    _notify(progress_callback, "on_step_complete", steps[7], 8, total_steps)
    pipeline_bar.set_description("Pipeline: complete")
    pipeline_bar.close()

    # Write manifest (done after export so exported paths are included)
    write_video_manifest(manifest, output_dir)

    logger.info(
        f"Processing complete in {elapsed:.1f}s: {len(diagrams)} diagrams, "
        f"{len(screen_captures)} captures, {len(key_points)} key points, "
        f"{len(action_items)} action items"
    )

    return manifest
09a0b7a… leo 351
09a0b7a… leo 352
def _extract_key_points(pm: ProviderManager, text: str) -> list[KeyPoint]:
    """Extract key points from a transcript via the LLM; [] on any failure."""
    from video_processor.utils.json_parsing import parse_json_from_response

    prompt = (
        "Extract the key points from this transcript.\n\n"
        f"TRANSCRIPT:\n{text[:8000]}\n\n"
        'Return a JSON array: [{"point": "...", "topic": "...", "details": "..."}]\n'
        "Return ONLY the JSON array."
    )
    try:
        response = pm.chat([{"role": "user", "content": prompt}], temperature=0.3)
        candidates = parse_json_from_response(response)
        if isinstance(candidates, list):
            # Keep only dict entries that actually carry a non-empty "point".
            results: list[KeyPoint] = []
            for entry in candidates:
                if not isinstance(entry, dict) or not entry.get("point"):
                    continue
                results.append(
                    KeyPoint(
                        point=entry.get("point", ""),
                        topic=entry.get("topic"),
                        details=entry.get("details"),
                    )
                )
            return results
    except Exception as e:
        logger.warning(f"Key point extraction failed: {e}")
    return []
09a0b7a… leo 379
09a0b7a… leo 380
def _extract_action_items(pm: ProviderManager, text: str) -> list[ActionItem]:
    """Extract action items from a transcript via the LLM; [] on any failure."""
    from video_processor.utils.json_parsing import parse_json_from_response

    prompt = (
        "Extract all action items from this transcript.\n\n"
        f"TRANSCRIPT:\n{text[:8000]}\n\n"
        'Return a JSON array: [{"action": "...", "assignee": "...", "deadline": "...", '
        '"priority": "...", "context": "..."}]\n'
        "Return ONLY the JSON array."
    )
    try:
        response = pm.chat([{"role": "user", "content": prompt}], temperature=0.3)
        candidates = parse_json_from_response(response)
        if isinstance(candidates, list):
            # Keep only dict entries that actually carry a non-empty "action".
            results: list[ActionItem] = []
            for entry in candidates:
                if not isinstance(entry, dict) or not entry.get("action"):
                    continue
                results.append(
                    ActionItem(
                        action=entry.get("action", ""),
                        assignee=entry.get("assignee"),
                        deadline=entry.get("deadline"),
                        priority=entry.get("priority"),
                        context=entry.get("context"),
                    )
                )
            return results
    except Exception as e:
        logger.warning(f"Action item extraction failed: {e}")
    return []
09a0b7a… leo 410
09a0b7a… leo 411
09a0b7a… leo 412 def _format_srt_time(seconds: float) -> str:
09a0b7a… leo 413 """Format seconds as SRT timestamp (HH:MM:SS,mmm)."""
09a0b7a… leo 414 h = int(seconds // 3600)
09a0b7a… leo 415 m = int((seconds % 3600) // 60)
09a0b7a… leo 416 s = int(seconds % 60)
09a0b7a… leo 417 ms = int((seconds % 1) * 1000)
09a0b7a… leo 418 return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}"

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button