PlanOpticon

test: add output formatter, callbacks, sources, API spec, and agent skill tests Coverage: 52% -> 61% (647 tests, 0 failures) Closes #92, #93, #94, #95, #96, #97, #98

lmata 2026-03-07 22:28 trunk
Commit d0f709185b6ff355092e9c52dda62aa5c165bbf2d63663975aee529d889aea14
--- a/tests/test_agent_skills.py
+++ b/tests/test_agent_skills.py
@@ -0,0 +1,405 @@
1
+"""Tests for agent skill execute() methods with mocked context."""
2
+
3
+import json
4
+from dataclasses import dataclass
5
+from unittest.mock import MagicMock
6
+
7
+import pytest
8
+
9
+from video_processor.agent.skills.base import (
10
+ AgentContext,
11
+ Artifact,
12
+ _skills,
13
+)
14
+
15
+# ---------------------------------------------------------------------------
16
+# Fixtures
17
+# ---------------------------------------------------------------------------
18
+
19
+
20
+@pytest.fixture(autouse=True)
21
+def _clean_skill_registry():
22
+ """Save and restore the global skill registry between tests."""
23
+ original = dict(_skills)
24
+ yield
25
+ _skills.clear()
26
+ _skills.update(original)
27
+
28
+
29
+@dataclass
30
+class FakeEntity:
31
+ name: str
32
+ type: str
33
+
34
+ def __str__(self):
35
+ return self.name
36
+
37
+
38
+class FakeQueryResult:
39
+ """Mimics QueryResult.to_text()."""
40
+
41
+ def __init__(self, text="Stats: 10 entities, 5 relationships"):
42
+ self._text = text
43
+
44
+ def to_text(self):
45
+ return self._text
46
+
47
+
48
+def _make_context(
49
+ chat_response="# Generated Content\n\nSome markdown content.",
50
+ planning_entities=None,
51
+):
52
+ """Build an AgentContext with mocked query_engine and provider_manager."""
53
+ ctx = AgentContext()
54
+
55
+ qe = MagicMock()
56
+ qe.stats.return_value = FakeQueryResult("Stats: 10 entities, 5 rels")
57
+ qe.entities.return_value = FakeQueryResult("Entity1, Entity2")
58
+ qe.relationships.return_value = FakeQueryResult("Entity1 -> Entity2")
59
+ ctx.query_engine = qe
60
+
61
+ pm = MagicMock()
62
+ pm.chat.return_value = chat_response
63
+ ctx.provider_manager = pm
64
+
65
+ ctx.knowledge_graph = MagicMock()
66
+
67
+ if planning_entities is not None:
68
+ ctx.planning_entities = planning_entities
69
+ else:
70
+ ctx.planning_entities = [
71
+ FakeEntity(name="Auth system", type="feature"),
72
+ FakeEntity(name="Launch v1", type="milestone"),
73
+ FakeEntity(name="Must be fast", type="constraint"),
74
+ FakeEntity(name="Build dashboard", type="goal"),
75
+ FakeEntity(name="API depends on auth", type="dependency"),
76
+ FakeEntity(name="User login", type="requirement"),
77
+ ]
78
+
79
+ return ctx
80
+
81
+
82
+# ---------------------------------------------------------------------------
83
+# ProjectPlanSkill
84
+# ---------------------------------------------------------------------------
85
+
86
+
87
+class TestProjectPlanSkill:
88
+ def test_execute_returns_artifact(self):
89
+ from video_processor.agent.skills.project_plan import ProjectPlanSkill
90
+
91
+ skill = ProjectPlanSkill()
92
+ ctx = _make_context()
93
+ artifact = skill.execute(ctx)
94
+
95
+ assert isinstance(artifact, Artifact)
96
+ assert artifact.artifact_type == "project_plan"
97
+ assert artifact.format == "markdown"
98
+ assert len(artifact.content) > 0
99
+
100
+ def test_execute_calls_provider(self):
101
+ from video_processor.agent.skills.project_plan import ProjectPlanSkill
102
+
103
+ skill = ProjectPlanSkill()
104
+ ctx = _make_context()
105
+ skill.execute(ctx)
106
+
107
+ ctx.provider_manager.chat.assert_called_once()
108
+ call_args = ctx.provider_manager.chat.call_args
109
+ messages = call_args[1]["messages"] if "messages" in call_args[1] else call_args[0][0]
110
+ assert len(messages) == 1
111
+ assert messages[0]["role"] == "user"
112
+
113
+ def test_execute_queries_graph(self):
114
+ from video_processor.agent.skills.project_plan import ProjectPlanSkill
115
+
116
+ skill = ProjectPlanSkill()
117
+ ctx = _make_context()
118
+ skill.execute(ctx)
119
+
120
+ ctx.query_engine.stats.assert_called_once()
121
+ ctx.query_engine.entities.assert_called_once()
122
+ ctx.query_engine.relationships.assert_called_once()
123
+
124
+
125
+# ---------------------------------------------------------------------------
126
+# PRDSkill
127
+# ---------------------------------------------------------------------------
128
+
129
+
130
+class TestPRDSkill:
131
+ def test_execute_returns_artifact(self):
132
+ from video_processor.agent.skills.prd import PRDSkill
133
+
134
+ skill = PRDSkill()
135
+ ctx = _make_context()
136
+ artifact = skill.execute(ctx)
137
+
138
+ assert isinstance(artifact, Artifact)
139
+ assert artifact.artifact_type == "prd"
140
+ assert artifact.format == "markdown"
141
+
142
+ def test_execute_filters_relevant_entities(self):
143
+ from video_processor.agent.skills.prd import PRDSkill
144
+
145
+ skill = PRDSkill()
146
+ ctx = _make_context()
147
+ skill.execute(ctx)
148
+
149
+ # Should still call provider
150
+ ctx.provider_manager.chat.assert_called_once()
151
+
152
+ def test_execute_with_no_relevant_entities(self):
153
+ from video_processor.agent.skills.prd import PRDSkill
154
+
155
+ skill = PRDSkill()
156
+ ctx = _make_context(
157
+ planning_entities=[
158
+ FakeEntity(name="Some goal", type="goal"),
159
+ ]
160
+ )
161
+ artifact = skill.execute(ctx)
162
+
163
+ assert isinstance(artifact, Artifact)
164
+ assert artifact.artifact_type == "prd"
165
+
166
+
167
+# ---------------------------------------------------------------------------
168
+# RoadmapSkill
169
+# ---------------------------------------------------------------------------
170
+
171
+
172
+class TestRoadmapSkill:
173
+ def test_execute_returns_artifact(self):
174
+ from video_processor.agent.skills.roadmap import RoadmapSkill
175
+
176
+ skill = RoadmapSkill()
177
+ ctx = _make_context()
178
+ artifact = skill.execute(ctx)
179
+
180
+ assert isinstance(artifact, Artifact)
181
+ assert artifact.artifact_type == "roadmap"
182
+ assert artifact.format == "markdown"
183
+
184
+ def test_execute_with_no_relevant_entities(self):
185
+ from video_processor.agent.skills.roadmap import RoadmapSkill
186
+
187
+ skill = RoadmapSkill()
188
+ ctx = _make_context(
189
+ planning_entities=[
190
+ FakeEntity(name="Some constraint", type="constraint"),
191
+ ]
192
+ )
193
+ artifact = skill.execute(ctx)
194
+
195
+ assert isinstance(artifact, Artifact)
196
+
197
+
198
+# ---------------------------------------------------------------------------
199
+# TaskBreakdownSkill
200
+# ---------------------------------------------------------------------------
201
+
202
+
203
+class TestTaskBreakdownSkill:
204
+ def test_execute_returns_artifact_json(self):
205
+ from video_processor.agent.skills.task_breakdown import TaskBreakdownSkill
206
+
207
+ tasks_json = json.dumps(
208
+ [
209
+ {
210
+ "id": "T1",
211
+ "title": "Setup",
212
+ "description": "Init",
213
+ "depends_on": [],
214
+ "priority": "high",
215
+ "estimate": "1d",
216
+ "assignee_role": "dev",
217
+ },
218
+ ]
219
+ )
220
+ skill = TaskBreakdownSkill()
221
+ ctx = _make_context(chat_response=tasks_json)
222
+ artifact = skill.execute(ctx)
223
+
224
+ assert isinstance(artifact, Artifact)
225
+ assert artifact.artifact_type == "task_list"
226
+ assert artifact.format == "json"
227
+ assert "tasks" in artifact.metadata
228
+ assert len(artifact.metadata["tasks"]) == 1
229
+
230
+ def test_execute_with_non_json_response(self):
231
+ from video_processor.agent.skills.task_breakdown import TaskBreakdownSkill
232
+
233
+ skill = TaskBreakdownSkill()
234
+ ctx = _make_context(chat_response="Not valid JSON at all")
235
+ artifact = skill.execute(ctx)
236
+
237
+ assert isinstance(artifact, Artifact)
238
+ assert artifact.artifact_type == "task_list"
239
+
240
+ def test_execute_with_no_relevant_entities(self):
241
+ from video_processor.agent.skills.task_breakdown import TaskBreakdownSkill
242
+
243
+ tasks_json = json.dumps([])
244
+ skill = TaskBreakdownSkill()
245
+ ctx = _make_context(
246
+ chat_response=tasks_json,
247
+ planning_entities=[FakeEntity(name="X", type="constraint")],
248
+ )
249
+ artifact = skill.execute(ctx)
250
+ assert artifact.metadata["tasks"] == []
251
+
252
+
253
+# ---------------------------------------------------------------------------
254
+# DocGeneratorSkill
255
+# ---------------------------------------------------------------------------
256
+
257
+
258
+class TestDocGeneratorSkill:
259
+ def test_execute_default_type(self):
260
+ from video_processor.agent.skills.doc_generator import DocGeneratorSkill
261
+
262
+ skill = DocGeneratorSkill()
263
+ ctx = _make_context()
264
+ artifact = skill.execute(ctx)
265
+
266
+ assert isinstance(artifact, Artifact)
267
+ assert artifact.artifact_type == "document"
268
+ assert artifact.format == "markdown"
269
+ assert artifact.metadata["doc_type"] == "technical_doc"
270
+
271
+ def test_execute_adr_type(self):
272
+ from video_processor.agent.skills.doc_generator import DocGeneratorSkill
273
+
274
+ skill = DocGeneratorSkill()
275
+ ctx = _make_context()
276
+ artifact = skill.execute(ctx, doc_type="adr")
277
+
278
+ assert artifact.metadata["doc_type"] == "adr"
279
+
280
+ def test_execute_meeting_notes_type(self):
281
+ from video_processor.agent.skills.doc_generator import DocGeneratorSkill
282
+
283
+ skill = DocGeneratorSkill()
284
+ ctx = _make_context()
285
+ artifact = skill.execute(ctx, doc_type="meeting_notes")
286
+
287
+ assert artifact.metadata["doc_type"] == "meeting_notes"
288
+
289
+ def test_execute_unknown_type_falls_back(self):
290
+ from video_processor.agent.skills.doc_generator import DocGeneratorSkill
291
+
292
+ skill = DocGeneratorSkill()
293
+ ctx = _make_context()
294
+ artifact = skill.execute(ctx, doc_type="unknown_type")
295
+
296
+ # Falls back to technical_doc prompt
297
+ assert artifact.artifact_type == "document"
298
+
299
+
300
+# ---------------------------------------------------------------------------
301
+# RequirementsChatSkill
302
+# ---------------------------------------------------------------------------
303
+
304
+
305
+class TestRequirementsChatSkill:
306
+ def test_execute_returns_artifact(self):
307
+ from video_processor.agent.skills.requirements_chat import RequirementsChatSkill
308
+
309
+ questions = {
310
+ "questions": [
311
+ {"id": "Q1", "category": "goals", "question": "What?", "context": "Why"},
312
+ ]
313
+ }
314
+ skill = RequirementsChatSkill()
315
+ ctx = _make_context(chat_response=json.dumps(questions))
316
+ artifact = skill.execute(ctx)
317
+
318
+ assert isinstance(artifact, Artifact)
319
+ assert artifact.artifact_type == "requirements"
320
+ assert artifact.format == "json"
321
+ assert artifact.metadata["stage"] == "questionnaire"
322
+
323
+ def test_gather_requirements(self):
324
+ from video_processor.agent.skills.requirements_chat import RequirementsChatSkill
325
+
326
+ reqs = {
327
+ "goals": ["Build auth"],
328
+ "constraints": ["Budget < 10k"],
329
+ "priorities": ["Security"],
330
+ "scope": {"in_scope": ["Login"], "out_of_scope": ["SSO"]},
331
+ }
332
+ skill = RequirementsChatSkill()
333
+ ctx = _make_context(chat_response=json.dumps(reqs))
334
+ result = skill.gather_requirements(ctx, {"Q1": "We need auth", "Q2": "Budget is limited"})
335
+
336
+ assert isinstance(result, dict)
337
+
338
+ def test_gather_requirements_non_json_response(self):
339
+ from video_processor.agent.skills.requirements_chat import RequirementsChatSkill
340
+
341
+ skill = RequirementsChatSkill()
342
+ ctx = _make_context(chat_response="Not JSON")
343
+ result = skill.gather_requirements(ctx, {"Q1": "answer"})
344
+
345
+ assert isinstance(result, dict)
346
+
347
+
348
+# ---------------------------------------------------------------------------
349
+# Skill metadata
350
+# ---------------------------------------------------------------------------
351
+
352
+
353
+class TestSkillMetadata:
354
+ def test_project_plan_name(self):
355
+ from video_processor.agent.skills.project_plan import ProjectPlanSkill
356
+
357
+ assert ProjectPlanSkill.name == "project_plan"
358
+
359
+ def test_prd_name(self):
360
+ from video_processor.agent.skills.prd import PRDSkill
361
+
362
+ assert PRDSkill.name == "prd"
363
+
364
+ def test_roadmap_name(self):
365
+ from video_processor.agent.skills.roadmap import RoadmapSkill
366
+
367
+ assert RoadmapSkill.name == "roadmap"
368
+
369
+ def test_task_breakdown_name(self):
370
+ from video_processor.agent.skills.task_breakdown import TaskBreakdownSkill
371
+
372
+ assert TaskBreakdownSkill.name == "task_breakdown"
373
+
374
+ def test_doc_generator_name(self):
375
+ from video_processor.agent.skills.doc_generator import DocGeneratorSkill
376
+
377
+ assert DocGeneratorSkill.name == "doc_generator"
378
+
379
+ def test_requirements_chat_name(self):
380
+ from video_processor.agent.skills.requirements_chat import RequirementsChatSkill
381
+
382
+ assert RequirementsChatSkill.name == "requirements_chat"
383
+
384
+ def test_can_execute_with_context(self):
385
+ from video_processor.agent.skills.project_plan import ProjectPlanSkill
386
+
387
+ skill = ProjectPlanSkill()
388
+ ctx = _make_context()
389
+ assert skill.can_execute(ctx) is True
390
+
391
+ def test_can_execute_without_kg(self):
392
+ from video_processor.agent.skills.project_plan import ProjectPlanSkill
393
+
394
+ skill = ProjectPlanSkill()
395
+ ctx = _make_context()
396
+ ctx.knowledge_graph = None
397
+ assert skill.can_execute(ctx) is False
398
+
399
+ def test_can_execute_without_provider(self):
400
+ from video_processor.agent.skills.project_plan import ProjectPlanSkill
401
+
402
+ skill = ProjectPlanSkill()
403
+ ctx = _make_context()
404
+ ctx.provider_manager = None
405
+ assert skill.can_execute(ctx) is False
--- a/tests/test_agent_skills.py
+++ b/tests/test_agent_skills.py
@@ -0,0 +1,405 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/tests/test_agent_skills.py
+++ b/tests/test_agent_skills.py
@@ -0,0 +1,405 @@
1 """Tests for agent skill execute() methods with mocked context."""
2
3 import json
4 from dataclasses import dataclass
5 from unittest.mock import MagicMock
6
7 import pytest
8
9 from video_processor.agent.skills.base import (
10 AgentContext,
11 Artifact,
12 _skills,
13 )
14
15 # ---------------------------------------------------------------------------
16 # Fixtures
17 # ---------------------------------------------------------------------------
18
19
20 @pytest.fixture(autouse=True)
21 def _clean_skill_registry():
22 """Save and restore the global skill registry between tests."""
23 original = dict(_skills)
24 yield
25 _skills.clear()
26 _skills.update(original)
27
28
29 @dataclass
30 class FakeEntity:
31 name: str
32 type: str
33
34 def __str__(self):
35 return self.name
36
37
38 class FakeQueryResult:
39 """Mimics QueryResult.to_text()."""
40
41 def __init__(self, text="Stats: 10 entities, 5 relationships"):
42 self._text = text
43
44 def to_text(self):
45 return self._text
46
47
48 def _make_context(
49 chat_response="# Generated Content\n\nSome markdown content.",
50 planning_entities=None,
51 ):
52 """Build an AgentContext with mocked query_engine and provider_manager."""
53 ctx = AgentContext()
54
55 qe = MagicMock()
56 qe.stats.return_value = FakeQueryResult("Stats: 10 entities, 5 rels")
57 qe.entities.return_value = FakeQueryResult("Entity1, Entity2")
58 qe.relationships.return_value = FakeQueryResult("Entity1 -> Entity2")
59 ctx.query_engine = qe
60
61 pm = MagicMock()
62 pm.chat.return_value = chat_response
63 ctx.provider_manager = pm
64
65 ctx.knowledge_graph = MagicMock()
66
67 if planning_entities is not None:
68 ctx.planning_entities = planning_entities
69 else:
70 ctx.planning_entities = [
71 FakeEntity(name="Auth system", type="feature"),
72 FakeEntity(name="Launch v1", type="milestone"),
73 FakeEntity(name="Must be fast", type="constraint"),
74 FakeEntity(name="Build dashboard", type="goal"),
75 FakeEntity(name="API depends on auth", type="dependency"),
76 FakeEntity(name="User login", type="requirement"),
77 ]
78
79 return ctx
80
81
82 # ---------------------------------------------------------------------------
83 # ProjectPlanSkill
84 # ---------------------------------------------------------------------------
85
86
87 class TestProjectPlanSkill:
88 def test_execute_returns_artifact(self):
89 from video_processor.agent.skills.project_plan import ProjectPlanSkill
90
91 skill = ProjectPlanSkill()
92 ctx = _make_context()
93 artifact = skill.execute(ctx)
94
95 assert isinstance(artifact, Artifact)
96 assert artifact.artifact_type == "project_plan"
97 assert artifact.format == "markdown"
98 assert len(artifact.content) > 0
99
100 def test_execute_calls_provider(self):
101 from video_processor.agent.skills.project_plan import ProjectPlanSkill
102
103 skill = ProjectPlanSkill()
104 ctx = _make_context()
105 skill.execute(ctx)
106
107 ctx.provider_manager.chat.assert_called_once()
108 call_args = ctx.provider_manager.chat.call_args
109 messages = call_args[1]["messages"] if "messages" in call_args[1] else call_args[0][0]
110 assert len(messages) == 1
111 assert messages[0]["role"] == "user"
112
113 def test_execute_queries_graph(self):
114 from video_processor.agent.skills.project_plan import ProjectPlanSkill
115
116 skill = ProjectPlanSkill()
117 ctx = _make_context()
118 skill.execute(ctx)
119
120 ctx.query_engine.stats.assert_called_once()
121 ctx.query_engine.entities.assert_called_once()
122 ctx.query_engine.relationships.assert_called_once()
123
124
125 # ---------------------------------------------------------------------------
126 # PRDSkill
127 # ---------------------------------------------------------------------------
128
129
130 class TestPRDSkill:
131 def test_execute_returns_artifact(self):
132 from video_processor.agent.skills.prd import PRDSkill
133
134 skill = PRDSkill()
135 ctx = _make_context()
136 artifact = skill.execute(ctx)
137
138 assert isinstance(artifact, Artifact)
139 assert artifact.artifact_type == "prd"
140 assert artifact.format == "markdown"
141
142 def test_execute_filters_relevant_entities(self):
143 from video_processor.agent.skills.prd import PRDSkill
144
145 skill = PRDSkill()
146 ctx = _make_context()
147 skill.execute(ctx)
148
149 # Should still call provider
150 ctx.provider_manager.chat.assert_called_once()
151
152 def test_execute_with_no_relevant_entities(self):
153 from video_processor.agent.skills.prd import PRDSkill
154
155 skill = PRDSkill()
156 ctx = _make_context(
157 planning_entities=[
158 FakeEntity(name="Some goal", type="goal"),
159 ]
160 )
161 artifact = skill.execute(ctx)
162
163 assert isinstance(artifact, Artifact)
164 assert artifact.artifact_type == "prd"
165
166
167 # ---------------------------------------------------------------------------
168 # RoadmapSkill
169 # ---------------------------------------------------------------------------
170
171
172 class TestRoadmapSkill:
173 def test_execute_returns_artifact(self):
174 from video_processor.agent.skills.roadmap import RoadmapSkill
175
176 skill = RoadmapSkill()
177 ctx = _make_context()
178 artifact = skill.execute(ctx)
179
180 assert isinstance(artifact, Artifact)
181 assert artifact.artifact_type == "roadmap"
182 assert artifact.format == "markdown"
183
184 def test_execute_with_no_relevant_entities(self):
185 from video_processor.agent.skills.roadmap import RoadmapSkill
186
187 skill = RoadmapSkill()
188 ctx = _make_context(
189 planning_entities=[
190 FakeEntity(name="Some constraint", type="constraint"),
191 ]
192 )
193 artifact = skill.execute(ctx)
194
195 assert isinstance(artifact, Artifact)
196
197
198 # ---------------------------------------------------------------------------
199 # TaskBreakdownSkill
200 # ---------------------------------------------------------------------------
201
202
203 class TestTaskBreakdownSkill:
204 def test_execute_returns_artifact_json(self):
205 from video_processor.agent.skills.task_breakdown import TaskBreakdownSkill
206
207 tasks_json = json.dumps(
208 [
209 {
210 "id": "T1",
211 "title": "Setup",
212 "description": "Init",
213 "depends_on": [],
214 "priority": "high",
215 "estimate": "1d",
216 "assignee_role": "dev",
217 },
218 ]
219 )
220 skill = TaskBreakdownSkill()
221 ctx = _make_context(chat_response=tasks_json)
222 artifact = skill.execute(ctx)
223
224 assert isinstance(artifact, Artifact)
225 assert artifact.artifact_type == "task_list"
226 assert artifact.format == "json"
227 assert "tasks" in artifact.metadata
228 assert len(artifact.metadata["tasks"]) == 1
229
230 def test_execute_with_non_json_response(self):
231 from video_processor.agent.skills.task_breakdown import TaskBreakdownSkill
232
233 skill = TaskBreakdownSkill()
234 ctx = _make_context(chat_response="Not valid JSON at all")
235 artifact = skill.execute(ctx)
236
237 assert isinstance(artifact, Artifact)
238 assert artifact.artifact_type == "task_list"
239
240 def test_execute_with_no_relevant_entities(self):
241 from video_processor.agent.skills.task_breakdown import TaskBreakdownSkill
242
243 tasks_json = json.dumps([])
244 skill = TaskBreakdownSkill()
245 ctx = _make_context(
246 chat_response=tasks_json,
247 planning_entities=[FakeEntity(name="X", type="constraint")],
248 )
249 artifact = skill.execute(ctx)
250 assert artifact.metadata["tasks"] == []
251
252
253 # ---------------------------------------------------------------------------
254 # DocGeneratorSkill
255 # ---------------------------------------------------------------------------
256
257
258 class TestDocGeneratorSkill:
259 def test_execute_default_type(self):
260 from video_processor.agent.skills.doc_generator import DocGeneratorSkill
261
262 skill = DocGeneratorSkill()
263 ctx = _make_context()
264 artifact = skill.execute(ctx)
265
266 assert isinstance(artifact, Artifact)
267 assert artifact.artifact_type == "document"
268 assert artifact.format == "markdown"
269 assert artifact.metadata["doc_type"] == "technical_doc"
270
271 def test_execute_adr_type(self):
272 from video_processor.agent.skills.doc_generator import DocGeneratorSkill
273
274 skill = DocGeneratorSkill()
275 ctx = _make_context()
276 artifact = skill.execute(ctx, doc_type="adr")
277
278 assert artifact.metadata["doc_type"] == "adr"
279
280 def test_execute_meeting_notes_type(self):
281 from video_processor.agent.skills.doc_generator import DocGeneratorSkill
282
283 skill = DocGeneratorSkill()
284 ctx = _make_context()
285 artifact = skill.execute(ctx, doc_type="meeting_notes")
286
287 assert artifact.metadata["doc_type"] == "meeting_notes"
288
289 def test_execute_unknown_type_falls_back(self):
290 from video_processor.agent.skills.doc_generator import DocGeneratorSkill
291
292 skill = DocGeneratorSkill()
293 ctx = _make_context()
294 artifact = skill.execute(ctx, doc_type="unknown_type")
295
296 # Falls back to technical_doc prompt
297 assert artifact.artifact_type == "document"
298
299
300 # ---------------------------------------------------------------------------
301 # RequirementsChatSkill
302 # ---------------------------------------------------------------------------
303
304
305 class TestRequirementsChatSkill:
306 def test_execute_returns_artifact(self):
307 from video_processor.agent.skills.requirements_chat import RequirementsChatSkill
308
309 questions = {
310 "questions": [
311 {"id": "Q1", "category": "goals", "question": "What?", "context": "Why"},
312 ]
313 }
314 skill = RequirementsChatSkill()
315 ctx = _make_context(chat_response=json.dumps(questions))
316 artifact = skill.execute(ctx)
317
318 assert isinstance(artifact, Artifact)
319 assert artifact.artifact_type == "requirements"
320 assert artifact.format == "json"
321 assert artifact.metadata["stage"] == "questionnaire"
322
323 def test_gather_requirements(self):
324 from video_processor.agent.skills.requirements_chat import RequirementsChatSkill
325
326 reqs = {
327 "goals": ["Build auth"],
328 "constraints": ["Budget < 10k"],
329 "priorities": ["Security"],
330 "scope": {"in_scope": ["Login"], "out_of_scope": ["SSO"]},
331 }
332 skill = RequirementsChatSkill()
333 ctx = _make_context(chat_response=json.dumps(reqs))
334 result = skill.gather_requirements(ctx, {"Q1": "We need auth", "Q2": "Budget is limited"})
335
336 assert isinstance(result, dict)
337
338 def test_gather_requirements_non_json_response(self):
339 from video_processor.agent.skills.requirements_chat import RequirementsChatSkill
340
341 skill = RequirementsChatSkill()
342 ctx = _make_context(chat_response="Not JSON")
343 result = skill.gather_requirements(ctx, {"Q1": "answer"})
344
345 assert isinstance(result, dict)
346
347
348 # ---------------------------------------------------------------------------
349 # Skill metadata
350 # ---------------------------------------------------------------------------
351
352
353 class TestSkillMetadata:
354 def test_project_plan_name(self):
355 from video_processor.agent.skills.project_plan import ProjectPlanSkill
356
357 assert ProjectPlanSkill.name == "project_plan"
358
359 def test_prd_name(self):
360 from video_processor.agent.skills.prd import PRDSkill
361
362 assert PRDSkill.name == "prd"
363
364 def test_roadmap_name(self):
365 from video_processor.agent.skills.roadmap import RoadmapSkill
366
367 assert RoadmapSkill.name == "roadmap"
368
369 def test_task_breakdown_name(self):
370 from video_processor.agent.skills.task_breakdown import TaskBreakdownSkill
371
372 assert TaskBreakdownSkill.name == "task_breakdown"
373
374 def test_doc_generator_name(self):
375 from video_processor.agent.skills.doc_generator import DocGeneratorSkill
376
377 assert DocGeneratorSkill.name == "doc_generator"
378
379 def test_requirements_chat_name(self):
380 from video_processor.agent.skills.requirements_chat import RequirementsChatSkill
381
382 assert RequirementsChatSkill.name == "requirements_chat"
383
384 def test_can_execute_with_context(self):
385 from video_processor.agent.skills.project_plan import ProjectPlanSkill
386
387 skill = ProjectPlanSkill()
388 ctx = _make_context()
389 assert skill.can_execute(ctx) is True
390
391 def test_can_execute_without_kg(self):
392 from video_processor.agent.skills.project_plan import ProjectPlanSkill
393
394 skill = ProjectPlanSkill()
395 ctx = _make_context()
396 ctx.knowledge_graph = None
397 assert skill.can_execute(ctx) is False
398
399 def test_can_execute_without_provider(self):
400 from video_processor.agent.skills.project_plan import ProjectPlanSkill
401
402 skill = ProjectPlanSkill()
403 ctx = _make_context()
404 ctx.provider_manager = None
405 assert skill.can_execute(ctx) is False
--- a/tests/test_api_spec.py
+++ b/tests/test_api_spec.py
@@ -0,0 +1,114 @@
1
+"""Tests for video_processor.api.openapi_spec."""
2
+
3
+from video_processor.api.openapi_spec import get_openapi_spec
4
+
5
+
6
+def test_returns_dict():
7
+ spec = get_openapi_spec()
8
+ assert isinstance(spec, dict)
9
+
10
+
11
+def test_has_top_level_keys():
12
+ spec = get_openapi_spec()
13
+ for key in ("openapi", "info", "paths", "components"):
14
+ assert key in spec, f"Missing top-level key: {key}"
15
+
16
+
17
+def test_openapi_version():
18
+ spec = get_openapi_spec()
19
+ assert spec["openapi"].startswith("3.0")
20
+
21
+
22
+def test_info_section():
23
+ spec = get_openapi_spec()
24
+ info = spec["info"]
25
+ assert "title" in info
26
+ assert "version" in info
27
+ assert "PlanOpticon" in info["title"]
28
+
29
+
30
+def test_expected_paths():
31
+ spec = get_openapi_spec()
32
+ expected_paths = [
33
+ "/analyze",
34
+ "/jobs/{id}",
35
+ "/knowledge-graph/{id}/entities",
36
+ "/knowledge-graph/{id}/relationships",
37
+ "/knowledge-graph/{id}/query",
38
+ ]
39
+ for path in expected_paths:
40
+ assert path in spec["paths"], f"Missing path: {path}"
41
+
42
+
43
+def test_analyze_endpoint():
44
+ spec = get_openapi_spec()
45
+ analyze = spec["paths"]["/analyze"]
46
+ assert "post" in analyze
47
+ post = analyze["post"]
48
+ assert "summary" in post
49
+ assert "requestBody" in post
50
+ assert "responses" in post
51
+ assert "202" in post["responses"]
52
+
53
+
54
+def test_jobs_endpoint():
55
+ spec = get_openapi_spec()
56
+ jobs = spec["paths"]["/jobs/{id}"]
57
+ assert "get" in jobs
58
+ get = jobs["get"]
59
+ assert "parameters" in get
60
+ assert get["parameters"][0]["name"] == "id"
61
+
62
+
63
+def test_entities_endpoint():
64
+ spec = get_openapi_spec()
65
+ entities = spec["paths"]["/knowledge-graph/{id}/entities"]
66
+ assert "get" in entities
67
+
68
+
69
+def test_relationships_endpoint():
70
+ spec = get_openapi_spec()
71
+ rels = spec["paths"]["/knowledge-graph/{id}/relationships"]
72
+ assert "get" in rels
73
+
74
+
75
+def test_query_endpoint():
76
+ spec = get_openapi_spec()
77
+ query = spec["paths"]["/knowledge-graph/{id}/query"]
78
+ assert "get" in query
79
+ params = query["get"]["parameters"]
80
+ param_names = [p["name"] for p in params]
81
+ assert "q" in param_names
82
+
83
+
84
+def test_component_schemas():
85
+ spec = get_openapi_spec()
86
+ schemas = spec["components"]["schemas"]
87
+ for schema_name in ("Job", "Entity", "Relationship"):
88
+ assert schema_name in schemas, f"Missing schema: {schema_name}"
89
+
90
+
91
+def test_job_schema_properties():
92
+ spec = get_openapi_spec()
93
+ job = spec["components"]["schemas"]["Job"]
94
+ props = job["properties"]
95
+ assert "id" in props
96
+ assert "status" in props
97
+ assert "progress" in props
98
+
99
+
100
+def test_job_status_enum():
101
+ spec = get_openapi_spec()
102
+ status = spec["components"]["schemas"]["Job"]["properties"]["status"]
103
+ assert "enum" in status
104
+ assert "pending" in status["enum"]
105
+ assert "completed" in status["enum"]
106
+
107
+
108
+def test_analyze_request_body_schema():
109
+ spec = get_openapi_spec()
110
+ schema = spec["paths"]["/analyze"]["post"]["requestBody"]["content"]["application/json"][
111
+ "schema"
112
+ ]
113
+ assert "video_url" in schema["properties"]
114
+ assert "video_url" in schema["required"]
--- a/tests/test_api_spec.py
+++ b/tests/test_api_spec.py
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/tests/test_api_spec.py
+++ b/tests/test_api_spec.py
@@ -0,0 +1,114 @@
1 """Tests for video_processor.api.openapi_spec."""
2
3 from video_processor.api.openapi_spec import get_openapi_spec
4
5
def test_returns_dict():
    """The spec builder must yield a plain dictionary."""
    document = get_openapi_spec()
    assert isinstance(document, dict)


def test_has_top_level_keys():
    """All four mandatory OpenAPI root sections are present."""
    document = get_openapi_spec()
    for key in ("openapi", "info", "paths", "components"):
        assert key in document, f"Missing top-level key: {key}"


def test_openapi_version():
    """The declared spec version belongs to the 3.0.x family."""
    document = get_openapi_spec()
    assert document["openapi"].startswith("3.0")
20
21
def test_info_section():
    """The info object carries a title and version naming PlanOpticon."""
    document = get_openapi_spec()
    metadata = document["info"]
    assert "title" in metadata
    assert "version" in metadata
    assert "PlanOpticon" in metadata["title"]


def test_expected_paths():
    """Every documented endpoint path is declared in the spec."""
    document = get_openapi_spec()
    declared = document["paths"]
    for path in (
        "/analyze",
        "/jobs/{id}",
        "/knowledge-graph/{id}/entities",
        "/knowledge-graph/{id}/relationships",
        "/knowledge-graph/{id}/query",
    ):
        assert path in declared, f"Missing path: {path}"
41
42
def test_analyze_endpoint():
    """POST /analyze is documented with a summary, body, and 202 response."""
    document = get_openapi_spec()
    analyze_item = document["paths"]["/analyze"]
    assert "post" in analyze_item
    operation = analyze_item["post"]
    for field in ("summary", "requestBody", "responses"):
        assert field in operation
    assert "202" in operation["responses"]


def test_jobs_endpoint():
    """GET /jobs/{id} takes the job id as its first parameter."""
    document = get_openapi_spec()
    job_item = document["paths"]["/jobs/{id}"]
    assert "get" in job_item
    operation = job_item["get"]
    assert "parameters" in operation
    assert operation["parameters"][0]["name"] == "id"
61
62
def test_entities_endpoint():
    """The entities path exposes a GET operation."""
    document = get_openapi_spec()
    assert "get" in document["paths"]["/knowledge-graph/{id}/entities"]


def test_relationships_endpoint():
    """The relationships path exposes a GET operation."""
    document = get_openapi_spec()
    assert "get" in document["paths"]["/knowledge-graph/{id}/relationships"]


def test_query_endpoint():
    """The query path exposes GET and accepts a 'q' parameter."""
    document = get_openapi_spec()
    query_item = document["paths"]["/knowledge-graph/{id}/query"]
    assert "get" in query_item
    names = [param["name"] for param in query_item["get"]["parameters"]]
    assert "q" in names
82
83
def test_component_schemas():
    """Job, Entity, and Relationship schemas are all declared."""
    document = get_openapi_spec()
    defined = document["components"]["schemas"]
    for schema_name in ("Job", "Entity", "Relationship"):
        assert schema_name in defined, f"Missing schema: {schema_name}"


def test_job_schema_properties():
    """The Job schema models id, status, and progress."""
    document = get_openapi_spec()
    job_props = document["components"]["schemas"]["Job"]["properties"]
    for prop in ("id", "status", "progress"):
        assert prop in job_props


def test_job_status_enum():
    """Job.status is a closed enum including pending and completed."""
    document = get_openapi_spec()
    status_schema = document["components"]["schemas"]["Job"]["properties"]["status"]
    assert "enum" in status_schema
    allowed = status_schema["enum"]
    assert "pending" in allowed
    assert "completed" in allowed
106
107
def test_analyze_request_body_schema():
    """POST /analyze requires a JSON body with a video_url field."""
    document = get_openapi_spec()
    post_op = document["paths"]["/analyze"]["post"]
    body_schema = post_op["requestBody"]["content"]["application/json"]["schema"]
    assert "video_url" in body_schema["properties"]
    assert "video_url" in body_schema["required"]
--- a/tests/test_callbacks.py
+++ b/tests/test_callbacks.py
@@ -0,0 +1,114 @@
1
+"""Tests for video_processor.utils.callbacks.WebhookCallback."""
2
+
3
+import json
4
+from unittest.mock import patch
5
+
6
+import pytest
7
+
8
+from video_processor.utils.callbacks import WebhookCallback
9
+
10
+
11
+@pytest.fixture()
12
+def callback():
13
+ return WebhookCallback(url="https://example.com/webhook")
14
+
15
+
16
+# --- Constructor ---
17
+
18
+
19
+def test_default_headers():
20
+ cb = WebhookCallback(url="https://example.com/hook")
21
+ assert cb.headers == {"Content-Type": "application/json"}
22
+
23
+
24
+def test_custom_headers():
25
+ headers = {"Authorization": "Bearer tok", "Content-Type": "application/json"}
26
+ cb = WebhookCallback(url="https://example.com/hook", headers=headers)
27
+ assert cb.headers["Authorization"] == "Bearer tok"
28
+
29
+
30
+def test_custom_timeout():
31
+ cb = WebhookCallback(url="https://example.com/hook", timeout=5.0)
32
+ assert cb.timeout == 5.0
33
+
34
+
35
+# --- _post ---
36
+
37
+
38
+@patch("urllib.request.urlopen")
39
+@patch("urllib.request.Request")
40
+def test_post_sends_json_payload(mock_request_cls, mock_urlopen, callback):
41
+ callback._post({"event": "test"})
42
+
43
+ mock_request_cls.assert_called_once()
44
+ call_args = mock_request_cls.call_args
45
+ data = json.loads(call_args[1]["data"] if "data" in call_args[1] else call_args[0][1])
46
+ assert data["event"] == "test"
47
+ mock_urlopen.assert_called_once()
48
+
49
+
50
+@patch("urllib.request.urlopen", side_effect=Exception("Connection refused"))
51
+@patch("urllib.request.Request")
52
+def test_post_logs_failure_does_not_raise(mock_request_cls, mock_urlopen, callback):
53
+ # Should not raise
54
+ callback._post({"event": "fail_test"})
55
+
56
+
57
+# --- on_step_start ---
58
+
59
+
60
+@patch.object(WebhookCallback, "_post")
61
+def test_on_step_start_payload(mock_post, callback):
62
+ callback.on_step_start("transcription", 1, 5)
63
+
64
+ mock_post.assert_called_once_with(
65
+ {
66
+ "event": "step_start",
67
+ "step": "transcription",
68
+ "index": 1,
69
+ "total": 5,
70
+ }
71
+ )
72
+
73
+
74
+# --- on_step_complete ---
75
+
76
+
77
+@patch.object(WebhookCallback, "_post")
78
+def test_on_step_complete_payload(mock_post, callback):
79
+ callback.on_step_complete("analysis", 3, 5)
80
+
81
+ mock_post.assert_called_once_with(
82
+ {
83
+ "event": "step_complete",
84
+ "step": "analysis",
85
+ "index": 3,
86
+ "total": 5,
87
+ }
88
+ )
89
+
90
+
91
+# --- on_progress ---
92
+
93
+
94
+@patch.object(WebhookCallback, "_post")
95
+def test_on_progress_payload(mock_post, callback):
96
+ callback.on_progress("transcription", 42.5, "Processing chunk 3/7")
97
+
98
+ mock_post.assert_called_once_with(
99
+ {
100
+ "event": "progress",
101
+ "step": "transcription",
102
+ "percent": 42.5,
103
+ "message": "Processing chunk 3/7",
104
+ }
105
+ )
106
+
107
+
108
+@patch.object(WebhookCallback, "_post")
109
+def test_on_progress_default_message(mock_post, callback):
110
+ callback.on_progress("extraction", 100.0)
111
+
112
+ payload = mock_post.call_args[0][0]
113
+ assert payload["message"] == ""
114
+ assert payload["percent"] == 100.0
--- a/tests/test_callbacks.py
+++ b/tests/test_callbacks.py
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/tests/test_callbacks.py
+++ b/tests/test_callbacks.py
@@ -0,0 +1,114 @@
1 """Tests for video_processor.utils.callbacks.WebhookCallback."""
2
3 import json
4 from unittest.mock import patch
5
6 import pytest
7
8 from video_processor.utils.callbacks import WebhookCallback
9
10
@pytest.fixture()
def callback():
    """A WebhookCallback wired to a dummy endpoint."""
    return WebhookCallback(url="https://example.com/webhook")


# --- Constructor ---


def test_default_headers():
    """With no explicit headers, only the JSON content type is set."""
    hook = WebhookCallback(url="https://example.com/hook")
    assert hook.headers == {"Content-Type": "application/json"}


def test_custom_headers():
    """Caller-supplied headers are preserved verbatim."""
    supplied = {"Authorization": "Bearer tok", "Content-Type": "application/json"}
    hook = WebhookCallback(url="https://example.com/hook", headers=supplied)
    assert hook.headers["Authorization"] == "Bearer tok"


def test_custom_timeout():
    """An explicit timeout is stored on the instance."""
    hook = WebhookCallback(url="https://example.com/hook", timeout=5.0)
    assert hook.timeout == 5.0
33
34
# --- _post ---


@patch("urllib.request.urlopen")
@patch("urllib.request.Request")
def test_post_sends_json_payload(mock_request_cls, mock_urlopen, callback):
    """_post serializes the payload as JSON and issues exactly one request."""
    callback._post({"event": "test"})

    mock_request_cls.assert_called_once()
    # The payload may have been passed positionally or by keyword.
    args, kwargs = mock_request_cls.call_args
    raw_body = kwargs["data"] if "data" in kwargs else args[1]
    assert json.loads(raw_body)["event"] == "test"
    mock_urlopen.assert_called_once()


@patch("urllib.request.urlopen", side_effect=Exception("Connection refused"))
@patch("urllib.request.Request")
def test_post_logs_failure_does_not_raise(mock_request_cls, mock_urlopen, callback):
    """A network failure inside _post is swallowed, never propagated."""
    callback._post({"event": "fail_test"})
55
56
# --- on_step_start ---


@patch.object(WebhookCallback, "_post")
def test_on_step_start_payload(mock_post, callback):
    """on_step_start forwards a step_start event with position info."""
    callback.on_step_start("transcription", 1, 5)

    expected = {
        "event": "step_start",
        "step": "transcription",
        "index": 1,
        "total": 5,
    }
    mock_post.assert_called_once_with(expected)


# --- on_step_complete ---


@patch.object(WebhookCallback, "_post")
def test_on_step_complete_payload(mock_post, callback):
    """on_step_complete forwards a step_complete event with position info."""
    callback.on_step_complete("analysis", 3, 5)

    expected = {
        "event": "step_complete",
        "step": "analysis",
        "index": 3,
        "total": 5,
    }
    mock_post.assert_called_once_with(expected)
89
90
# --- on_progress ---


@patch.object(WebhookCallback, "_post")
def test_on_progress_payload(mock_post, callback):
    """on_progress forwards step, percent, and message fields."""
    callback.on_progress("transcription", 42.5, "Processing chunk 3/7")

    expected = {
        "event": "progress",
        "step": "transcription",
        "percent": 42.5,
        "message": "Processing chunk 3/7",
    }
    mock_post.assert_called_once_with(expected)


@patch.object(WebhookCallback, "_post")
def test_on_progress_default_message(mock_post, callback):
    """When no message is given, an empty string is sent."""
    callback.on_progress("extraction", 100.0)

    sent = mock_post.call_args[0][0]
    assert sent["message"] == ""
    assert sent["percent"] == 100.0
--- tests/test_cli.py
+++ tests/test_cli.py
@@ -9,11 +9,11 @@
99
def test_version(self):
1010
runner = CliRunner()
1111
result = runner.invoke(cli, ["--version"])
1212
assert result.exit_code == 0
1313
assert "PlanOpticon" in result.output
14
- assert "0.2.0" in result.output # matches @click.version_option
14
+ assert "0.4.0" in result.output # matches @click.version_option
1515
1616
def test_help(self):
1717
runner = CliRunner()
1818
result = runner.invoke(cli, ["--help"])
1919
assert result.exit_code == 0
2020
2121
ADDED tests/test_output_formatter.py
2222
ADDED tests/test_sources.py
--- tests/test_cli.py
+++ tests/test_cli.py
@@ -9,11 +9,11 @@
9 def test_version(self):
10 runner = CliRunner()
11 result = runner.invoke(cli, ["--version"])
12 assert result.exit_code == 0
13 assert "PlanOpticon" in result.output
14 assert "0.2.0" in result.output # matches @click.version_option
15
16 def test_help(self):
17 runner = CliRunner()
18 result = runner.invoke(cli, ["--help"])
19 assert result.exit_code == 0
20
21 ADDED tests/test_output_formatter.py
22 ADDED tests/test_sources.py
--- tests/test_cli.py
+++ tests/test_cli.py
@@ -9,11 +9,11 @@
9 def test_version(self):
10 runner = CliRunner()
11 result = runner.invoke(cli, ["--version"])
12 assert result.exit_code == 0
13 assert "PlanOpticon" in result.output
14 assert "0.4.0" in result.output # matches @click.version_option
15
16 def test_help(self):
17 runner = CliRunner()
18 result = runner.invoke(cli, ["--help"])
19 assert result.exit_code == 0
20
21 ADDED tests/test_output_formatter.py
22 ADDED tests/test_sources.py
--- a/tests/test_output_formatter.py
+++ b/tests/test_output_formatter.py
@@ -0,0 +1,275 @@
1
+"""Tests for video_processor.cli.output_formatter.OutputFormatter."""
2
+
3
+from pathlib import Path
4
+
5
+import pytest
6
+
7
+from video_processor.cli.output_formatter import OutputFormatter
8
+
9
+
10
+@pytest.fixture()
11
+def tmp_dir(tmp_path):
12
+ """Return a fresh temp directory that is cleaned up automatically."""
13
+ return tmp_path
14
+
15
+
16
+@pytest.fixture()
17
+def formatter(tmp_dir):
18
+ """Return an OutputFormatter pointed at a temp output directory."""
19
+ return OutputFormatter(tmp_dir / "output")
20
+
21
+
22
+# --- Constructor ---
23
+
24
+
25
+def test_constructor_creates_output_dir(tmp_dir):
26
+ out = tmp_dir / "new_output"
27
+ assert not out.exists()
28
+ OutputFormatter(out)
29
+ assert out.is_dir()
30
+
31
+
32
+def test_constructor_accepts_string(tmp_dir):
33
+ fmt = OutputFormatter(str(tmp_dir / "str_output"))
34
+ assert fmt.output_dir.is_dir()
35
+
36
+
37
+# --- organize_outputs ---
38
+
39
+
40
+def _create_file(path: Path, content: str = "test") -> Path:
41
+ path.parent.mkdir(parents=True, exist_ok=True)
42
+ path.write_text(content)
43
+ return path
44
+
45
+
46
+def test_organize_outputs_basic(formatter, tmp_dir):
47
+ md = _create_file(tmp_dir / "analysis.md", "# Title")
48
+ kg = _create_file(tmp_dir / "kg.json", "{}")
49
+
50
+ result = formatter.organize_outputs(
51
+ markdown_path=md,
52
+ knowledge_graph_path=kg,
53
+ diagrams=[],
54
+ )
55
+
56
+ assert "markdown" in result
57
+ assert "knowledge_graph" in result
58
+ assert Path(result["markdown"]).exists()
59
+ assert Path(result["knowledge_graph"]).exists()
60
+ assert result["diagram_images"] == []
61
+ assert result["frames"] == []
62
+ assert result["transcript"] is None
63
+
64
+
65
+def test_organize_outputs_with_transcript(formatter, tmp_dir):
66
+ md = _create_file(tmp_dir / "analysis.md")
67
+ kg = _create_file(tmp_dir / "kg.json")
68
+ transcript = _create_file(tmp_dir / "transcript.txt", "Hello world")
69
+
70
+ result = formatter.organize_outputs(
71
+ markdown_path=md,
72
+ knowledge_graph_path=kg,
73
+ diagrams=[],
74
+ transcript_path=transcript,
75
+ )
76
+
77
+ assert result["transcript"] is not None
78
+ assert Path(result["transcript"]).exists()
79
+
80
+
81
+def test_organize_outputs_with_diagrams(formatter, tmp_dir):
82
+ md = _create_file(tmp_dir / "analysis.md")
83
+ kg = _create_file(tmp_dir / "kg.json")
84
+ img = _create_file(tmp_dir / "diagram1.png", "fake-png")
85
+
86
+ result = formatter.organize_outputs(
87
+ markdown_path=md,
88
+ knowledge_graph_path=kg,
89
+ diagrams=[{"image_path": str(img)}],
90
+ )
91
+
92
+ assert len(result["diagram_images"]) == 1
93
+ assert Path(result["diagram_images"][0]).exists()
94
+
95
+
96
+def test_organize_outputs_skips_missing_diagram(formatter, tmp_dir):
97
+ md = _create_file(tmp_dir / "analysis.md")
98
+ kg = _create_file(tmp_dir / "kg.json")
99
+
100
+ result = formatter.organize_outputs(
101
+ markdown_path=md,
102
+ knowledge_graph_path=kg,
103
+ diagrams=[{"image_path": "/nonexistent/diagram.png"}],
104
+ )
105
+
106
+ assert result["diagram_images"] == []
107
+
108
+
109
+def test_organize_outputs_diagram_without_image_path(formatter, tmp_dir):
110
+ md = _create_file(tmp_dir / "analysis.md")
111
+ kg = _create_file(tmp_dir / "kg.json")
112
+
113
+ result = formatter.organize_outputs(
114
+ markdown_path=md,
115
+ knowledge_graph_path=kg,
116
+ diagrams=[{"description": "A diagram"}],
117
+ )
118
+
119
+ assert result["diagram_images"] == []
120
+
121
+
122
+def test_organize_outputs_with_frames(formatter, tmp_dir):
123
+ md = _create_file(tmp_dir / "analysis.md")
124
+ kg = _create_file(tmp_dir / "kg.json")
125
+ frames_dir = tmp_dir / "frames"
126
+ frames_dir.mkdir()
127
+ for i in range(5):
128
+ _create_file(frames_dir / f"frame_{i:03d}.jpg", f"frame{i}")
129
+
130
+ result = formatter.organize_outputs(
131
+ markdown_path=md,
132
+ knowledge_graph_path=kg,
133
+ diagrams=[],
134
+ frames_dir=frames_dir,
135
+ )
136
+
137
+ assert len(result["frames"]) == 5
138
+
139
+
140
+def test_organize_outputs_limits_frames_to_10(formatter, tmp_dir):
141
+ md = _create_file(tmp_dir / "analysis.md")
142
+ kg = _create_file(tmp_dir / "kg.json")
143
+ frames_dir = tmp_dir / "frames"
144
+ frames_dir.mkdir()
145
+ for i in range(25):
146
+ _create_file(frames_dir / f"frame_{i:03d}.jpg", f"frame{i}")
147
+
148
+ result = formatter.organize_outputs(
149
+ markdown_path=md,
150
+ knowledge_graph_path=kg,
151
+ diagrams=[],
152
+ frames_dir=frames_dir,
153
+ )
154
+
155
+ assert len(result["frames"]) <= 10
156
+
157
+
158
+def test_organize_outputs_missing_frames_dir(formatter, tmp_dir):
159
+ md = _create_file(tmp_dir / "analysis.md")
160
+ kg = _create_file(tmp_dir / "kg.json")
161
+
162
+ result = formatter.organize_outputs(
163
+ markdown_path=md,
164
+ knowledge_graph_path=kg,
165
+ diagrams=[],
166
+ frames_dir=tmp_dir / "nonexistent_frames",
167
+ )
168
+
169
+ assert result["frames"] == []
170
+
171
+
172
+# --- create_html_index ---
173
+
174
+
175
+def test_create_html_index_returns_path(formatter, tmp_dir):
176
+ outputs = {
177
+ "markdown": str(formatter.output_dir / "markdown" / "analysis.md"),
178
+ "knowledge_graph": str(formatter.output_dir / "data" / "kg.json"),
179
+ "diagram_images": [],
180
+ "frames": [],
181
+ "transcript": None,
182
+ }
183
+ # Create the referenced files so relative_to works
184
+ for key in ("markdown", "knowledge_graph"):
185
+ _create_file(Path(outputs[key]))
186
+
187
+ index = formatter.create_html_index(outputs)
188
+ assert index.exists()
189
+ assert index.name == "index.html"
190
+
191
+
192
+def test_create_html_index_contains_analysis_link(formatter, tmp_dir):
193
+ md_path = formatter.output_dir / "markdown" / "analysis.md"
194
+ _create_file(md_path)
195
+ outputs = {
196
+ "markdown": str(md_path),
197
+ "knowledge_graph": None,
198
+ "diagram_images": [],
199
+ "frames": [],
200
+ "transcript": None,
201
+ }
202
+
203
+ index = formatter.create_html_index(outputs)
204
+ content = index.read_text()
205
+ assert "Analysis Report" in content
206
+ assert "analysis.md" in content
207
+
208
+
209
+def test_create_html_index_with_diagrams(formatter, tmp_dir):
210
+ img_path = formatter.output_dir / "diagrams" / "d1.png"
211
+ _create_file(img_path)
212
+ outputs = {
213
+ "markdown": None,
214
+ "knowledge_graph": None,
215
+ "diagram_images": [str(img_path)],
216
+ "frames": [],
217
+ "transcript": None,
218
+ }
219
+
220
+ index = formatter.create_html_index(outputs)
221
+ content = index.read_text()
222
+ assert "Diagrams" in content
223
+ assert "d1.png" in content
224
+
225
+
226
+def test_create_html_index_with_frames(formatter, tmp_dir):
227
+ frame_path = formatter.output_dir / "frames" / "frame_001.jpg"
228
+ _create_file(frame_path)
229
+ outputs = {
230
+ "markdown": None,
231
+ "knowledge_graph": None,
232
+ "diagram_images": [],
233
+ "frames": [str(frame_path)],
234
+ "transcript": None,
235
+ }
236
+
237
+ index = formatter.create_html_index(outputs)
238
+ content = index.read_text()
239
+ assert "Key Frames" in content
240
+ assert "frame_001.jpg" in content
241
+
242
+
243
+def test_create_html_index_with_data_files(formatter, tmp_dir):
244
+ kg_path = formatter.output_dir / "data" / "kg.json"
245
+ transcript_path = formatter.output_dir / "data" / "transcript.txt"
246
+ _create_file(kg_path)
247
+ _create_file(transcript_path)
248
+ outputs = {
249
+ "markdown": None,
250
+ "knowledge_graph": str(kg_path),
251
+ "diagram_images": [],
252
+ "frames": [],
253
+ "transcript": str(transcript_path),
254
+ }
255
+
256
+ index = formatter.create_html_index(outputs)
257
+ content = index.read_text()
258
+ assert "Data Files" in content
259
+ assert "kg.json" in content
260
+ assert "transcript.txt" in content
261
+
262
+
263
+def test_create_html_index_empty_outputs(formatter):
264
+ outputs = {
265
+ "markdown": None,
266
+ "knowledge_graph": None,
267
+ "diagram_images": [],
268
+ "frames": [],
269
+ "transcript": None,
270
+ }
271
+
272
+ index = formatter.create_html_index(outputs)
273
+ content = index.read_text()
274
+ assert "PlanOpticon Analysis Results" in content
275
+ assert "<!DOCTYPE html>" in content
--- a/tests/test_output_formatter.py
+++ b/tests/test_output_formatter.py
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/tests/test_output_formatter.py
+++ b/tests/test_output_formatter.py
@@ -0,0 +1,275 @@
1 """Tests for video_processor.cli.output_formatter.OutputFormatter."""
2
3 from pathlib import Path
4
5 import pytest
6
7 from video_processor.cli.output_formatter import OutputFormatter
8
9
@pytest.fixture()
def tmp_dir(tmp_path):
    """Alias for pytest's tmp_path; removed automatically after each test."""
    return tmp_path


@pytest.fixture()
def formatter(tmp_dir):
    """An OutputFormatter writing into a scratch 'output' directory."""
    target = tmp_dir / "output"
    return OutputFormatter(target)
20
21
22 # --- Constructor ---
23
24
25 def test_constructor_creates_output_dir(tmp_dir):
26 out = tmp_dir / "new_output"
27 assert not out.exists()
28 OutputFormatter(out)
29 assert out.is_dir()
30
31
32 def test_constructor_accepts_string(tmp_dir):
33 fmt = OutputFormatter(str(tmp_dir / "str_output"))
34 assert fmt.output_dir.is_dir()
35
36
37 # --- organize_outputs ---
38
39
40 def _create_file(path: Path, content: str = "test") -> Path:
41 path.parent.mkdir(parents=True, exist_ok=True)
42 path.write_text(content)
43 return path
44
45
def test_organize_outputs_basic(formatter, tmp_dir):
    """Markdown and KG files are copied; optional sections stay empty."""
    report = _create_file(tmp_dir / "analysis.md", "# Title")
    graph = _create_file(tmp_dir / "kg.json", "{}")

    organized = formatter.organize_outputs(
        markdown_path=report,
        knowledge_graph_path=graph,
        diagrams=[],
    )

    for key in ("markdown", "knowledge_graph"):
        assert key in organized
        assert Path(organized[key]).exists()
    assert organized["diagram_images"] == []
    assert organized["frames"] == []
    assert organized["transcript"] is None


def test_organize_outputs_with_transcript(formatter, tmp_dir):
    """A transcript path is carried through and copied alongside."""
    report = _create_file(tmp_dir / "analysis.md")
    graph = _create_file(tmp_dir / "kg.json")
    words = _create_file(tmp_dir / "transcript.txt", "Hello world")

    organized = formatter.organize_outputs(
        markdown_path=report,
        knowledge_graph_path=graph,
        diagrams=[],
        transcript_path=words,
    )

    assert organized["transcript"] is not None
    assert Path(organized["transcript"]).exists()
79
80
def test_organize_outputs_with_diagrams(formatter, tmp_dir):
    """An existing diagram image is copied into the results."""
    report = _create_file(tmp_dir / "analysis.md")
    graph = _create_file(tmp_dir / "kg.json")
    picture = _create_file(tmp_dir / "diagram1.png", "fake-png")

    organized = formatter.organize_outputs(
        markdown_path=report,
        knowledge_graph_path=graph,
        diagrams=[{"image_path": str(picture)}],
    )

    copied = organized["diagram_images"]
    assert len(copied) == 1
    assert Path(copied[0]).exists()


def test_organize_outputs_skips_missing_diagram(formatter, tmp_dir):
    """A diagram whose image file does not exist is silently dropped."""
    report = _create_file(tmp_dir / "analysis.md")
    graph = _create_file(tmp_dir / "kg.json")

    organized = formatter.organize_outputs(
        markdown_path=report,
        knowledge_graph_path=graph,
        diagrams=[{"image_path": "/nonexistent/diagram.png"}],
    )

    assert organized["diagram_images"] == []


def test_organize_outputs_diagram_without_image_path(formatter, tmp_dir):
    """A diagram entry lacking an image_path key yields no image."""
    report = _create_file(tmp_dir / "analysis.md")
    graph = _create_file(tmp_dir / "kg.json")

    organized = formatter.organize_outputs(
        markdown_path=report,
        knowledge_graph_path=graph,
        diagrams=[{"description": "A diagram"}],
    )

    assert organized["diagram_images"] == []
120
121
def test_organize_outputs_with_frames(formatter, tmp_dir):
    """All frames are collected when fewer than the cap exist."""
    report = _create_file(tmp_dir / "analysis.md")
    graph = _create_file(tmp_dir / "kg.json")
    frames_dir = tmp_dir / "frames"
    frames_dir.mkdir()
    for i in range(5):
        _create_file(frames_dir / f"frame_{i:03d}.jpg", f"frame{i}")

    organized = formatter.organize_outputs(
        markdown_path=report,
        knowledge_graph_path=graph,
        diagrams=[],
        frames_dir=frames_dir,
    )

    assert len(organized["frames"]) == 5


def test_organize_outputs_limits_frames_to_10(formatter, tmp_dir):
    """No more than ten frames make it into the results."""
    report = _create_file(tmp_dir / "analysis.md")
    graph = _create_file(tmp_dir / "kg.json")
    frames_dir = tmp_dir / "frames"
    frames_dir.mkdir()
    for i in range(25):
        _create_file(frames_dir / f"frame_{i:03d}.jpg", f"frame{i}")

    organized = formatter.organize_outputs(
        markdown_path=report,
        knowledge_graph_path=graph,
        diagrams=[],
        frames_dir=frames_dir,
    )

    assert len(organized["frames"]) <= 10


def test_organize_outputs_missing_frames_dir(formatter, tmp_dir):
    """A nonexistent frames directory produces an empty frame list."""
    report = _create_file(tmp_dir / "analysis.md")
    graph = _create_file(tmp_dir / "kg.json")

    organized = formatter.organize_outputs(
        markdown_path=report,
        knowledge_graph_path=graph,
        diagrams=[],
        frames_dir=tmp_dir / "nonexistent_frames",
    )

    assert organized["frames"] == []
170
171
# --- create_html_index ---


def test_create_html_index_returns_path(formatter, tmp_dir):
    """create_html_index writes index.html inside the output directory."""
    outputs = {
        "markdown": str(formatter.output_dir / "markdown" / "analysis.md"),
        "knowledge_graph": str(formatter.output_dir / "data" / "kg.json"),
        "diagram_images": [],
        "frames": [],
        "transcript": None,
    }
    # The referenced files must exist so relative_to resolves them.
    for key in ("markdown", "knowledge_graph"):
        _create_file(Path(outputs[key]))

    html_index = formatter.create_html_index(outputs)
    assert html_index.exists()
    assert html_index.name == "index.html"


def test_create_html_index_contains_analysis_link(formatter, tmp_dir):
    """The index links to the analysis markdown report."""
    md_file = formatter.output_dir / "markdown" / "analysis.md"
    _create_file(md_file)
    outputs = {
        "markdown": str(md_file),
        "knowledge_graph": None,
        "diagram_images": [],
        "frames": [],
        "transcript": None,
    }

    page = formatter.create_html_index(outputs).read_text()
    assert "Analysis Report" in page
    assert "analysis.md" in page
207
208
def test_create_html_index_with_diagrams(formatter, tmp_dir):
    """Diagram images appear under a Diagrams section of the index."""
    image = formatter.output_dir / "diagrams" / "d1.png"
    _create_file(image)
    outputs = {
        "markdown": None,
        "knowledge_graph": None,
        "diagram_images": [str(image)],
        "frames": [],
        "transcript": None,
    }

    page = formatter.create_html_index(outputs).read_text()
    assert "Diagrams" in page
    assert "d1.png" in page


def test_create_html_index_with_frames(formatter, tmp_dir):
    """Extracted frames appear under a Key Frames section of the index."""
    frame = formatter.output_dir / "frames" / "frame_001.jpg"
    _create_file(frame)
    outputs = {
        "markdown": None,
        "knowledge_graph": None,
        "diagram_images": [],
        "frames": [str(frame)],
        "transcript": None,
    }

    page = formatter.create_html_index(outputs).read_text()
    assert "Key Frames" in page
    assert "frame_001.jpg" in page
241
242
def test_create_html_index_with_data_files(formatter, tmp_dir):
    """Knowledge graph and transcript files are listed under Data Files."""
    graph_file = formatter.output_dir / "data" / "kg.json"
    transcript_file = formatter.output_dir / "data" / "transcript.txt"
    _create_file(graph_file)
    _create_file(transcript_file)
    outputs = {
        "markdown": None,
        "knowledge_graph": str(graph_file),
        "diagram_images": [],
        "frames": [],
        "transcript": str(transcript_file),
    }

    page = formatter.create_html_index(outputs).read_text()
    assert "Data Files" in page
    assert "kg.json" in page
    assert "transcript.txt" in page


def test_create_html_index_empty_outputs(formatter):
    """With nothing to show, a valid HTML shell is still produced."""
    outputs = {
        "markdown": None,
        "knowledge_graph": None,
        "diagram_images": [],
        "frames": [],
        "transcript": None,
    }

    page = formatter.create_html_index(outputs).read_text()
    assert "PlanOpticon Analysis Results" in page
    assert "<!DOCTYPE html>" in page
--- a/tests/test_sources.py
+++ b/tests/test_sources.py
@@ -0,0 +1,538 @@
"""Tests for all source connectors: import, instantiation, authenticate, list_videos."""

import os
from unittest.mock import MagicMock, patch

import pytest

# Fixed garbled import path: "video_proes.base" is "video_processor.sources.base"
# with its middle characters dropped; every other import in this file uses the
# video_processor.sources.* package.
from video_processor.sources.base import BaseSource, SourceFile
9
+
10
+# ---------------------------------------------------------------------------
11
+# SourceFile model
12
+# ---------------------------------------------------------------------------
13
+
14
+
def test_source_file_creation():
    """Only name and id are required; optional metadata defaults to None."""
    record = SourceFile(name="test.mp4", id="abc123")

    assert record.name == "test.mp4"
    assert record.id == "abc123"
    assert record.size_bytes is None
    assert record.mime_type is None
+
22
+
def test_source_file_with_all_fields():
    """All optional metadata fields are stored as given."""
    record = SourceFile(
        name="video.mp4",
        id="v1",
        size_bytes=1024,
        mime_type="video/mp4",
        modified_at="2025-01-01",
        path="folder/video.mp4",
    )

    assert record.size_bytes == 1024
    assert record.path == "folder/video.mp4"
+
35
+
36
+# ---------------------------------------------------------------------------
37
+# YouTubeSource
38
+# ---------------------------------------------------------------------------
39
+
40
+
class TestYouTubeSource:
    """YouTubeSource: URL parsing, yt-dlp-based auth, and metadata listing."""

    def test_import(self):
        from video_processor.sources.youtube_source import YouTubeSource

        assert YouTubeSource is not None

    def test_constructor(self):
        from video_processor.sources.youtube_source import YouTubeSource

        src = YouTubeSource(url="https://www.youtube.com/watch?v=dQw4w9WgXcQ")
        assert src.video_id == "dQw4w9WgXcQ"
        assert src.audio_only is False

    def test_constructor_audio_only(self):
        from video_processor.sources.youtube_source import YouTubeSource

        src = YouTubeSource(url="https://youtu.be/dQw4w9WgXcQ", audio_only=True)
        assert src.audio_only is True

    def test_constructor_shorts_url(self):
        from video_processor.sources.youtube_source import YouTubeSource

        src = YouTubeSource(url="https://youtube.com/shorts/dQw4w9WgXcQ")
        assert src.video_id == "dQw4w9WgXcQ"

    def test_constructor_invalid_url(self):
        from video_processor.sources.youtube_source import YouTubeSource

        with pytest.raises(ValueError, match="Could not extract"):
            YouTubeSource(url="https://example.com/not-youtube")

    def test_authenticate_no_ytdlp(self):
        # Removed the previous @patch.dict(os.environ, {}, clear=False)
        # decorator: with an empty mapping and clear=False it patched nothing
        # and was a pure no-op.
        from video_processor.sources.youtube_source import YouTubeSource

        src = YouTubeSource(url="https://youtube.com/watch?v=dQw4w9WgXcQ")
        with patch.dict("sys.modules", {"yt_dlp": None}):
            # Mapping yt_dlp to None makes "import yt_dlp" raise ImportError.
            result = src.authenticate()
        # Result depends on whether yt_dlp was already imported elsewhere;
        # only the boolean contract is guaranteed here.
        assert isinstance(result, bool)

    def test_list_videos(self):
        from video_processor.sources.youtube_source import YouTubeSource

        # Fake YoutubeDL context manager returning fixed metadata.
        mock_ydl = MagicMock()
        mock_ydl.__enter__ = MagicMock(return_value=mock_ydl)
        mock_ydl.__exit__ = MagicMock(return_value=False)
        mock_ydl.extract_info.return_value = {
            "title": "Test Video",
            "filesize": 1000,
        }
        mock_module = MagicMock()
        mock_module.YoutubeDL = MagicMock(return_value=mock_ydl)

        with patch.dict("sys.modules", {"yt_dlp": mock_module}):
            src = YouTubeSource(url="https://youtube.com/watch?v=dQw4w9WgXcQ")
            files = src.list_videos()
            assert isinstance(files, list)
            assert len(files) == 1
            assert files[0].name == "Test Video"
103
+
104
+
105
+# ---------------------------------------------------------------------------
106
+# WebSource
107
+# ---------------------------------------------------------------------------
108
+
109
+
class TestWebSource:
    """WebSource exposes a single web page as one pseudo-video file."""

    def test_import(self):
        from video_processor.sources.web_source import WebSource

        assert WebSource is not None

    def test_constructor(self):
        from video_processor.sources.web_source import WebSource

        source = WebSource(url="https://example.com/page")
        assert source.url == "https://example.com/page"

    def test_authenticate(self):
        from video_processor.sources.web_source import WebSource

        # Public pages need no credentials.
        assert WebSource(url="https://example.com").authenticate() is True

    def test_list_videos(self):
        from video_processor.sources.web_source import WebSource

        listed = WebSource(url="https://example.com/article").list_videos()
        assert isinstance(listed, list)
        assert len(listed) == 1
        assert listed[0].mime_type == "text/html"
136
+
137
+
138
+# ---------------------------------------------------------------------------
139
+# GitHubSource
140
+# ---------------------------------------------------------------------------
141
+
142
+
class TestGitHubSource:
    """GitHubSource: repo config, env-token auth, and issue/PR listing."""

    def test_import(self):
        from video_processor.sources.github_source import GitHubSource

        assert GitHubSource is not None

    def test_constructor(self):
        from video_processor.sources.github_source import GitHubSource

        source = GitHubSource(repo="owner/repo")
        assert source.repo == "owner/repo"
        assert source.include_issues is True
        assert source.include_prs is True

    @patch.dict(os.environ, {"GITHUB_TOKEN": "ghp_test123"})
    def test_authenticate_with_env_token(self):
        from video_processor.sources.github_source import GitHubSource

        source = GitHubSource(repo="owner/repo")
        assert source.authenticate() is True
        # The env token is captured on the instance.
        assert source._token == "ghp_test123"

    @patch("requests.get")
    @patch.dict(os.environ, {"GITHUB_TOKEN": "ghp_test123"})
    def test_list_videos(self, mock_get):
        from video_processor.sources.github_source import GitHubSource

        def _ok_response(payload=None):
            # Build a successful HTTP response mock for readme/issues/PRs.
            response = MagicMock()
            response.ok = True
            if payload is not None:
                response.json.return_value = payload
            return response

        readme_resp = _ok_response()
        issues_resp = _ok_response(
            [
                {"number": 1, "title": "Bug report", "id": 1},
                {"number": 2, "title": "Feature request", "id": 2, "pull_request": {}},
            ]
        )
        prs_resp = _ok_response([{"number": 3, "title": "Fix bug"}])
        # Order matters: the source fetches readme, then issues, then PRs.
        mock_get.side_effect = [readme_resp, issues_resp, prs_resp]

        source = GitHubSource(repo="owner/repo")
        source.authenticate()
        files = source.list_videos()

        assert isinstance(files, list)
        # README + 1 issue (one filtered as PR) + 1 PR = 3
        assert len(files) == 3
196
+
197
+
198
+# ---------------------------------------------------------------------------
199
+# RedditSource
200
+# ---------------------------------------------------------------------------
201
+
202
+
class TestRedditSource:
    """RedditSource: URL normalisation and single-thread listing."""

    def test_import(self):
        from video_processor.sources.reddit_source import RedditSource

        assert RedditSource is not None

    def test_constructor(self):
        from video_processor.sources.reddit_source import RedditSource

        source = RedditSource(url="https://reddit.com/r/python/comments/abc123/test/")
        # The trailing slash is stripped on construction.
        assert source.url == "https://reddit.com/r/python/comments/abc123/test"

    def test_authenticate(self):
        from video_processor.sources.reddit_source import RedditSource

        assert RedditSource(url="https://reddit.com/r/test").authenticate() is True

    def test_list_videos(self):
        from video_processor.sources.reddit_source import RedditSource

        listed = RedditSource(url="https://reddit.com/r/python/comments/abc/post").list_videos()
        assert isinstance(listed, list)
        assert len(listed) == 1
        assert listed[0].mime_type == "text/plain"
229
+
230
+
231
+# ---------------------------------------------------------------------------
232
+# HackerNewsSource
233
+# ---------------------------------------------------------------------------
234
+
235
+
class TestHackerNewsSource:
    """HackerNewsSource: item-id plumbing, defaults, and single-item listing."""

    def test_import(self):
        from video_processor.sources.hackernews_source import HackerNewsSource

        assert HackerNewsSource is not None

    def test_constructor(self):
        from video_processor.sources.hackernews_source import HackerNewsSource

        source = HackerNewsSource(item_id=12345678)
        assert source.item_id == 12345678
        # Default cap on fetched comments.
        assert source.max_comments == 200

    def test_authenticate(self):
        from video_processor.sources.hackernews_source import HackerNewsSource

        # The HN API is public; no credentials required.
        assert HackerNewsSource(item_id=12345678).authenticate() is True

    def test_list_videos(self):
        from video_processor.sources.hackernews_source import HackerNewsSource

        listed = HackerNewsSource(item_id=99999).list_videos()
        assert isinstance(listed, list)
        assert len(listed) == 1
        # The numeric item id is surfaced as a string id.
        assert listed[0].id == "99999"
263
+
264
+
265
+# ---------------------------------------------------------------------------
266
+# RSSSource
267
+# ---------------------------------------------------------------------------
268
+
269
+
class TestRSSSource:
    """RSSSource: feed configuration and entry listing from a mocked HTTP fetch."""

    def test_import(self):
        from video_processor.sources.rss_source import RSSSource

        assert RSSSource is not None

    def test_constructor(self):
        from video_processor.sources.rss_source import RSSSource

        src = RSSSource(url="https://example.com/feed.xml", max_entries=20)
        assert src.url == "https://example.com/feed.xml"
        assert src.max_entries == 20

    def test_authenticate(self):
        from video_processor.sources.rss_source import RSSSource

        # RSS feeds are fetched anonymously; authenticate() reports success.
        src = RSSSource(url="https://example.com/feed.xml")
        assert src.authenticate() is True

    @patch("requests.get")
    def test_list_videos(self, mock_get):
        from video_processor.sources.rss_source import RSSSource

        # Minimal RSS 2.0 document with a single <item>.
        rss_xml = """<?xml version="1.0"?>
        <rss version="2.0">
          <channel>
            <item>
              <title>Entry 1</title>
              <link>https://example.com/1</link>
              <description>First entry</description>
              <pubDate>Mon, 01 Jan 2025 00:00:00 GMT</pubDate>
            </item>
          </channel>
        </rss>"""
        mock_resp = MagicMock()
        mock_resp.text = rss_xml
        mock_resp.raise_for_status = MagicMock()
        mock_get.return_value = mock_resp

        src = RSSSource(url="https://example.com/feed.xml")
        files = src.list_videos()
        assert isinstance(files, list)
        # At least the one feed entry should be surfaced.
        assert len(files) >= 1
313
+
314
+
315
+# ---------------------------------------------------------------------------
316
+# PodcastSource
317
+# ---------------------------------------------------------------------------
318
+
319
+
class TestPodcastSource:
    """PodcastSource: feed configuration and enclosure-based episode listing."""

    def test_import(self):
        from video_processor.sources.podcast_source import PodcastSource

        assert PodcastSource is not None

    def test_constructor(self):
        from video_processor.sources.podcast_source import PodcastSource

        src = PodcastSource(feed_url="https://example.com/podcast.xml", max_episodes=5)
        assert src.feed_url == "https://example.com/podcast.xml"
        assert src.max_episodes == 5

    def test_authenticate(self):
        from video_processor.sources.podcast_source import PodcastSource

        # Public feed: authenticate() reports success without credentials.
        src = PodcastSource(feed_url="https://example.com/podcast.xml")
        assert src.authenticate() is True

    @patch("requests.get")
    def test_list_videos(self, mock_get):
        from video_processor.sources.podcast_source import PodcastSource

        # One episode whose audio is referenced by an <enclosure> element.
        podcast_xml = """<?xml version="1.0"?>
        <rss version="2.0">
          <channel>
            <item>
              <title>Episode 1</title>
              <enclosure url="https://example.com/ep1.mp3" type="audio/mpeg" />
              <pubDate>Mon, 01 Jan 2025 00:00:00 GMT</pubDate>
            </item>
          </channel>
        </rss>"""
        mock_resp = MagicMock()
        mock_resp.text = podcast_xml
        mock_resp.raise_for_status = MagicMock()
        mock_get.return_value = mock_resp

        src = PodcastSource(feed_url="https://example.com/podcast.xml")
        files = src.list_videos()
        assert isinstance(files, list)
        assert len(files) == 1
        # mime type comes from the enclosure's "type" attribute.
        assert files[0].mime_type == "audio/mpeg"
363
+
364
+
365
+# ---------------------------------------------------------------------------
366
+# TwitterSource
367
+# ---------------------------------------------------------------------------
368
+
369
+
class TestTwitterSource:
    """TwitterSource: URL storage and auth via bearer token or gallery-dl."""

    def test_import(self):
        from video_processor.sources.twitter_source import TwitterSource

        assert TwitterSource is not None

    def test_constructor(self):
        from video_processor.sources.twitter_source import TwitterSource

        source = TwitterSource(url="https://twitter.com/user/status/123456")
        assert source.url == "https://twitter.com/user/status/123456"

    @patch.dict(os.environ, {"TWITTER_BEARER_TOKEN": "test_token"})
    def test_authenticate_with_bearer_token(self):
        from video_processor.sources.twitter_source import TwitterSource

        source = TwitterSource(url="https://twitter.com/user/status/123456")
        assert source.authenticate() is True

    @patch.dict(os.environ, {}, clear=True)
    def test_authenticate_no_token_no_gallery_dl(self):
        from video_processor.sources.twitter_source import TwitterSource

        source = TwitterSource(url="https://twitter.com/user/status/123456")
        # Mask gallery_dl so importing it inside authenticate() fails.
        with patch.dict("sys.modules", {"gallery_dl": None}):
            outcome = source.authenticate()
            assert isinstance(outcome, bool)

    def test_list_videos(self):
        from video_processor.sources.twitter_source import TwitterSource

        listed = TwitterSource(url="https://twitter.com/user/status/123456").list_videos()
        assert isinstance(listed, list)
        assert len(listed) == 1
405
+
406
+
407
+# ---------------------------------------------------------------------------
408
+# ArxivSource
409
+# ---------------------------------------------------------------------------
410
+
411
+
class TestArxivSource:
    """ArxivSource: id extraction from bare ids/URLs and Atom-feed listing."""

    def test_import(self):
        from video_processor.sources.arxiv_source import ArxivSource

        assert ArxivSource is not None

    def test_constructor(self):
        from video_processor.sources.arxiv_source import ArxivSource

        src = ArxivSource(url_or_id="2301.07041")
        assert src.arxiv_id == "2301.07041"

    def test_constructor_from_url(self):
        from video_processor.sources.arxiv_source import ArxivSource

        # The version suffix (v2) is preserved when parsing an /abs/ URL.
        src = ArxivSource(url_or_id="https://arxiv.org/abs/2301.07041v2")
        assert src.arxiv_id == "2301.07041v2"

    def test_constructor_invalid(self):
        from video_processor.sources.arxiv_source import ArxivSource

        with pytest.raises(ValueError, match="Could not extract"):
            ArxivSource(url_or_id="not-an-arxiv-id")

    def test_authenticate(self):
        from video_processor.sources.arxiv_source import ArxivSource

        # The arXiv API is public; no credentials required.
        src = ArxivSource(url_or_id="2301.07041")
        assert src.authenticate() is True

    @patch("requests.get")
    def test_list_videos(self, mock_get):
        from video_processor.sources.arxiv_source import ArxivSource

        # Minimal Atom API response describing a single paper.
        atom_xml = """<?xml version="1.0"?>
        <feed xmlns="http://www.w3.org/2005/Atom"
              xmlns:arxiv="http://arxiv.org/schemas/atom">
          <entry>
            <title>Test Paper</title>
            <summary>Abstract text here.</summary>
            <author><name>Author One</name></author>
            <published>2023-01-15T00:00:00Z</published>
          </entry>
        </feed>"""
        mock_resp = MagicMock()
        mock_resp.text = atom_xml
        mock_resp.raise_for_status = MagicMock()
        mock_get.return_value = mock_resp

        src = ArxivSource(url_or_id="2301.07041")
        files = src.list_videos()
        assert isinstance(files, list)
        assert len(files) == 2  # metadata + pdf
465
+
466
+
467
+# ---------------------------------------------------------------------------
468
+# S3Source
469
+# ---------------------------------------------------------------------------
470
+
471
+
class TestS3Source:
    """S3Source: bucket auth via head_bucket and video-extension filtering."""

    def test_import(self):
        from video_processor.sources.s3_source import S3Source

        assert S3Source is not None

    def test_constructor(self):
        from video_processor.sources.s3_source import S3Source

        src = S3Source(bucket="my-bucket", prefix="videos/", region="us-east-1")
        assert src.bucket == "my-bucket"
        assert src.prefix == "videos/"
        assert src.region == "us-east-1"

    def test_authenticate_success(self):
        from video_processor.sources.s3_source import S3Source

        mock_client = MagicMock()
        mock_client.head_bucket.return_value = {}
        mock_boto3 = MagicMock()
        mock_boto3.client.return_value = mock_client

        with patch.dict("sys.modules", {"boto3": mock_boto3}):
            src = S3Source(bucket="my-bucket")
            assert src.authenticate() is True

    def test_authenticate_failure(self):
        from video_processor.sources.s3_source import S3Source

        mock_client = MagicMock()
        # head_bucket raising signals a missing bucket or bad credentials.
        mock_client.head_bucket.side_effect = Exception("Access Denied")
        mock_boto3 = MagicMock()
        mock_boto3.client.return_value = mock_client

        with patch.dict("sys.modules", {"boto3": mock_boto3}):
            src = S3Source(bucket="bad-bucket")
            assert src.authenticate() is False

    def test_list_videos(self):
        from video_processor.sources.s3_source import S3Source

        mock_client = MagicMock()
        mock_client.head_bucket.return_value = {}
        paginator = MagicMock()
        mock_client.get_paginator.return_value = paginator
        paginator.paginate.return_value = [
            {
                "Contents": [
                    {"Key": "videos/clip.mp4", "Size": 5000},
                    {"Key": "videos/notes.txt", "Size": 100},
                    {"Key": "videos/movie.mkv", "Size": 90000},
                ]
            }
        ]
        mock_boto3 = MagicMock()
        mock_boto3.client.return_value = mock_client

        with patch.dict("sys.modules", {"boto3": mock_boto3}):
            src = S3Source(bucket="my-bucket")
            src.authenticate()
            files = src.list_videos()
            assert isinstance(files, list)
            # Only .mp4 and .mkv are video extensions
            assert len(files) == 2
            names = [f.name for f in files]
            assert "clip.mp4" in names
            # Bug fix: the block previously ended with a dangling bare token
            # `a` (a NameError at runtime); restore the intended assertion.
            assert "movie.mkv" in names
--- a/tests/test_sources.py
+++ b/tests/test_sources.py
@@ -0,0 +1,538 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/tests/test_sources.py
+++ b/tests/test_sources.py
@@ -0,0 +1,538 @@
1 """Tests for all source connectors: import, instantiation, authenticate, list_videos."""
2
import os
from unittest.mock import MagicMock, patch

import pytest

from video_processor.sources.base import BaseSource, SourceFile
9
10 # ---------------------------------------------------------------------------
11 # SourceFile model
12 # ---------------------------------------------------------------------------
13
14
15 def test_source_file_creation():
16 sf = SourceFile(name="test.mp4", id="abc123")
17 assert sf.name == "test.mp4"
18 assert sf.id == "abc123"
19 assert sf.size_bytes is None
20 assert sf.mime_type is None
21
22
23 def test_source_file_with_all_fields():
24 sf = SourceFile(
25 name="video.mp4",
26 id="v1",
27 size_bytes=1024,
28 mime_type="video/mp4",
29 modified_at="2025-01-01",
30 path="folder/video.mp4",
31 )
32 assert sf.size_bytes == 1024
33 assert sf.path == "folder/video.mp4"
34
35
36 # ---------------------------------------------------------------------------
37 # YouTubeSource
38 # ---------------------------------------------------------------------------
39
40
41 class TestYouTubeSource:
42 def test_import(self):
43 from video_processor.sources.youtube_source import YouTubeSource
44
45 assert YouTubeSource is not None
46
47 def test_constructor(self):
48 from video_processor.sources.youtube_source import YouTubeSource
49
50 src = YouTubeSource(url="https://www.youtube.com/watch?v=dQw4w9WgXcQ")
51 assert src.video_id == "dQw4w9WgXcQ"
52 assert src.audio_only is False
53
54 def test_constructor_audio_only(self):
55 from video_processor.sources.youtube_source import YouTubeSource
56
57 src = YouTubeSource(url="https://youtu.be/dQw4w9WgXcQ", audio_only=True)
58 assert src.audio_only is True
59
60 def test_constructor_shorts_url(self):
61 from video_processor.sources.youtube_source import YouTubeSource
62
63 src = YouTubeSource(url="https://youtube.com/shorts/dQw4w9WgXcQ")
64 assert src.video_id == "dQw4w9WgXcQ"
65
66 def test_constructor_invalid_url(self):
67 from video_processor.sources.youtube_source import YouTubeSource
68
69 with pytest.raises(ValueError, match="Could not extract"):
70 YouTubeSource(url="https://example.com/not-youtube")
71
72 @patch.dict(os.environ, {}, clear=False)
73 def test_authenticate_no_ytdlp(self):
74 from video_processor.sources.youtube_source import YouTubeSource
75
76 src = YouTubeSource(url="https://youtube.com/watch?v=dQw4w9WgXcQ")
77 with patch.dict("sys.modules", {"yt_dlp": None}):
78 # yt_dlp import will fail
79 result = src.authenticate()
80 # Result depends on whether yt_dlp is installed; just check it returns bool
81 assert isinstance(result, bool)
82
83 def test_list_videos(self):
84 from video_processor.sources.youtube_source import YouTubeSource
85
86 mock_ydl = MagicMock()
87 mock_ydl.__enter__ = MagicMock(return_value=mock_ydl)
88 mock_ydl.__exit__ = MagicMock(return_value=False)
89 mock_ydl.extract_info.return_value = {
90 "title": "Test Video",
91 "filesize": 1000,
92 }
93 mock_ydl_cls = MagicMock(return_value=mock_ydl)
94 mock_module = MagicMock()
95 mock_module.YoutubeDL = mock_ydl_cls
96
97 with patch.dict("sys.modules", {"yt_dlp": mock_module}):
98 src = YouTubeSource(url="https://youtube.com/watch?v=dQw4w9WgXcQ")
99 files = src.list_videos()
100 assert isinstance(files, list)
101 assert len(files) == 1
102 assert files[0].name == "Test Video"
103
104
105 # ---------------------------------------------------------------------------
106 # WebSource
107 # ---------------------------------------------------------------------------
108
109
110 class TestWebSource:
111 def test_import(self):
112 from video_processor.sources.web_source import WebSource
113
114 assert WebSource is not None
115
116 def test_constructor(self):
117 from video_processor.sources.web_source import WebSource
118
119 src = WebSource(url="https://example.com/page")
120 assert src.url == "https://example.com/page"
121
122 def test_authenticate(self):
123 from video_processor.sources.web_source import WebSource
124
125 src = WebSource(url="https://example.com")
126 assert src.authenticate() is True
127
128 def test_list_videos(self):
129 from video_processor.sources.web_source import WebSource
130
131 src = WebSource(url="https://example.com/article")
132 files = src.list_videos()
133 assert isinstance(files, list)
134 assert len(files) == 1
135 assert files[0].mime_type == "text/html"
136
137
138 # ---------------------------------------------------------------------------
139 # GitHubSource
140 # ---------------------------------------------------------------------------
141
142
143 class TestGitHubSource:
144 def test_import(self):
145 from video_processor.sources.github_source import GitHubSource
146
147 assert GitHubSource is not None
148
149 def test_constructor(self):
150 from video_processor.sources.github_source import GitHubSource
151
152 src = GitHubSource(repo="owner/repo")
153 assert src.repo == "owner/repo"
154 assert src.include_issues is True
155 assert src.include_prs is True
156
157 @patch.dict(os.environ, {"GITHUB_TOKEN": "ghp_test123"})
158 def test_authenticate_with_env_token(self):
159 from video_processor.sources.github_source import GitHubSource
160
161 src = GitHubSource(repo="owner/repo")
162 result = src.authenticate()
163 assert result is True
164 assert src._token == "ghp_test123"
165
166 @patch("requests.get")
167 @patch.dict(os.environ, {"GITHUB_TOKEN": "ghp_test123"})
168 def test_list_videos(self, mock_get):
169 from video_processor.sources.github_source import GitHubSource
170
171 # Mock responses for readme, issues, and PRs
172 readme_resp = MagicMock()
173 readme_resp.ok = True
174
175 issues_resp = MagicMock()
176 issues_resp.ok = True
177 issues_resp.json.return_value = [
178 {"number": 1, "title": "Bug report", "id": 1},
179 {"number": 2, "title": "Feature request", "id": 2, "pull_request": {}},
180 ]
181
182 prs_resp = MagicMock()
183 prs_resp.ok = True
184 prs_resp.json.return_value = [
185 {"number": 3, "title": "Fix bug"},
186 ]
187
188 mock_get.side_effect = [readme_resp, issues_resp, prs_resp]
189
190 src = GitHubSource(repo="owner/repo")
191 src.authenticate()
192 files = src.list_videos()
193 assert isinstance(files, list)
194 # README + 1 issue (one filtered as PR) + 1 PR = 3
195 assert len(files) == 3
196
197
198 # ---------------------------------------------------------------------------
199 # RedditSource
200 # ---------------------------------------------------------------------------
201
202
203 class TestRedditSource:
204 def test_import(self):
205 from video_processor.sources.reddit_source import RedditSource
206
207 assert RedditSource is not None
208
209 def test_constructor(self):
210 from video_processor.sources.reddit_source import RedditSource
211
212 src = RedditSource(url="https://reddit.com/r/python/comments/abc123/test/")
213 assert src.url == "https://reddit.com/r/python/comments/abc123/test"
214
215 def test_authenticate(self):
216 from video_processor.sources.reddit_source import RedditSource
217
218 src = RedditSource(url="https://reddit.com/r/test")
219 assert src.authenticate() is True
220
221 def test_list_videos(self):
222 from video_processor.sources.reddit_source import RedditSource
223
224 src = RedditSource(url="https://reddit.com/r/python/comments/abc/post")
225 files = src.list_videos()
226 assert isinstance(files, list)
227 assert len(files) == 1
228 assert files[0].mime_type == "text/plain"
229
230
231 # ---------------------------------------------------------------------------
232 # HackerNewsSource
233 # ---------------------------------------------------------------------------
234
235
236 class TestHackerNewsSource:
237 def test_import(self):
238 from video_processor.sources.hackernews_source import HackerNewsSource
239
240 assert HackerNewsSource is not None
241
242 def test_constructor(self):
243 from video_processor.sources.hackernews_source import HackerNewsSource
244
245 src = HackerNewsSource(item_id=12345678)
246 assert src.item_id == 12345678
247 assert src.max_comments == 200
248
249 def test_authenticate(self):
250 from video_processor.sources.hackernews_source import HackerNewsSource
251
252 src = HackerNewsSource(item_id=12345678)
253 assert src.authenticate() is True
254
255 def test_list_videos(self):
256 from video_processor.sources.hackernews_source import HackerNewsSource
257
258 src = HackerNewsSource(item_id=99999)
259 files = src.list_videos()
260 assert isinstance(files, list)
261 assert len(files) == 1
262 assert files[0].id == "99999"
263
264
265 # ---------------------------------------------------------------------------
266 # RSSSource
267 # ---------------------------------------------------------------------------
268
269
270 class TestRSSSource:
271 def test_import(self):
272 from video_processor.sources.rss_source import RSSSource
273
274 assert RSSSource is not None
275
276 def test_constructor(self):
277 from video_processor.sources.rss_source import RSSSource
278
279 src = RSSSource(url="https://example.com/feed.xml", max_entries=20)
280 assert src.url == "https://example.com/feed.xml"
281 assert src.max_entries == 20
282
283 def test_authenticate(self):
284 from video_processor.sources.rss_source import RSSSource
285
286 src = RSSSource(url="https://example.com/feed.xml")
287 assert src.authenticate() is True
288
289 @patch("requests.get")
290 def test_list_videos(self, mock_get):
291 from video_processor.sources.rss_source import RSSSource
292
293 rss_xml = """<?xml version="1.0"?>
294 <rss version="2.0">
295 <channel>
296 <item>
297 <title>Entry 1</title>
298 <link>https://example.com/1</link>
299 <description>First entry</description>
300 <pubDate>Mon, 01 Jan 2025 00:00:00 GMT</pubDate>
301 </item>
302 </channel>
303 </rss>"""
304 mock_resp = MagicMock()
305 mock_resp.text = rss_xml
306 mock_resp.raise_for_status = MagicMock()
307 mock_get.return_value = mock_resp
308
309 src = RSSSource(url="https://example.com/feed.xml")
310 files = src.list_videos()
311 assert isinstance(files, list)
312 assert len(files) >= 1
313
314
315 # ---------------------------------------------------------------------------
316 # PodcastSource
317 # ---------------------------------------------------------------------------
318
319
320 class TestPodcastSource:
321 def test_import(self):
322 from video_processor.sources.podcast_source import PodcastSource
323
324 assert PodcastSource is not None
325
326 def test_constructor(self):
327 from video_processor.sources.podcast_source import PodcastSource
328
329 src = PodcastSource(feed_url="https://example.com/podcast.xml", max_episodes=5)
330 assert src.feed_url == "https://example.com/podcast.xml"
331 assert src.max_episodes == 5
332
333 def test_authenticate(self):
334 from video_processor.sources.podcast_source import PodcastSource
335
336 src = PodcastSource(feed_url="https://example.com/podcast.xml")
337 assert src.authenticate() is True
338
339 @patch("requests.get")
340 def test_list_videos(self, mock_get):
341 from video_processor.sources.podcast_source import PodcastSource
342
343 podcast_xml = """<?xml version="1.0"?>
344 <rss version="2.0">
345 <channel>
346 <item>
347 <title>Episode 1</title>
348 <enclosure url="https://example.com/ep1.mp3" type="audio/mpeg" />
349 <pubDate>Mon, 01 Jan 2025 00:00:00 GMT</pubDate>
350 </item>
351 </channel>
352 </rss>"""
353 mock_resp = MagicMock()
354 mock_resp.text = podcast_xml
355 mock_resp.raise_for_status = MagicMock()
356 mock_get.return_value = mock_resp
357
358 src = PodcastSource(feed_url="https://example.com/podcast.xml")
359 files = src.list_videos()
360 assert isinstance(files, list)
361 assert len(files) == 1
362 assert files[0].mime_type == "audio/mpeg"
363
364
365 # ---------------------------------------------------------------------------
366 # TwitterSource
367 # ---------------------------------------------------------------------------
368
369
370 class TestTwitterSource:
371 def test_import(self):
372 from video_processor.sources.twitter_source import TwitterSource
373
374 assert TwitterSource is not None
375
376 def test_constructor(self):
377 from video_processor.sources.twitter_source import TwitterSource
378
379 src = TwitterSource(url="https://twitter.com/user/status/123456")
380 assert src.url == "https://twitter.com/user/status/123456"
381
382 @patch.dict(os.environ, {"TWITTER_BEARER_TOKEN": "test_token"})
383 def test_authenticate_with_bearer_token(self):
384 from video_processor.sources.twitter_source import TwitterSource
385
386 src = TwitterSource(url="https://twitter.com/user/status/123456")
387 assert src.authenticate() is True
388
389 @patch.dict(os.environ, {}, clear=True)
390 def test_authenticate_no_token_no_gallery_dl(self):
391 from video_processor.sources.twitter_source import TwitterSource
392
393 src = TwitterSource(url="https://twitter.com/user/status/123456")
394 with patch.dict("sys.modules", {"gallery_dl": None}):
395 result = src.authenticate()
396 assert isinstance(result, bool)
397
398 def test_list_videos(self):
399 from video_processor.sources.twitter_source import TwitterSource
400
401 src = TwitterSource(url="https://twitter.com/user/status/123456")
402 files = src.list_videos()
403 assert isinstance(files, list)
404 assert len(files) == 1
405
406
407 # ---------------------------------------------------------------------------
408 # ArxivSource
409 # ---------------------------------------------------------------------------
410
411
412 class TestArxivSource:
413 def test_import(self):
414 from video_processor.sources.arxiv_source import ArxivSource
415
416 assert ArxivSource is not None
417
418 def test_constructor(self):
419 from video_processor.sources.arxiv_source import ArxivSource
420
421 src = ArxivSource(url_or_id="2301.07041")
422 assert src.arxiv_id == "2301.07041"
423
424 def test_constructor_from_url(self):
425 from video_processor.sources.arxiv_source import ArxivSource
426
427 src = ArxivSource(url_or_id="https://arxiv.org/abs/2301.07041v2")
428 assert src.arxiv_id == "2301.07041v2"
429
430 def test_constructor_invalid(self):
431 from video_processor.sources.arxiv_source import ArxivSource
432
433 with pytest.raises(ValueError, match="Could not extract"):
434 ArxivSource(url_or_id="not-an-arxiv-id")
435
436 def test_authenticate(self):
437 from video_processor.sources.arxiv_source import ArxivSource
438
439 src = ArxivSource(url_or_id="2301.07041")
440 assert src.authenticate() is True
441
442 @patch("requests.get")
443 def test_list_videos(self, mock_get):
444 from video_processor.sources.arxiv_source import ArxivSource
445
446 atom_xml = """<?xml version="1.0"?>
447 <feed xmlns="http://www.w3.org/2005/Atom"
448 xmlns:arxiv="http://arxiv.org/schemas/atom">
449 <entry>
450 <title>Test Paper</title>
451 <summary>Abstract text here.</summary>
452 <author><name>Author One</name></author>
453 <published>2023-01-15T00:00:00Z</published>
454 </entry>
455 </feed>"""
456 mock_resp = MagicMock()
457 mock_resp.text = atom_xml
458 mock_resp.raise_for_status = MagicMock()
459 mock_get.return_value = mock_resp
460
461 src = ArxivSource(url_or_id="2301.07041")
462 files = src.list_videos()
463 assert isinstance(files, list)
464 assert len(files) == 2 # metadata + pdf
465
466
467 # ---------------------------------------------------------------------------
468 # S3Source
469 # ---------------------------------------------------------------------------
470
471
class TestS3Source:
    """S3 connector tests with boto3 fully mocked via sys.modules injection."""

    def test_import(self):
        from video_processor.sources.s3_source import S3Source

        assert S3Source is not None

    def test_constructor(self):
        from video_processor.sources.s3_source import S3Source

        src = S3Source(bucket="my-bucket", prefix="videos/", region="us-east-1")
        assert src.bucket == "my-bucket"
        assert src.prefix == "videos/"
        assert src.region == "us-east-1"

    def test_authenticate_success(self):
        from video_processor.sources.s3_source import S3Source

        # head_bucket succeeding means the bucket is reachable -> auth OK.
        mock_client = MagicMock()
        mock_client.head_bucket.return_value = {}
        mock_boto3 = MagicMock()
        mock_boto3.client.return_value = mock_client

        with patch.dict("sys.modules", {"boto3": mock_boto3}):
            src = S3Source(bucket="my-bucket")
            assert src.authenticate() is True

    def test_authenticate_failure(self):
        from video_processor.sources.s3_source import S3Source

        # head_bucket raising means the bucket is unreachable -> auth fails.
        mock_client = MagicMock()
        mock_client.head_bucket.side_effect = Exception("Access Denied")
        mock_boto3 = MagicMock()
        mock_boto3.client.return_value = mock_client

        with patch.dict("sys.modules", {"boto3": mock_boto3}):
            src = S3Source(bucket="bad-bucket")
            assert src.authenticate() is False

    def test_list_videos(self):
        from video_processor.sources.s3_source import S3Source

        mock_client = MagicMock()
        mock_client.head_bucket.return_value = {}
        paginator = MagicMock()
        mock_client.get_paginator.return_value = paginator
        # One page containing two video files and one non-video file.
        paginator.paginate.return_value = [
            {
                "Contents": [
                    {"Key": "videos/clip.mp4", "Size": 5000},
                    {"Key": "videos/notes.txt", "Size": 100},
                    {"Key": "videos/movie.mkv", "Size": 90000},
                ]
            }
        ]
        mock_boto3 = MagicMock()
        mock_boto3.client.return_value = mock_client

        with patch.dict("sys.modules", {"boto3": mock_boto3}):
            src = S3Source(bucket="my-bucket")
            src.authenticate()
            files = src.list_videos()
            assert isinstance(files, list)
            # Only .mp4 and .mkv are video extensions
            assert len(files) == 2
            names = [f.name for f in files]
            assert "clip.mp4" in names
            # Fix: the file previously ended with a truncated bare `a`, which
            # raised NameError at runtime; complete the intended assertion.
            assert "movie.mkv" in names

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button