julee 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64) hide show
  1. julee/__init__.py +1 -1
  2. julee/api/tests/routers/test_assembly_specifications.py +2 -0
  3. julee/api/tests/routers/test_documents.py +2 -0
  4. julee/api/tests/routers/test_knowledge_service_configs.py +2 -0
  5. julee/api/tests/routers/test_knowledge_service_queries.py +2 -0
  6. julee/api/tests/routers/test_system.py +2 -0
  7. julee/api/tests/routers/test_workflows.py +2 -0
  8. julee/api/tests/test_app.py +2 -0
  9. julee/api/tests/test_dependencies.py +2 -0
  10. julee/api/tests/test_requests.py +2 -0
  11. julee/contrib/polling/__init__.py +22 -19
  12. julee/contrib/polling/apps/__init__.py +17 -0
  13. julee/contrib/polling/apps/worker/__init__.py +17 -0
  14. julee/contrib/polling/apps/worker/pipelines.py +288 -0
  15. julee/contrib/polling/domain/__init__.py +7 -9
  16. julee/contrib/polling/domain/models/__init__.py +6 -7
  17. julee/contrib/polling/domain/models/polling_config.py +18 -1
  18. julee/contrib/polling/domain/services/__init__.py +6 -5
  19. julee/contrib/polling/domain/services/poller.py +1 -1
  20. julee/contrib/polling/infrastructure/__init__.py +9 -8
  21. julee/contrib/polling/infrastructure/services/__init__.py +6 -5
  22. julee/contrib/polling/infrastructure/services/polling/__init__.py +6 -5
  23. julee/contrib/polling/infrastructure/services/polling/http/__init__.py +6 -5
  24. julee/contrib/polling/infrastructure/services/polling/http/http_poller_service.py +5 -2
  25. julee/contrib/polling/infrastructure/temporal/__init__.py +12 -12
  26. julee/contrib/polling/infrastructure/temporal/activities.py +1 -1
  27. julee/contrib/polling/infrastructure/temporal/manager.py +291 -0
  28. julee/contrib/polling/infrastructure/temporal/proxies.py +1 -1
  29. julee/contrib/polling/tests/unit/apps/worker/test_pipelines.py +580 -0
  30. julee/contrib/polling/tests/unit/infrastructure/services/polling/http/test_http_poller_service.py +40 -2
  31. julee/contrib/polling/tests/unit/infrastructure/temporal/__init__.py +7 -0
  32. julee/contrib/polling/tests/unit/infrastructure/temporal/test_manager.py +475 -0
  33. julee/domain/models/assembly/tests/test_assembly.py +2 -0
  34. julee/domain/models/assembly_specification/tests/test_assembly_specification.py +2 -0
  35. julee/domain/models/assembly_specification/tests/test_knowledge_service_query.py +2 -0
  36. julee/domain/models/custom_fields/tests/test_custom_fields.py +2 -0
  37. julee/domain/models/document/tests/test_document.py +2 -0
  38. julee/domain/models/policy/tests/test_document_policy_validation.py +2 -0
  39. julee/domain/models/policy/tests/test_policy.py +2 -0
  40. julee/domain/use_cases/tests/test_extract_assemble_data.py +2 -0
  41. julee/domain/use_cases/tests/test_initialize_system_data.py +2 -0
  42. julee/domain/use_cases/tests/test_validate_document.py +2 -0
  43. julee/maintenance/release.py +10 -5
  44. julee/repositories/memory/tests/test_document.py +2 -0
  45. julee/repositories/memory/tests/test_document_policy_validation.py +2 -0
  46. julee/repositories/memory/tests/test_policy.py +2 -0
  47. julee/repositories/minio/tests/test_assembly.py +2 -0
  48. julee/repositories/minio/tests/test_assembly_specification.py +2 -0
  49. julee/repositories/minio/tests/test_client_protocol.py +3 -0
  50. julee/repositories/minio/tests/test_document.py +2 -0
  51. julee/repositories/minio/tests/test_document_policy_validation.py +2 -0
  52. julee/repositories/minio/tests/test_knowledge_service_config.py +2 -0
  53. julee/repositories/minio/tests/test_knowledge_service_query.py +2 -0
  54. julee/repositories/minio/tests/test_policy.py +2 -0
  55. julee/services/knowledge_service/anthropic/tests/test_knowledge_service.py +2 -0
  56. julee/services/knowledge_service/memory/test_knowledge_service.py +2 -0
  57. julee/services/knowledge_service/test_factory.py +2 -0
  58. julee/util/tests/test_decorators.py +2 -0
  59. julee-0.1.5.dist-info/METADATA +103 -0
  60. {julee-0.1.4.dist-info → julee-0.1.5.dist-info}/RECORD +63 -56
  61. julee-0.1.4.dist-info/METADATA +0 -197
  62. {julee-0.1.4.dist-info → julee-0.1.5.dist-info}/WHEEL +0 -0
  63. {julee-0.1.4.dist-info → julee-0.1.5.dist-info}/licenses/LICENSE +0 -0
  64. {julee-0.1.4.dist-info → julee-0.1.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,580 @@
1
+ """
2
+ Unit tests for polling worker pipelines.
3
+
4
+ This module tests the NewDataDetectionPipeline workflow using Temporal's test
5
+ environment, which provides realistic workflow execution with time-skipping
6
+ capabilities while maintaining fast test performance.
7
+
8
+ The tests mock external dependencies (activities) while testing the actual
9
+ workflow orchestration logic and temporal behaviors.
10
+ """
11
+
12
+ import hashlib
13
+ import uuid
14
+ from datetime import datetime, timezone
15
+ from unittest.mock import AsyncMock, patch
16
+
17
+ import pytest
18
+ from temporalio import activity
19
+ from temporalio.client import WorkflowFailureError
20
+ from temporalio.contrib.pydantic import pydantic_data_converter
21
+ from temporalio.testing import WorkflowEnvironment
22
+ from temporalio.worker import Worker
23
+
24
+ from julee.contrib.polling.apps.worker.pipelines import NewDataDetectionPipeline
25
+ from julee.contrib.polling.domain.models.polling_config import (
26
+ PollingConfig,
27
+ PollingProtocol,
28
+ PollingResult,
29
+ )
30
+
31
+
32
@pytest.fixture
async def workflow_env():
    """Yield a time-skipping Temporal test environment.

    The pydantic data converter is installed so that workflow inputs and
    outputs round-trip through pydantic models.
    """
    env_cm = await WorkflowEnvironment.start_time_skipping(
        data_converter=pydantic_data_converter
    )
    async with env_cm as env:
        yield env
39
+
40
+
41
@pytest.fixture
def sample_config():
    """Build the minimal HTTP PollingConfig shared by all tests."""
    connection = {"url": "https://api.example.com/data"}
    return PollingConfig(
        endpoint_identifier="test-api",
        polling_protocol=PollingProtocol.HTTP,
        connection_params=connection,
        timeout_seconds=30,
    )
50
+
51
+
52
@pytest.fixture
def mock_polling_results():
    """Provide canned PollingResult objects for the test scenarios.

    Keys:
        first_data     -- a successful first poll
        changed_data   -- a successful poll with different content
        same_data      -- byte-identical to first_data (same hash)
        failed_polling -- an unsuccessful poll with an error message
    """

    def _success(payload: bytes) -> PollingResult:
        # All successful results share the same shape; only the payload
        # (and therefore the hash) varies.
        return PollingResult(
            success=True,
            content=payload,
            polled_at=datetime.now(timezone.utc),
            content_hash=hashlib.sha256(payload).hexdigest(),
        )

    return {
        "first_data": _success(b"first response data"),
        "changed_data": _success(b"changed response data"),
        # Deliberately identical to first_data so change detection sees
        # "no new data".
        "same_data": _success(b"first response data"),
        "failed_polling": PollingResult(
            success=False,
            content=b"",
            polled_at=datetime.now(timezone.utc),
            error_message="Connection timeout",
        ),
    }
81
+
82
+
83
# Fallback polling activity registered under the production activity name.
# Individual tests define their own @activity.defn replacements instead of
# relying on this default.
@activity.defn(name="julee.contrib.polling.poll_endpoint")
async def mock_poll_endpoint(config: PollingConfig) -> PollingResult:
    """Return a generic successful result; tests patch in specific ones."""
    polled = datetime.now(timezone.utc)
    return PollingResult(
        success=True,
        content=b"default mock response",
        polled_at=polled,
    )
92
+
93
+
94
class TestNewDataDetectionPipelineFirstRun:
    """Test first run scenarios (no previous completion)."""

    @pytest.mark.asyncio
    async def test_first_run_detects_new_data(
        self, workflow_env, sample_config, mock_polling_results
    ):
        """Test first run always detects new data.

        With no previous completion, the pipeline has no prior hash to
        compare against, so any successful poll counts as new data.
        """

        # Create a mock activity function that returns the desired response
        @activity.defn(name="julee.contrib.polling.poll_endpoint")
        async def test_mock_activity(config: PollingConfig) -> PollingResult:
            content_str = "first response data"
            return PollingResult(
                success=True,
                content=content_str.encode(),
                polled_at=datetime.now(timezone.utc),
                content_hash=hashlib.sha256(content_str.encode()).hexdigest(),
            )

        async with Worker(
            workflow_env.client,
            task_queue="test-queue",
            workflows=[NewDataDetectionPipeline],
            activities=[test_mock_activity],
        ):
            # Execute workflow with no previous completion
            result = await workflow_env.client.execute_workflow(
                NewDataDetectionPipeline.run,
                args=[
                    sample_config,
                    None,
                ],  # config, downstream_pipeline
                id=str(uuid.uuid4()),
                task_queue="test-queue",
            )

            # Verify first run behavior: new data, no prior hash, and no
            # downstream trigger because downstream_pipeline was None.
            assert result["detection_result"]["has_new_data"] is True
            assert result["detection_result"]["previous_hash"] is None
            assert result["downstream_triggered"] is False
            assert result["endpoint_id"] == "test-api"

            # Verify polling result structure (serialized dict, not the
            # PollingResult model itself)
            polling_result = result["polling_result"]
            assert polling_result["success"] is True
            assert (
                polling_result["content_hash"]
                == hashlib.sha256(b"first response data").hexdigest()
            )
            assert "polled_at" in polling_result
            assert "content_length" in polling_result

    @pytest.mark.asyncio
    async def test_first_run_with_downstream_pipeline(
        self, workflow_env, sample_config, mock_polling_results
    ):
        """Test first run with downstream pipeline triggering."""

        # Create a mock activity function that returns the desired response
        @activity.defn(name="julee.contrib.polling.poll_endpoint")
        async def test_mock_activity(config: PollingConfig) -> PollingResult:
            content_bytes = b"first response data"
            return PollingResult(
                success=True,
                content=content_bytes,
                polled_at=datetime.now(timezone.utc),
                content_hash=hashlib.sha256(content_bytes).hexdigest(),
            )

        # Patch workflow.start_child_workflow as seen by the pipelines
        # module, to avoid trying to start actual downstream workflows.
        with patch(
            "julee.contrib.polling.apps.worker.pipelines.workflow.start_child_workflow",
            new_callable=AsyncMock,
        ) as mock_start:
            async with Worker(
                workflow_env.client,
                task_queue="test-queue",
                workflows=[NewDataDetectionPipeline],
                activities=[test_mock_activity],
            ):
                result = await workflow_env.client.execute_workflow(
                    NewDataDetectionPipeline.run,
                    args=[
                        sample_config,
                        "TestDownstreamWorkflow",
                    ],  # config, downstream_pipeline
                    id=str(uuid.uuid4()),
                    task_queue="test-queue",
                )

            # Verify downstream was triggered exactly once
            assert result["downstream_triggered"] is True
            mock_start.assert_called_once()

            # Verify downstream workflow call parameters
            call_args = mock_start.call_args
            # For start_child_workflow, the workflow name is the first
            # positional arg
            assert call_args[0][0] == "TestDownstreamWorkflow"  # Workflow name
            # The args parameter is passed as a keyword argument; on a first
            # run there is no previous data, so the first element is None.
            assert call_args[1]["args"] == [
                None,
                b"first response data",
            ]  # Args: previous_data, new_data
            assert (
                "downstream-test-api-" in call_args[1]["id"]
            )  # Workflow ID contains endpoint
            assert call_args[1]["task_queue"] == "downstream-processing-queue"
202
+
203
+
204
class TestNewDataDetectionPipelineSubsequentRuns:
    """Test subsequent runs with previous completion data."""

    @pytest.mark.asyncio
    async def test_no_changes_detected(
        self, workflow_env, sample_config, mock_polling_results
    ):
        """Test when content hasn't changed since last run.

        The polled content hashes to the same value recorded in the
        previous completion, so no new data should be reported.
        """

        # Create a mock activity function that returns the desired response
        @activity.defn(name="julee.contrib.polling.poll_endpoint")
        async def test_mock_activity(config: PollingConfig) -> PollingResult:
            content_bytes = b"first response data"  # Same as first_data
            return PollingResult(
                success=True,
                content=content_bytes,
                polled_at=datetime.now(timezone.utc),
                content_hash=hashlib.sha256(content_bytes).hexdigest(),
            )

        # Simulated result of the previous scheduled run; the hash here
        # matches what the mock activity returns this run.
        previous_completion = {
            "polling_result": {
                "content_hash": hashlib.sha256(b"first response data").hexdigest(),
                "content": "first response data",
                "success": True,
            },
            "detection_result": {
                "has_new_data": True,
                "previous_hash": None,
                "current_hash": hashlib.sha256(b"first response data").hexdigest(),
            },
            "downstream_triggered": False,
            "endpoint_id": "test-api",
            "completed_at": "2023-01-01T00:00:00Z",
        }

        async with Worker(
            workflow_env.client,
            task_queue="test-queue",
            workflows=[NewDataDetectionPipeline],
            activities=[test_mock_activity],
        ):
            # Patch temporalio.workflow.get_last_completion_result so the
            # workflow sees the previous completion.
            # NOTE(review): patching at the temporalio module level relies on
            # the workflow sandbox observing this patch — confirm against the
            # pipeline's sandbox configuration.
            with patch(
                "temporalio.workflow.get_last_completion_result"
            ) as mock_get_last:
                mock_get_last.return_value = previous_completion

                result = await workflow_env.client.execute_workflow(
                    NewDataDetectionPipeline.run,
                    args=[
                        sample_config,
                        None,
                    ],  # config, downstream_pipeline
                    id=str(uuid.uuid4()),
                    task_queue="test-queue",
                )

                # Verify no changes detected: same hash as previous run
                assert result["detection_result"]["has_new_data"] is False
                assert result["downstream_triggered"] is False
                assert result["detection_result"]["previous_hash"] is not None

    @pytest.mark.asyncio
    async def test_changes_detected(
        self, workflow_env, sample_config, mock_polling_results
    ):
        """Test when content has changed since last run."""

        # Create a mock activity function that returns the desired response
        @activity.defn(name="julee.contrib.polling.poll_endpoint")
        async def test_mock_activity(config: PollingConfig) -> PollingResult:
            content_bytes = b"changed response data"
            return PollingResult(
                success=True,
                content=content_bytes,
                polled_at=datetime.now(timezone.utc),
                content_hash=hashlib.sha256(content_bytes).hexdigest(),
            )

        # Previous completion recorded a hash of the ORIGINAL content, which
        # differs from what the mock activity returns this run.
        previous_completion = {
            "polling_result": {
                "content_hash": hashlib.sha256(b"first response data").hexdigest(),
                "content": "first response data",
                "success": True,
            },
            "detection_result": {
                "has_new_data": True,
                "previous_hash": None,
                "current_hash": hashlib.sha256(b"first response data").hexdigest(),
            },
            "downstream_triggered": False,
            "endpoint_id": "test-api",
            "completed_at": "2023-01-01T00:00:00Z",
        }

        with patch(
            "julee.contrib.polling.apps.worker.pipelines.workflow.start_child_workflow",
            new_callable=AsyncMock,
        ) as mock_start:
            async with Worker(
                workflow_env.client,
                task_queue="test-queue",
                workflows=[NewDataDetectionPipeline],
                activities=[test_mock_activity],
            ):
                # Use mock to simulate last completion result
                with patch(
                    "temporalio.workflow.get_last_completion_result"
                ) as mock_get_last:
                    mock_get_last.return_value = previous_completion

                    result = await workflow_env.client.execute_workflow(
                        NewDataDetectionPipeline.run,
                        args=[
                            sample_config,
                            "TestDownstreamWorkflow",
                        ],  # config, downstream_pipeline
                        id=str(uuid.uuid4()),
                        task_queue="test-queue",
                    )

                    # Verify changes detected and downstream triggered
                    assert result["detection_result"]["has_new_data"] is True
                    assert result["downstream_triggered"] is True
                    assert (
                        result["detection_result"]["current_hash"]
                        != result["detection_result"]["previous_hash"]
                    )
                    mock_start.assert_called_once()
336
+
337
+
338
class TestNewDataDetectionPipelineWorkflowQueries:
    """Test workflow query methods during execution."""

    @pytest.mark.asyncio
    async def test_workflow_queries(
        self, workflow_env, sample_config, mock_polling_results
    ):
        """Test that workflow queries return correct state information.

        Starts the workflow without awaiting its result so queries can be
        issued mid-execution, then checks the final state after completion.
        """

        # Create a slow mock activity to allow time for queries.
        # NOTE(review): the activity sleeps via the test environment's
        # time-skipping sleep rather than asyncio.sleep — confirm this is the
        # intended way to hold the workflow open for mid-run queries.
        @activity.defn(name="julee.contrib.polling.poll_endpoint")
        async def test_mock_activity(config: PollingConfig) -> PollingResult:
            await workflow_env.sleep(1)  # Add delay to allow queries
            content_bytes = b"first response data"
            return PollingResult(
                success=True,
                content=content_bytes,
                polled_at=datetime.now(timezone.utc),
                content_hash=hashlib.sha256(content_bytes).hexdigest(),
            )

        async with Worker(
            workflow_env.client,
            task_queue="test-queue",
            workflows=[NewDataDetectionPipeline],
            activities=[test_mock_activity],
        ):
            # Start workflow (not execute_workflow: we need the handle
            # before the run finishes)
            handle = await workflow_env.client.start_workflow(
                NewDataDetectionPipeline.run,
                args=[
                    sample_config,
                    None,
                ],  # config, downstream_pipeline
                id=str(uuid.uuid4()),
                task_queue="test-queue",
            )

            # Query initial state
            current_step = await handle.query(NewDataDetectionPipeline.get_current_step)
            endpoint_id = await handle.query(NewDataDetectionPipeline.get_endpoint_id)
            has_new_data = await handle.query(NewDataDetectionPipeline.get_has_new_data)

            # Verify initial query responses. The time-skipping environment
            # may have advanced the workflow arbitrarily far, so any valid
            # step value is accepted here.
            assert current_step in [
                "initialized",
                "polling_endpoint",
                "detecting_changes",
                "completed",
            ]
            assert endpoint_id == "test-api"
            assert isinstance(has_new_data, bool)

            # Wait for completion
            await handle.result()

            # Query final state
            final_step = await handle.query(NewDataDetectionPipeline.get_current_step)
            final_has_new_data = await handle.query(
                NewDataDetectionPipeline.get_has_new_data
            )

            assert final_step == "completed"
            assert final_has_new_data is True  # First run should detect new data
402
+
403
+
404
class TestNewDataDetectionPipelineErrorHandling:
    """Test error handling and failure scenarios."""

    @pytest.mark.asyncio
    async def test_polling_activity_failure(
        self, workflow_env, sample_config, mock_polling_results
    ):
        """Test workflow behavior when polling activity fails.

        The activity raises on every attempt, so the workflow execution as
        a whole is expected to surface a WorkflowFailureError to the client.
        """

        # Create a failing mock activity
        @activity.defn(name="julee.contrib.polling.poll_endpoint")
        async def test_mock_activity(config: PollingConfig) -> PollingResult:
            raise RuntimeError("Polling failed")

        async with Worker(
            workflow_env.client,
            task_queue="test-queue",
            workflows=[NewDataDetectionPipeline],
            activities=[test_mock_activity],
        ):
            # Workflow should fail and re-raise the exception
            with pytest.raises(WorkflowFailureError):
                await workflow_env.client.execute_workflow(
                    NewDataDetectionPipeline.run,
                    args=[
                        sample_config,
                        None,
                    ],  # config, downstream_pipeline
                    id=str(uuid.uuid4()),
                    task_queue="test-queue",
                )

    @pytest.mark.asyncio
    async def test_downstream_trigger_failure_doesnt_fail_workflow(
        self, workflow_env, sample_config, mock_polling_results
    ):
        """Test that downstream pipeline failures don't fail the main workflow."""

        # Create a mock activity function that returns the desired response
        @activity.defn(name="julee.contrib.polling.poll_endpoint")
        async def test_mock_activity(config: PollingConfig) -> PollingResult:
            content_bytes = b"first response data"
            return PollingResult(
                success=True,
                content=content_bytes,
                polled_at=datetime.now(timezone.utc),
                content_hash=hashlib.sha256(content_bytes).hexdigest(),
            )

        # Patch workflow.start_child_workflow to raise, simulating a failure
        # when the pipeline attempts to trigger downstream processing.
        with patch(
            "julee.contrib.polling.apps.worker.pipelines.workflow.start_child_workflow",
            side_effect=RuntimeError("Downstream failed"),
        ):
            async with Worker(
                workflow_env.client,
                task_queue="test-queue",
                workflows=[NewDataDetectionPipeline],
                activities=[test_mock_activity],
            ):
                # Workflow should complete successfully despite downstream
                # failure.
                # FIX: pass exactly two positional args (config,
                # downstream_pipeline) like every other invocation of
                # NewDataDetectionPipeline.run in this module; the original
                # test passed a stray third None argument.
                result = await workflow_env.client.execute_workflow(
                    NewDataDetectionPipeline.run,
                    args=[
                        sample_config,
                        "TestDownstreamWorkflow",
                    ],  # config, downstream_pipeline
                    id=str(uuid.uuid4()),
                    task_queue="test-queue",
                )

            # Verify workflow completed but downstream triggering failed
            assert result["detection_result"]["has_new_data"] is True
            assert (
                result["downstream_triggered"] is False
            )  # Should be False due to failure
481
+
482
+
483
class TestNewDataDetectionPipelineIntegration:
    """Integration tests for complete workflow scenarios."""

    @pytest.mark.asyncio
    async def test_complete_polling_cycle(
        self, workflow_env, sample_config, mock_polling_results
    ):
        """Test a complete polling cycle: first run -> no changes -> changes detected.

        Each run's result dict is fed into the next run as the "previous
        completion" via a patched get_last_completion_result, mimicking a
        Temporal schedule's continue-from-last-completion behavior.
        """
        # NOTE(review): `responses` is only consulted for its length in the
        # index clamp below; the activity hard-codes the per-run payloads.
        responses = [
            mock_polling_results["first_data"],
            mock_polling_results["same_data"],
            mock_polling_results["changed_data"],
        ]
        response_index = 0

        # Create a cycling mock activity that returns different responses
        # on successive runs (shared counter via nonlocal).
        @activity.defn(name="julee.contrib.polling.poll_endpoint")
        async def test_mock_activity(config: PollingConfig) -> PollingResult:
            nonlocal response_index
            if response_index == 0:
                content_bytes = b"first response data"
            elif response_index == 1:
                content_bytes = b"first response data"  # Same as first
            else:
                content_bytes = b"changed response data"

            result = PollingResult(
                success=True,
                content=content_bytes,
                polled_at=datetime.now(timezone.utc),
                content_hash=hashlib.sha256(content_bytes).hexdigest(),
            )
            # Clamp so activity retries beyond run 3 keep the final payload
            response_index = min(response_index + 1, len(responses) - 1)
            return result

        with patch(
            "julee.contrib.polling.apps.worker.pipelines.workflow.start_child_workflow",
            new_callable=AsyncMock,
        ) as mock_start:
            async with Worker(
                workflow_env.client,
                task_queue="test-queue",
                workflows=[NewDataDetectionPipeline],
                activities=[test_mock_activity],
            ):
                # First run - should detect new data (no previous completion)
                result1 = await workflow_env.client.execute_workflow(
                    NewDataDetectionPipeline.run,
                    args=[
                        sample_config,
                        "TestDownstreamWorkflow",
                    ],  # config, downstream_pipeline
                    id=str(uuid.uuid4()),
                    task_queue="test-queue",
                )

                assert result1["detection_result"]["has_new_data"] is True
                assert result1["downstream_triggered"] is True

                # Second run - same content as run 1, so no changes
                with patch(
                    "temporalio.workflow.get_last_completion_result"
                ) as mock_get_last:
                    mock_get_last.return_value = result1
                    result2 = await workflow_env.client.execute_workflow(
                        NewDataDetectionPipeline.run,
                        args=[
                            sample_config,
                            "TestDownstreamWorkflow",
                        ],  # config, downstream_pipeline
                        id=str(uuid.uuid4()),
                        task_queue="test-queue",
                    )

                assert result2["detection_result"]["has_new_data"] is False
                assert result2["downstream_triggered"] is False

                # Third run - changed content, should detect changes
                with patch(
                    "temporalio.workflow.get_last_completion_result"
                ) as mock_get_last:
                    mock_get_last.return_value = result2
                    result3 = await workflow_env.client.execute_workflow(
                        NewDataDetectionPipeline.run,
                        args=[
                            sample_config,
                            "TestDownstreamWorkflow",
                        ],  # config, downstream_pipeline
                        id=str(uuid.uuid4()),
                        task_queue="test-queue",
                    )

                assert result3["detection_result"]["has_new_data"] is True
                assert result3["downstream_triggered"] is True

                # Verify downstream was called twice (run 1 and run 3)
                assert mock_start.call_count == 2
@@ -11,11 +11,16 @@ import hashlib
11
11
  import httpx
12
12
  import pytest
13
13
 
14
- from julee.contrib.polling.domain.models import PollingConfig, PollingProtocol
15
- from julee.contrib.polling.infrastructure.services.polling.http import (
14
+ from julee.contrib.polling.domain.models.polling_config import (
15
+ PollingConfig,
16
+ PollingProtocol,
17
+ )
18
+ from julee.contrib.polling.infrastructure.services.polling.http.http_poller_service import (
16
19
  HttpPollerService,
17
20
  )
18
21
 
22
+ pytestmark = pytest.mark.unit
23
+
19
24
 
20
25
  class TestHttpPollerServicePollEndpoint:
21
26
  """Test the poll_endpoint method of HttpPollerService."""
@@ -161,3 +166,36 @@ class TestHttpPollerServicePollEndpoint:
161
166
  assert str(captured_request.url) == "https://api.example.com/data"
162
167
  assert captured_request.method == "POST"
163
168
  assert captured_request.headers["Authorization"] == "Bearer token123"
169
+
170
    @pytest.mark.asyncio
    async def test_poll_endpoint_with_dict_config(self):
        """Test that poll_endpoint works with dict config (for schedule compatibility).

        Temporal schedules deliver the config as a serialized dict; this
        verifies a dict validated back into PollingConfig polls correctly.
        """

        def handler(request):
            # Fixed 200 response regardless of request details
            return httpx.Response(status_code=200, content=b"dict config test")

        mock_transport = httpx.MockTransport(handler)

        async with HttpPollerService() as service:
            # Swap in a client backed by the mock transport so no real
            # network call is made
            service.client = httpx.AsyncClient(transport=mock_transport)

            # Create a dict that represents a serialized PollingConfig (as
            # from a Temporal schedule)
            config_dict = {
                "endpoint_identifier": "test-api-dict",
                "polling_protocol": "http",
                "connection_params": {
                    "url": "https://api.example.com/scheduled",
                    "headers": {"X-Source": "schedule"},
                },
                "polling_params": {},
                "timeout_seconds": 30,
            }

            # Convert dict to PollingConfig (simulating what the workflow does)
            config = PollingConfig.model_validate(config_dict)

            result = await service.poll_endpoint(config)

            assert result.success is True
            assert result.content == b"dict config test"
            assert result.metadata["status_code"] == 200
@@ -0,0 +1,7 @@
1
+ """
2
+ Temporal infrastructure tests for the polling contrib module.
3
+
4
+ This module contains unit tests for the temporal-specific infrastructure
5
+ implementations of the polling contrib module, including polling managers,
6
+ workflow proxies, and activity implementations.
7
+ """