pyworkflow-engine 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. pyworkflow/__init__.py +10 -1
  2. pyworkflow/celery/tasks.py +272 -24
  3. pyworkflow/cli/__init__.py +4 -1
  4. pyworkflow/cli/commands/runs.py +4 -4
  5. pyworkflow/cli/commands/setup.py +203 -4
  6. pyworkflow/cli/utils/config_generator.py +76 -3
  7. pyworkflow/cli/utils/docker_manager.py +232 -0
  8. pyworkflow/context/__init__.py +13 -0
  9. pyworkflow/context/base.py +26 -0
  10. pyworkflow/context/local.py +80 -0
  11. pyworkflow/context/step_context.py +295 -0
  12. pyworkflow/core/registry.py +6 -1
  13. pyworkflow/core/step.py +141 -0
  14. pyworkflow/core/workflow.py +56 -0
  15. pyworkflow/engine/events.py +30 -0
  16. pyworkflow/engine/replay.py +39 -0
  17. pyworkflow/primitives/child_workflow.py +1 -1
  18. pyworkflow/runtime/local.py +1 -1
  19. pyworkflow/storage/__init__.py +14 -0
  20. pyworkflow/storage/base.py +35 -0
  21. pyworkflow/storage/cassandra.py +1747 -0
  22. pyworkflow/storage/config.py +69 -0
  23. pyworkflow/storage/dynamodb.py +31 -2
  24. pyworkflow/storage/file.py +28 -0
  25. pyworkflow/storage/memory.py +18 -0
  26. pyworkflow/storage/mysql.py +1159 -0
  27. pyworkflow/storage/postgres.py +27 -2
  28. pyworkflow/storage/schemas.py +4 -3
  29. pyworkflow/storage/sqlite.py +25 -2
  30. {pyworkflow_engine-0.1.7.dist-info → pyworkflow_engine-0.1.9.dist-info}/METADATA +7 -4
  31. pyworkflow_engine-0.1.9.dist-info/RECORD +91 -0
  32. pyworkflow_engine-0.1.9.dist-info/top_level.txt +1 -0
  33. dashboard/backend/app/__init__.py +0 -1
  34. dashboard/backend/app/config.py +0 -32
  35. dashboard/backend/app/controllers/__init__.py +0 -6
  36. dashboard/backend/app/controllers/run_controller.py +0 -86
  37. dashboard/backend/app/controllers/workflow_controller.py +0 -33
  38. dashboard/backend/app/dependencies/__init__.py +0 -5
  39. dashboard/backend/app/dependencies/storage.py +0 -50
  40. dashboard/backend/app/repositories/__init__.py +0 -6
  41. dashboard/backend/app/repositories/run_repository.py +0 -80
  42. dashboard/backend/app/repositories/workflow_repository.py +0 -27
  43. dashboard/backend/app/rest/__init__.py +0 -8
  44. dashboard/backend/app/rest/v1/__init__.py +0 -12
  45. dashboard/backend/app/rest/v1/health.py +0 -33
  46. dashboard/backend/app/rest/v1/runs.py +0 -133
  47. dashboard/backend/app/rest/v1/workflows.py +0 -41
  48. dashboard/backend/app/schemas/__init__.py +0 -23
  49. dashboard/backend/app/schemas/common.py +0 -16
  50. dashboard/backend/app/schemas/event.py +0 -24
  51. dashboard/backend/app/schemas/hook.py +0 -25
  52. dashboard/backend/app/schemas/run.py +0 -54
  53. dashboard/backend/app/schemas/step.py +0 -28
  54. dashboard/backend/app/schemas/workflow.py +0 -31
  55. dashboard/backend/app/server.py +0 -87
  56. dashboard/backend/app/services/__init__.py +0 -6
  57. dashboard/backend/app/services/run_service.py +0 -240
  58. dashboard/backend/app/services/workflow_service.py +0 -155
  59. dashboard/backend/main.py +0 -18
  60. docs/concepts/cancellation.mdx +0 -362
  61. docs/concepts/continue-as-new.mdx +0 -434
  62. docs/concepts/events.mdx +0 -266
  63. docs/concepts/fault-tolerance.mdx +0 -370
  64. docs/concepts/hooks.mdx +0 -552
  65. docs/concepts/limitations.mdx +0 -167
  66. docs/concepts/schedules.mdx +0 -775
  67. docs/concepts/sleep.mdx +0 -312
  68. docs/concepts/steps.mdx +0 -301
  69. docs/concepts/workflows.mdx +0 -255
  70. docs/guides/cli.mdx +0 -942
  71. docs/guides/configuration.mdx +0 -560
  72. docs/introduction.mdx +0 -155
  73. docs/quickstart.mdx +0 -279
  74. examples/__init__.py +0 -1
  75. examples/celery/__init__.py +0 -1
  76. examples/celery/durable/docker-compose.yml +0 -55
  77. examples/celery/durable/pyworkflow.config.yaml +0 -12
  78. examples/celery/durable/workflows/__init__.py +0 -122
  79. examples/celery/durable/workflows/basic.py +0 -87
  80. examples/celery/durable/workflows/batch_processing.py +0 -102
  81. examples/celery/durable/workflows/cancellation.py +0 -273
  82. examples/celery/durable/workflows/child_workflow_patterns.py +0 -240
  83. examples/celery/durable/workflows/child_workflows.py +0 -202
  84. examples/celery/durable/workflows/continue_as_new.py +0 -260
  85. examples/celery/durable/workflows/fault_tolerance.py +0 -210
  86. examples/celery/durable/workflows/hooks.py +0 -211
  87. examples/celery/durable/workflows/idempotency.py +0 -112
  88. examples/celery/durable/workflows/long_running.py +0 -99
  89. examples/celery/durable/workflows/retries.py +0 -101
  90. examples/celery/durable/workflows/schedules.py +0 -209
  91. examples/celery/transient/01_basic_workflow.py +0 -91
  92. examples/celery/transient/02_fault_tolerance.py +0 -257
  93. examples/celery/transient/__init__.py +0 -20
  94. examples/celery/transient/pyworkflow.config.yaml +0 -25
  95. examples/local/__init__.py +0 -1
  96. examples/local/durable/01_basic_workflow.py +0 -94
  97. examples/local/durable/02_file_storage.py +0 -132
  98. examples/local/durable/03_retries.py +0 -169
  99. examples/local/durable/04_long_running.py +0 -119
  100. examples/local/durable/05_event_log.py +0 -145
  101. examples/local/durable/06_idempotency.py +0 -148
  102. examples/local/durable/07_hooks.py +0 -334
  103. examples/local/durable/08_cancellation.py +0 -233
  104. examples/local/durable/09_child_workflows.py +0 -198
  105. examples/local/durable/10_child_workflow_patterns.py +0 -265
  106. examples/local/durable/11_continue_as_new.py +0 -249
  107. examples/local/durable/12_schedules.py +0 -198
  108. examples/local/durable/__init__.py +0 -1
  109. examples/local/transient/01_quick_tasks.py +0 -87
  110. examples/local/transient/02_retries.py +0 -130
  111. examples/local/transient/03_sleep.py +0 -141
  112. examples/local/transient/__init__.py +0 -1
  113. pyworkflow_engine-0.1.7.dist-info/RECORD +0 -196
  114. pyworkflow_engine-0.1.7.dist-info/top_level.txt +0 -5
  115. tests/examples/__init__.py +0 -0
  116. tests/integration/__init__.py +0 -0
  117. tests/integration/test_cancellation.py +0 -330
  118. tests/integration/test_child_workflows.py +0 -439
  119. tests/integration/test_continue_as_new.py +0 -428
  120. tests/integration/test_dynamodb_storage.py +0 -1146
  121. tests/integration/test_fault_tolerance.py +0 -369
  122. tests/integration/test_schedule_storage.py +0 -484
  123. tests/unit/__init__.py +0 -0
  124. tests/unit/backends/__init__.py +0 -1
  125. tests/unit/backends/test_dynamodb_storage.py +0 -1554
  126. tests/unit/backends/test_postgres_storage.py +0 -1281
  127. tests/unit/backends/test_sqlite_storage.py +0 -1460
  128. tests/unit/conftest.py +0 -41
  129. tests/unit/test_cancellation.py +0 -364
  130. tests/unit/test_child_workflows.py +0 -680
  131. tests/unit/test_continue_as_new.py +0 -441
  132. tests/unit/test_event_limits.py +0 -316
  133. tests/unit/test_executor.py +0 -320
  134. tests/unit/test_fault_tolerance.py +0 -334
  135. tests/unit/test_hooks.py +0 -495
  136. tests/unit/test_registry.py +0 -261
  137. tests/unit/test_replay.py +0 -420
  138. tests/unit/test_schedule_schemas.py +0 -285
  139. tests/unit/test_schedule_utils.py +0 -286
  140. tests/unit/test_scheduled_workflow.py +0 -274
  141. tests/unit/test_step.py +0 -353
  142. tests/unit/test_workflow.py +0 -243
  143. {pyworkflow_engine-0.1.7.dist-info → pyworkflow_engine-0.1.9.dist-info}/WHEEL +0 -0
  144. {pyworkflow_engine-0.1.7.dist-info → pyworkflow_engine-0.1.9.dist-info}/entry_points.txt +0 -0
  145. {pyworkflow_engine-0.1.7.dist-info → pyworkflow_engine-0.1.9.dist-info}/licenses/LICENSE +0 -0
@@ -1,316 +0,0 @@
1
- """Unit tests for event limit validation."""
2
-
3
- import warnings
4
-
5
- import pytest
6
- from loguru import logger
7
-
8
- from pyworkflow.config import configure, get_config, reset_config
9
- from pyworkflow.context import LocalContext, set_context
10
- from pyworkflow.core.exceptions import EventLimitExceededError
11
- from pyworkflow.engine.events import create_step_completed_event
12
- from pyworkflow.storage.memory import InMemoryStorageBackend
13
-
14
-
15
- @pytest.fixture
16
- def capture_logs():
17
- """Fixture to capture loguru logs for testing."""
18
- captured = []
19
-
20
- def sink(message):
21
- captured.append(str(message))
22
-
23
- handler_id = logger.add(sink, format="{message}", level="WARNING")
24
- yield captured
25
- logger.remove(handler_id)
26
-
27
-
28
- class TestEventLimitValidation:
29
- """Test event limit validation."""
30
-
31
- @pytest.fixture(autouse=True)
32
- def reset_config_fixture(self):
33
- """Reset config before and after each test."""
34
- reset_config()
35
- yield
36
- reset_config()
37
-
38
- @pytest.mark.asyncio
39
- async def test_hard_limit_raises_error(self):
40
- """Test that exceeding hard limit raises EventLimitExceededError."""
41
- # Configure with low limits for testing
42
- configure(event_hard_limit=10)
43
-
44
- storage = InMemoryStorageBackend()
45
- ctx = LocalContext(
46
- run_id="test_run",
47
- workflow_name="test_workflow",
48
- storage=storage,
49
- durable=True,
50
- )
51
-
52
- # Add events to reach hard limit
53
- for i in range(10):
54
- event = create_step_completed_event(
55
- run_id="test_run", step_id=f"step_{i}", result="test", step_name="test_step"
56
- )
57
- await storage.record_event(event)
58
-
59
- # Validation should raise
60
- with pytest.raises(EventLimitExceededError) as exc_info:
61
- await ctx.validate_event_limits()
62
-
63
- assert exc_info.value.event_count == 10
64
- assert exc_info.value.limit == 10
65
- assert exc_info.value.run_id == "test_run"
66
-
67
- @pytest.mark.asyncio
68
- async def test_hard_limit_message(self):
69
- """Test that EventLimitExceededError has correct message."""
70
- configure(event_hard_limit=5)
71
-
72
- storage = InMemoryStorageBackend()
73
- ctx = LocalContext(
74
- run_id="my_run",
75
- workflow_name="test_workflow",
76
- storage=storage,
77
- durable=True,
78
- )
79
-
80
- # Add events to reach hard limit
81
- for i in range(5):
82
- event = create_step_completed_event(
83
- run_id="my_run", step_id=f"step_{i}", result="test", step_name="test_step"
84
- )
85
- await storage.record_event(event)
86
-
87
- with pytest.raises(EventLimitExceededError) as exc_info:
88
- await ctx.validate_event_limits()
89
-
90
- assert "my_run" in str(exc_info.value)
91
- assert "5 >= 5" in str(exc_info.value)
92
-
93
- @pytest.mark.asyncio
94
- async def test_soft_limit_logs_warning(self, capture_logs):
95
- """Test that reaching soft limit logs warning."""
96
- configure(event_soft_limit=5, event_hard_limit=100)
97
-
98
- storage = InMemoryStorageBackend()
99
- ctx = LocalContext(
100
- run_id="test_run",
101
- workflow_name="test_workflow",
102
- storage=storage,
103
- durable=True,
104
- )
105
-
106
- # Add events to reach soft limit
107
- for i in range(5):
108
- event = create_step_completed_event(
109
- run_id="test_run", step_id=f"step_{i}", result="test", step_name="test_step"
110
- )
111
- await storage.record_event(event)
112
-
113
- # Validation should log warning
114
- await ctx.validate_event_limits()
115
-
116
- # Check captured logs
117
- log_text = "\n".join(capture_logs)
118
- assert "approaching event limit" in log_text
119
- assert "5/100" in log_text
120
-
121
- @pytest.mark.asyncio
122
- async def test_below_soft_limit_no_warning(self, capture_logs):
123
- """Test that below soft limit does not log warning."""
124
- configure(event_soft_limit=10, event_hard_limit=100)
125
-
126
- storage = InMemoryStorageBackend()
127
- ctx = LocalContext(
128
- run_id="test_run",
129
- workflow_name="test_workflow",
130
- storage=storage,
131
- durable=True,
132
- )
133
-
134
- # Add events below soft limit
135
- for i in range(5):
136
- event = create_step_completed_event(
137
- run_id="test_run", step_id=f"step_{i}", result="test", step_name="test_step"
138
- )
139
- await storage.record_event(event)
140
-
141
- # Validation should not log warning
142
- await ctx.validate_event_limits()
143
-
144
- # Check captured logs
145
- log_text = "\n".join(capture_logs)
146
- assert "approaching event limit" not in log_text
147
-
148
- @pytest.mark.asyncio
149
- async def test_warning_interval(self, capture_logs):
150
- """Test warnings are logged every N events after soft limit."""
151
- configure(event_soft_limit=5, event_hard_limit=100, event_warning_interval=3)
152
-
153
- storage = InMemoryStorageBackend()
154
- ctx = LocalContext(
155
- run_id="test_run",
156
- workflow_name="test_workflow",
157
- storage=storage,
158
- durable=True,
159
- )
160
- set_context(ctx)
161
-
162
- try:
163
- # Add 5 events (soft limit)
164
- for i in range(5):
165
- event = create_step_completed_event(
166
- run_id="test_run", step_id=f"step_{i}", result="test", step_name="test_step"
167
- )
168
- await storage.record_event(event)
169
-
170
- # First validation - should warn (at 5)
171
- await ctx.validate_event_limits()
172
- log_text = "\n".join(capture_logs)
173
- assert "approaching event limit" in log_text
174
- assert "5/100" in log_text
175
-
176
- # Clear captured logs
177
- capture_logs.clear()
178
-
179
- # Add 2 more events (not at interval yet - 7 events total)
180
- for i in range(5, 7):
181
- event = create_step_completed_event(
182
- run_id="test_run", step_id=f"step_{i}", result="test", step_name="test_step"
183
- )
184
- await storage.record_event(event)
185
-
186
- # Validation should NOT warn (7 < 5 + 3 = 8)
187
- await ctx.validate_event_limits()
188
- log_text = "\n".join(capture_logs)
189
- assert "approaching event limit" not in log_text
190
-
191
- # Add 1 more (now at 8 events, should warn because 8 >= 5 + 3)
192
- event = create_step_completed_event(
193
- run_id="test_run", step_id="step_7", result="test", step_name="test_step"
194
- )
195
- await storage.record_event(event)
196
-
197
- await ctx.validate_event_limits()
198
- log_text = "\n".join(capture_logs)
199
- assert "approaching event limit" in log_text
200
- assert "8/100" in log_text
201
- finally:
202
- set_context(None)
203
-
204
- @pytest.mark.asyncio
205
- async def test_transient_mode_skips_validation(self):
206
- """Test that transient mode skips validation."""
207
- configure(event_hard_limit=1) # Very low limit
208
-
209
- ctx = LocalContext(
210
- run_id="test_run",
211
- workflow_name="test_workflow",
212
- durable=False, # Transient mode
213
- )
214
-
215
- # Should not raise even though we would be over the limit
216
- await ctx.validate_event_limits() # No error
217
-
218
- @pytest.mark.asyncio
219
- async def test_no_storage_skips_validation(self):
220
- """Test that missing storage skips validation."""
221
- configure(event_hard_limit=1) # Very low limit
222
-
223
- ctx = LocalContext(
224
- run_id="test_run",
225
- workflow_name="test_workflow",
226
- storage=None, # No storage
227
- durable=True,
228
- )
229
-
230
- # Should not raise - no storage means validation is skipped
231
- await ctx.validate_event_limits() # No error
232
-
233
- def test_configure_warns_on_limit_change(self):
234
- """Test that configure() warns when modifying event limits."""
235
- with warnings.catch_warnings(record=True) as w:
236
- warnings.simplefilter("always")
237
- configure(event_hard_limit=100000)
238
-
239
- assert len(w) == 1
240
- assert "not recommended" in str(w[0].message)
241
- assert "event_hard_limit" in str(w[0].message)
242
-
243
- def test_configure_warns_on_multiple_limit_changes(self):
244
- """Test that configure() warns when modifying multiple event limits."""
245
- with warnings.catch_warnings(record=True) as w:
246
- warnings.simplefilter("always")
247
- configure(event_soft_limit=5000, event_hard_limit=100000)
248
-
249
- assert len(w) == 1
250
- assert "not recommended" in str(w[0].message)
251
- # Both should be mentioned
252
- assert "event_hard_limit" in str(w[0].message)
253
- assert "event_soft_limit" in str(w[0].message)
254
-
255
- def test_configure_no_warning_for_other_options(self):
256
- """Test that configure() doesn't warn for non-limit options."""
257
- with warnings.catch_warnings(record=True) as w:
258
- warnings.simplefilter("always")
259
- configure(default_retries=5)
260
-
261
- # No warnings for non-limit options
262
- assert len(w) == 0
263
-
264
-
265
- class TestEventLimitDefaults:
266
- """Test default event limit values."""
267
-
268
- @pytest.fixture(autouse=True)
269
- def reset_config_fixture(self):
270
- """Reset config before and after each test."""
271
- reset_config()
272
- yield
273
- reset_config()
274
-
275
- def test_default_soft_limit(self):
276
- """Test default soft limit is 10,000."""
277
- config = get_config()
278
- assert config.event_soft_limit == 10_000
279
-
280
- def test_default_hard_limit(self):
281
- """Test default hard limit is 50,000."""
282
- config = get_config()
283
- assert config.event_hard_limit == 50_000
284
-
285
- def test_default_warning_interval(self):
286
- """Test default warning interval is 100."""
287
- config = get_config()
288
- assert config.event_warning_interval == 100
289
-
290
-
291
- class TestEventLimitExceededError:
292
- """Test EventLimitExceededError exception."""
293
-
294
- def test_exception_attributes(self):
295
- """Test that exception has correct attributes."""
296
- error = EventLimitExceededError(
297
- run_id="run_123",
298
- event_count=50000,
299
- limit=50000,
300
- )
301
-
302
- assert error.run_id == "run_123"
303
- assert error.event_count == 50000
304
- assert error.limit == 50000
305
-
306
- def test_exception_inherits_from_fatal_error(self):
307
- """Test that EventLimitExceededError inherits from FatalError."""
308
- from pyworkflow.core.exceptions import FatalError
309
-
310
- error = EventLimitExceededError(
311
- run_id="run_123",
312
- event_count=50000,
313
- limit=50000,
314
- )
315
-
316
- assert isinstance(error, FatalError)
@@ -1,320 +0,0 @@
1
- """
2
- Unit tests for workflow executor.
3
-
4
- Tests use the unified start/resume API with local runtime.
5
- """
6
-
7
- import pytest
8
-
9
- from pyworkflow import configure, reset_config, resume, start
10
- from pyworkflow.core.exceptions import (
11
- WorkflowNotFoundError,
12
- )
13
- from pyworkflow.core.step import step
14
- from pyworkflow.core.workflow import workflow
15
- from pyworkflow.engine.executor import get_workflow_events, get_workflow_run
16
- from pyworkflow.primitives.sleep import sleep
17
- from pyworkflow.storage.file import FileStorageBackend
18
- from pyworkflow.storage.schemas import RunStatus
19
-
20
-
21
- @pytest.fixture(autouse=True)
22
- def reset_config_fixture():
23
- """Reset configuration before each test."""
24
- reset_config()
25
- yield
26
- reset_config()
27
-
28
-
29
- class TestWorkflowStart:
30
- """Test starting workflows."""
31
-
32
- @pytest.mark.asyncio
33
- async def test_start_basic_workflow(self, tmp_path):
34
- """Test starting a basic workflow."""
35
-
36
- @workflow(name="test_start_workflow")
37
- async def my_workflow(x: int):
38
- return x * 2
39
-
40
- storage = FileStorageBackend(base_path=str(tmp_path))
41
- run_id = await start(my_workflow, 5, durable=True, storage=storage)
42
-
43
- # Check run was created
44
- assert run_id is not None
45
- assert run_id.startswith("run_")
46
-
47
- # Check run status
48
- run = await storage.get_run(run_id)
49
- assert run is not None
50
- assert run.status == RunStatus.COMPLETED
51
- assert run.workflow_name == "test_start_workflow"
52
-
53
- @pytest.mark.asyncio
54
- async def test_start_workflow_with_kwargs(self, tmp_path):
55
- """Test starting workflow with keyword arguments."""
56
-
57
- @workflow(name="kwargs_workflow")
58
- async def kwargs_workflow(a: int, b: int):
59
- return a + b
60
-
61
- storage = FileStorageBackend(base_path=str(tmp_path))
62
- run_id = await start(kwargs_workflow, 10, b=20, durable=True, storage=storage)
63
-
64
- # Check result was stored
65
- run = await storage.get_run(run_id)
66
- assert run.status == RunStatus.COMPLETED
67
-
68
- @pytest.mark.asyncio
69
- async def test_start_with_idempotency_key(self, tmp_path):
70
- """Test workflow idempotency."""
71
-
72
- @workflow(name="idempotent_workflow")
73
- async def my_workflow():
74
- return "done"
75
-
76
- storage = FileStorageBackend(base_path=str(tmp_path))
77
-
78
- # First execution
79
- run_id1 = await start(
80
- my_workflow, durable=True, storage=storage, idempotency_key="unique-key-123"
81
- )
82
-
83
- # Second execution with same key - should return same run_id
84
- run_id2 = await start(
85
- my_workflow, durable=True, storage=storage, idempotency_key="unique-key-123"
86
- )
87
-
88
- assert run_id1 == run_id2
89
-
90
- @pytest.mark.asyncio
91
- async def test_start_workflow_with_failure(self, tmp_path):
92
- """Test starting a workflow that fails."""
93
-
94
- @workflow(name="failing_workflow")
95
- async def failing_workflow():
96
- raise ValueError("Test failure")
97
-
98
- storage = FileStorageBackend(base_path=str(tmp_path))
99
-
100
- with pytest.raises(ValueError, match="Test failure"):
101
- await start(failing_workflow, durable=True, storage=storage)
102
-
103
- # Check that run was marked as failed
104
- # (We need to get the run_id from storage somehow)
105
- # For now, just verify the exception was raised
106
-
107
- @pytest.mark.asyncio
108
- async def test_start_workflow_with_suspension(self, tmp_path):
109
- """Test starting a workflow that suspends."""
110
-
111
- @workflow(name="suspending_workflow")
112
- async def suspending_workflow():
113
- await sleep("5s")
114
- return "completed"
115
-
116
- storage = FileStorageBackend(base_path=str(tmp_path))
117
- run_id = await start(suspending_workflow, durable=True, storage=storage)
118
-
119
- # Workflow should have suspended
120
- run = await storage.get_run(run_id)
121
- assert run.status == RunStatus.SUSPENDED
122
-
123
- @pytest.mark.asyncio
124
- async def test_start_workflow_with_steps(self, tmp_path):
125
- """Test starting a workflow with steps."""
126
-
127
- @step()
128
- async def add_step(a: int, b: int):
129
- return a + b
130
-
131
- @workflow(name="step_workflow")
132
- async def step_workflow(x: int):
133
- result = await add_step(x, 10)
134
- return result
135
-
136
- storage = FileStorageBackend(base_path=str(tmp_path))
137
- run_id = await start(step_workflow, 5, durable=True, storage=storage)
138
-
139
- # Verify completion
140
- run = await storage.get_run(run_id)
141
- assert run.status == RunStatus.COMPLETED
142
-
143
- # Verify events include step events
144
- events = await storage.get_events(run_id)
145
- event_types = [e.type.value for e in events]
146
- assert "step.started" in event_types
147
- assert "step.completed" in event_types
148
-
149
-
150
- class TestWorkflowResume:
151
- """Test resuming workflows."""
152
-
153
- @pytest.mark.asyncio
154
- async def test_resume_suspended_workflow(self, tmp_path):
155
- """Test resuming a suspended workflow."""
156
-
157
- @workflow(name="resumable_workflow")
158
- async def resumable_workflow():
159
- await sleep("1s")
160
- return "resumed"
161
-
162
- storage = FileStorageBackend(base_path=str(tmp_path))
163
-
164
- # Start and suspend
165
- run_id = await start(resumable_workflow, durable=True, storage=storage)
166
- run = await storage.get_run(run_id)
167
- assert run.status == RunStatus.SUSPENDED
168
-
169
- # Resume workflow
170
- await resume(run_id, storage=storage)
171
-
172
- # Should complete now
173
- # Note: This will still suspend because sleep hasn't actually elapsed
174
- # In a real scenario, we'd need time to pass or mock the time check
175
-
176
- @pytest.mark.asyncio
177
- async def test_resume_nonexistent_workflow(self, tmp_path):
178
- """Test resuming a workflow that doesn't exist."""
179
- storage = FileStorageBackend(base_path=str(tmp_path))
180
-
181
- with pytest.raises(WorkflowNotFoundError):
182
- await resume("nonexistent_run_id", storage=storage)
183
-
184
- @pytest.mark.asyncio
185
- async def test_resume_with_replay(self, tmp_path):
186
- """Test that resume replays previous events."""
187
- execution_count = 0
188
-
189
- @step()
190
- async def counting_step():
191
- nonlocal execution_count
192
- execution_count += 1
193
- return "done"
194
-
195
- @workflow(name="replay_workflow")
196
- async def replay_workflow():
197
- await counting_step()
198
- await sleep("1s")
199
- await counting_step() # This should use cached result on resume
200
- return "completed"
201
-
202
- storage = FileStorageBackend(base_path=str(tmp_path))
203
-
204
- # Start workflow - will execute first step and suspend
205
- run_id = await start(replay_workflow, durable=True, storage=storage)
206
- assert execution_count == 1
207
-
208
- # Resume - should replay first step (not execute) and suspend again
209
- await resume(run_id, storage=storage)
210
-
211
- # First step should have been replayed, not re-executed
212
- # So execution_count should still be 1
213
- # (Second step hasn't executed yet because sleep hasn't elapsed)
214
-
215
-
216
- class TestWorkflowQueries:
217
- """Test workflow query functions."""
218
-
219
- @pytest.mark.asyncio
220
- async def test_get_workflow_run(self, tmp_path):
221
- """Test getting workflow run information."""
222
-
223
- @workflow(name="query_workflow")
224
- async def query_workflow():
225
- return "done"
226
-
227
- storage = FileStorageBackend(base_path=str(tmp_path))
228
- run_id = await start(query_workflow, durable=True, storage=storage)
229
-
230
- # Query the run
231
- run = await get_workflow_run(run_id, storage=storage)
232
-
233
- assert run is not None
234
- assert run.run_id == run_id
235
- assert run.workflow_name == "query_workflow"
236
- assert run.status == RunStatus.COMPLETED
237
-
238
- @pytest.mark.asyncio
239
- async def test_get_workflow_run_nonexistent(self, tmp_path):
240
- """Test getting a nonexistent workflow run."""
241
- storage = FileStorageBackend(base_path=str(tmp_path))
242
-
243
- run = await get_workflow_run("nonexistent", storage=storage)
244
- assert run is None
245
-
246
- @pytest.mark.asyncio
247
- async def test_get_workflow_events(self, tmp_path):
248
- """Test getting workflow events."""
249
-
250
- @step()
251
- async def event_step():
252
- return "done"
253
-
254
- @workflow(name="events_workflow")
255
- async def events_workflow():
256
- await event_step()
257
- return "completed"
258
-
259
- storage = FileStorageBackend(base_path=str(tmp_path))
260
- run_id = await start(events_workflow, durable=True, storage=storage)
261
-
262
- # Get events
263
- events = await get_workflow_events(run_id, storage=storage)
264
-
265
- assert len(events) > 0
266
-
267
- # Check event types
268
- event_types = [e.type.value for e in events]
269
- assert "workflow.started" in event_types
270
- assert "step.started" in event_types
271
- assert "step.completed" in event_types
272
- assert "workflow.completed" in event_types
273
-
274
- @pytest.mark.asyncio
275
- async def test_workflow_max_duration_stored(self, tmp_path):
276
- """Test that workflow max_duration is stored correctly."""
277
-
278
- @workflow(name="timed_workflow", max_duration="1h", tags=["test", "backend"])
279
- async def timed_workflow():
280
- return "done"
281
-
282
- storage = FileStorageBackend(base_path=str(tmp_path))
283
- run_id = await start(timed_workflow, durable=True, storage=storage)
284
-
285
- # Check max_duration was stored on run
286
- run = await storage.get_run(run_id)
287
- assert run.max_duration == "1h"
288
-
289
- # Check tags were stored on workflow metadata (not run)
290
- from pyworkflow.core.registry import get_workflow
291
-
292
- workflow_meta = get_workflow("timed_workflow")
293
- assert workflow_meta.tags == ["test", "backend"]
294
-
295
-
296
- class TestWorkflowDefaultStorage:
297
- """Test workflows with default storage backend."""
298
-
299
- @pytest.mark.asyncio
300
- async def test_start_without_storage_param(self, tmp_path):
301
- """Test that configured storage is used by default."""
302
- from pyworkflow.storage.memory import InMemoryStorageBackend
303
-
304
- storage = InMemoryStorageBackend()
305
- configure(storage=storage, default_durable=True)
306
-
307
- @workflow(name="default_storage_workflow")
308
- async def default_workflow():
309
- return "done"
310
-
311
- # Start without providing storage (uses configured default)
312
- run_id = await start(default_workflow)
313
-
314
- assert run_id is not None
315
- assert run_id.startswith("run_")
316
-
317
- # Verify run was stored
318
- run = await storage.get_run(run_id)
319
- assert run is not None
320
- assert run.status == RunStatus.COMPLETED