pyworkflow-engine 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196) hide show
  1. dashboard/backend/app/__init__.py +1 -0
  2. dashboard/backend/app/config.py +32 -0
  3. dashboard/backend/app/controllers/__init__.py +6 -0
  4. dashboard/backend/app/controllers/run_controller.py +86 -0
  5. dashboard/backend/app/controllers/workflow_controller.py +33 -0
  6. dashboard/backend/app/dependencies/__init__.py +5 -0
  7. dashboard/backend/app/dependencies/storage.py +50 -0
  8. dashboard/backend/app/repositories/__init__.py +6 -0
  9. dashboard/backend/app/repositories/run_repository.py +80 -0
  10. dashboard/backend/app/repositories/workflow_repository.py +27 -0
  11. dashboard/backend/app/rest/__init__.py +8 -0
  12. dashboard/backend/app/rest/v1/__init__.py +12 -0
  13. dashboard/backend/app/rest/v1/health.py +33 -0
  14. dashboard/backend/app/rest/v1/runs.py +133 -0
  15. dashboard/backend/app/rest/v1/workflows.py +41 -0
  16. dashboard/backend/app/schemas/__init__.py +23 -0
  17. dashboard/backend/app/schemas/common.py +16 -0
  18. dashboard/backend/app/schemas/event.py +24 -0
  19. dashboard/backend/app/schemas/hook.py +25 -0
  20. dashboard/backend/app/schemas/run.py +54 -0
  21. dashboard/backend/app/schemas/step.py +28 -0
  22. dashboard/backend/app/schemas/workflow.py +31 -0
  23. dashboard/backend/app/server.py +87 -0
  24. dashboard/backend/app/services/__init__.py +6 -0
  25. dashboard/backend/app/services/run_service.py +240 -0
  26. dashboard/backend/app/services/workflow_service.py +155 -0
  27. dashboard/backend/main.py +18 -0
  28. docs/concepts/cancellation.mdx +362 -0
  29. docs/concepts/continue-as-new.mdx +434 -0
  30. docs/concepts/events.mdx +266 -0
  31. docs/concepts/fault-tolerance.mdx +370 -0
  32. docs/concepts/hooks.mdx +552 -0
  33. docs/concepts/limitations.mdx +167 -0
  34. docs/concepts/schedules.mdx +775 -0
  35. docs/concepts/sleep.mdx +312 -0
  36. docs/concepts/steps.mdx +301 -0
  37. docs/concepts/workflows.mdx +255 -0
  38. docs/guides/cli.mdx +942 -0
  39. docs/guides/configuration.mdx +560 -0
  40. docs/introduction.mdx +155 -0
  41. docs/quickstart.mdx +279 -0
  42. examples/__init__.py +1 -0
  43. examples/celery/__init__.py +1 -0
  44. examples/celery/durable/docker-compose.yml +55 -0
  45. examples/celery/durable/pyworkflow.config.yaml +12 -0
  46. examples/celery/durable/workflows/__init__.py +122 -0
  47. examples/celery/durable/workflows/basic.py +87 -0
  48. examples/celery/durable/workflows/batch_processing.py +102 -0
  49. examples/celery/durable/workflows/cancellation.py +273 -0
  50. examples/celery/durable/workflows/child_workflow_patterns.py +240 -0
  51. examples/celery/durable/workflows/child_workflows.py +202 -0
  52. examples/celery/durable/workflows/continue_as_new.py +260 -0
  53. examples/celery/durable/workflows/fault_tolerance.py +210 -0
  54. examples/celery/durable/workflows/hooks.py +211 -0
  55. examples/celery/durable/workflows/idempotency.py +112 -0
  56. examples/celery/durable/workflows/long_running.py +99 -0
  57. examples/celery/durable/workflows/retries.py +101 -0
  58. examples/celery/durable/workflows/schedules.py +209 -0
  59. examples/celery/transient/01_basic_workflow.py +91 -0
  60. examples/celery/transient/02_fault_tolerance.py +257 -0
  61. examples/celery/transient/__init__.py +20 -0
  62. examples/celery/transient/pyworkflow.config.yaml +25 -0
  63. examples/local/__init__.py +1 -0
  64. examples/local/durable/01_basic_workflow.py +94 -0
  65. examples/local/durable/02_file_storage.py +132 -0
  66. examples/local/durable/03_retries.py +169 -0
  67. examples/local/durable/04_long_running.py +119 -0
  68. examples/local/durable/05_event_log.py +145 -0
  69. examples/local/durable/06_idempotency.py +148 -0
  70. examples/local/durable/07_hooks.py +334 -0
  71. examples/local/durable/08_cancellation.py +233 -0
  72. examples/local/durable/09_child_workflows.py +198 -0
  73. examples/local/durable/10_child_workflow_patterns.py +265 -0
  74. examples/local/durable/11_continue_as_new.py +249 -0
  75. examples/local/durable/12_schedules.py +198 -0
  76. examples/local/durable/__init__.py +1 -0
  77. examples/local/transient/01_quick_tasks.py +87 -0
  78. examples/local/transient/02_retries.py +130 -0
  79. examples/local/transient/03_sleep.py +141 -0
  80. examples/local/transient/__init__.py +1 -0
  81. pyworkflow/__init__.py +256 -0
  82. pyworkflow/aws/__init__.py +68 -0
  83. pyworkflow/aws/context.py +234 -0
  84. pyworkflow/aws/handler.py +184 -0
  85. pyworkflow/aws/testing.py +310 -0
  86. pyworkflow/celery/__init__.py +41 -0
  87. pyworkflow/celery/app.py +198 -0
  88. pyworkflow/celery/scheduler.py +315 -0
  89. pyworkflow/celery/tasks.py +1746 -0
  90. pyworkflow/cli/__init__.py +132 -0
  91. pyworkflow/cli/__main__.py +6 -0
  92. pyworkflow/cli/commands/__init__.py +1 -0
  93. pyworkflow/cli/commands/hooks.py +640 -0
  94. pyworkflow/cli/commands/quickstart.py +495 -0
  95. pyworkflow/cli/commands/runs.py +773 -0
  96. pyworkflow/cli/commands/scheduler.py +130 -0
  97. pyworkflow/cli/commands/schedules.py +794 -0
  98. pyworkflow/cli/commands/setup.py +703 -0
  99. pyworkflow/cli/commands/worker.py +413 -0
  100. pyworkflow/cli/commands/workflows.py +1257 -0
  101. pyworkflow/cli/output/__init__.py +1 -0
  102. pyworkflow/cli/output/formatters.py +321 -0
  103. pyworkflow/cli/output/styles.py +121 -0
  104. pyworkflow/cli/utils/__init__.py +1 -0
  105. pyworkflow/cli/utils/async_helpers.py +30 -0
  106. pyworkflow/cli/utils/config.py +130 -0
  107. pyworkflow/cli/utils/config_generator.py +344 -0
  108. pyworkflow/cli/utils/discovery.py +53 -0
  109. pyworkflow/cli/utils/docker_manager.py +651 -0
  110. pyworkflow/cli/utils/interactive.py +364 -0
  111. pyworkflow/cli/utils/storage.py +115 -0
  112. pyworkflow/config.py +329 -0
  113. pyworkflow/context/__init__.py +63 -0
  114. pyworkflow/context/aws.py +230 -0
  115. pyworkflow/context/base.py +416 -0
  116. pyworkflow/context/local.py +930 -0
  117. pyworkflow/context/mock.py +381 -0
  118. pyworkflow/core/__init__.py +0 -0
  119. pyworkflow/core/exceptions.py +353 -0
  120. pyworkflow/core/registry.py +313 -0
  121. pyworkflow/core/scheduled.py +328 -0
  122. pyworkflow/core/step.py +494 -0
  123. pyworkflow/core/workflow.py +294 -0
  124. pyworkflow/discovery.py +248 -0
  125. pyworkflow/engine/__init__.py +0 -0
  126. pyworkflow/engine/events.py +879 -0
  127. pyworkflow/engine/executor.py +682 -0
  128. pyworkflow/engine/replay.py +273 -0
  129. pyworkflow/observability/__init__.py +19 -0
  130. pyworkflow/observability/logging.py +234 -0
  131. pyworkflow/primitives/__init__.py +33 -0
  132. pyworkflow/primitives/child_handle.py +174 -0
  133. pyworkflow/primitives/child_workflow.py +372 -0
  134. pyworkflow/primitives/continue_as_new.py +101 -0
  135. pyworkflow/primitives/define_hook.py +150 -0
  136. pyworkflow/primitives/hooks.py +97 -0
  137. pyworkflow/primitives/resume_hook.py +210 -0
  138. pyworkflow/primitives/schedule.py +545 -0
  139. pyworkflow/primitives/shield.py +96 -0
  140. pyworkflow/primitives/sleep.py +100 -0
  141. pyworkflow/runtime/__init__.py +21 -0
  142. pyworkflow/runtime/base.py +179 -0
  143. pyworkflow/runtime/celery.py +310 -0
  144. pyworkflow/runtime/factory.py +101 -0
  145. pyworkflow/runtime/local.py +706 -0
  146. pyworkflow/scheduler/__init__.py +9 -0
  147. pyworkflow/scheduler/local.py +248 -0
  148. pyworkflow/serialization/__init__.py +0 -0
  149. pyworkflow/serialization/decoder.py +146 -0
  150. pyworkflow/serialization/encoder.py +162 -0
  151. pyworkflow/storage/__init__.py +54 -0
  152. pyworkflow/storage/base.py +612 -0
  153. pyworkflow/storage/config.py +185 -0
  154. pyworkflow/storage/dynamodb.py +1315 -0
  155. pyworkflow/storage/file.py +827 -0
  156. pyworkflow/storage/memory.py +549 -0
  157. pyworkflow/storage/postgres.py +1161 -0
  158. pyworkflow/storage/schemas.py +486 -0
  159. pyworkflow/storage/sqlite.py +1136 -0
  160. pyworkflow/utils/__init__.py +0 -0
  161. pyworkflow/utils/duration.py +177 -0
  162. pyworkflow/utils/schedule.py +391 -0
  163. pyworkflow_engine-0.1.7.dist-info/METADATA +687 -0
  164. pyworkflow_engine-0.1.7.dist-info/RECORD +196 -0
  165. pyworkflow_engine-0.1.7.dist-info/WHEEL +5 -0
  166. pyworkflow_engine-0.1.7.dist-info/entry_points.txt +2 -0
  167. pyworkflow_engine-0.1.7.dist-info/licenses/LICENSE +21 -0
  168. pyworkflow_engine-0.1.7.dist-info/top_level.txt +5 -0
  169. tests/examples/__init__.py +0 -0
  170. tests/integration/__init__.py +0 -0
  171. tests/integration/test_cancellation.py +330 -0
  172. tests/integration/test_child_workflows.py +439 -0
  173. tests/integration/test_continue_as_new.py +428 -0
  174. tests/integration/test_dynamodb_storage.py +1146 -0
  175. tests/integration/test_fault_tolerance.py +369 -0
  176. tests/integration/test_schedule_storage.py +484 -0
  177. tests/unit/__init__.py +0 -0
  178. tests/unit/backends/__init__.py +1 -0
  179. tests/unit/backends/test_dynamodb_storage.py +1554 -0
  180. tests/unit/backends/test_postgres_storage.py +1281 -0
  181. tests/unit/backends/test_sqlite_storage.py +1460 -0
  182. tests/unit/conftest.py +41 -0
  183. tests/unit/test_cancellation.py +364 -0
  184. tests/unit/test_child_workflows.py +680 -0
  185. tests/unit/test_continue_as_new.py +441 -0
  186. tests/unit/test_event_limits.py +316 -0
  187. tests/unit/test_executor.py +320 -0
  188. tests/unit/test_fault_tolerance.py +334 -0
  189. tests/unit/test_hooks.py +495 -0
  190. tests/unit/test_registry.py +261 -0
  191. tests/unit/test_replay.py +420 -0
  192. tests/unit/test_schedule_schemas.py +285 -0
  193. tests/unit/test_schedule_utils.py +286 -0
  194. tests/unit/test_scheduled_workflow.py +274 -0
  195. tests/unit/test_step.py +353 -0
  196. tests/unit/test_workflow.py +243 -0
@@ -0,0 +1,249 @@
1
+ """
2
+ Durable Workflow - Continue-As-New
3
+
4
+ This example demonstrates continue_as_new() for long-running workflows:
5
+ - Polling workflows that need fresh event history
6
+ - Batch processing with continuation
7
+ - Tracking workflow chains
8
+ - Using get_workflow_chain() to view the full history
9
+
10
+ Run: python examples/local/durable/11_continue_as_new.py 2>/dev/null
11
+ """
12
+
13
+ import asyncio
14
+ import tempfile
15
+
16
+ from pyworkflow import (
17
+ configure,
18
+ continue_as_new,
19
+ get_workflow_chain,
20
+ reset_config,
21
+ start,
22
+ step,
23
+ workflow,
24
+ )
25
+ from pyworkflow.storage import FileStorageBackend
26
+
27
+
28
+ # --- Steps ---
29
@step()
async def fetch_batch(offset: int, batch_size: int) -> list:
    """Return the next slice of item ids, or an empty list when exhausted."""
    total_items = 25  # Simulated data source holds 25 items in total
    if offset >= total_items:
        return []
    upper = min(offset + batch_size, total_items)
    batch = list(range(offset, upper))
    print(f" [Step] Fetched items {offset} to {upper - 1}")
    return batch
40
+
41
+
42
@step()
async def process_batch_item(item: int) -> dict:
    """Mark one item as processed after a tiny simulated delay."""
    await asyncio.sleep(0.01)  # stand-in for real work
    result = {"item": item}
    result["processed"] = True
    return result
47
+
48
+
49
@step()
async def check_for_updates(cursor: str | None) -> tuple[str | None, list]:
    """Simulate one poll: map the current cursor to (next_cursor, items)."""
    feed = {
        None: ("cursor_1", [{"id": 1, "data": "first"}]),
        "cursor_1": ("cursor_2", [{"id": 2, "data": "second"}]),
        "cursor_2": ("cursor_3", [{"id": 3, "data": "third"}]),
    }
    # Any cursor past "cursor_2" means the simulated feed is drained.
    return feed.get(cursor, (None, []))
62
+
63
+
64
+ # --- Example 1: Batch Processing Workflow ---
65
@workflow(durable=True, tags=["local", "durable"])
async def batch_processor(offset: int = 0, batch_size: int = 10) -> str:
    """
    Process items in batches, continuing as new for each batch.

    This pattern prevents event history from growing unbounded
    when processing large datasets.

    Args:
        offset: Index of the first item handled by this run.
        batch_size: Maximum number of items fetched per run.

    Returns:
        A completion summary once the source is exhausted.
    """
    print(f"\n [Workflow] Processing batch starting at offset {offset}")

    # Fetch batch
    items = await fetch_batch(offset, batch_size)

    if not items:
        # No more items - we're done. `offset` equals the exact number of
        # items processed because each continuation advances by len(items).
        return f"Completed! Processed {offset} total items"

    # Process each item in this batch
    for item in items:
        await process_batch_item(item)

    print(" [Workflow] Batch complete. Continuing with next batch...")

    # BUGFIX: advance by the number of items actually fetched, not by
    # batch_size. A short final batch (e.g. 5 of 25 items with size 10)
    # previously overshot the offset and the completion message reported
    # 30 items processed instead of 25.
    continue_as_new(offset=offset + len(items), batch_size=batch_size)
90
+
91
+
92
+ # --- Example 2: Polling Workflow ---
93
@workflow(durable=True, tags=["local", "durable"])
async def polling_workflow(cursor: str | None = None, poll_count: int = 0) -> str:
    """
    Poll for updates indefinitely, continuing as new to reset history.

    Useful for webhook listeners, queue consumers, and real-time
    sync workflows.
    """
    print(f"\n [Workflow] Poll #{poll_count + 1}, cursor: {cursor}")

    # Ask the source for anything newer than our cursor.
    next_cursor, batch = await check_for_updates(cursor)

    if batch:
        print(f" [Workflow] Found {len(batch)} update(s)")
        for entry in batch:
            print(f" - Processing: {entry}")

    if next_cursor is None:
        # Source is drained - stop polling.
        return f"Polling complete after {poll_count + 1} polls"

    # Continue polling with the advanced cursor and a fresh event history.
    print(f" [Workflow] Continuing with new cursor: {next_cursor}")
    continue_as_new(cursor=next_cursor, poll_count=poll_count + 1)
120
+
121
+
122
+ # --- Example 3: Counter Workflow (Simple Demo) ---
123
@workflow(durable=True, tags=["local", "durable"])
async def countdown_workflow(count: int) -> str:
    """
    Minimal continue_as_new demo: each run handles exactly one tick,
    so every continuation starts with fresh event history.
    """
    print(f"\n [Workflow] Count: {count}")

    if count > 0:
        # Hand off to a fresh run with the decremented counter.
        continue_as_new(count=count - 1)
    return "Countdown complete!"
136
+
137
+
138
async def example_batch_processing(storage):
    """Example 1: Batch processing with continuation."""
    print("\n--- Example 1: Batch Processing ---")
    print("Processing 25 items in batches of 10...")

    # Kick off the batch processor and give the chain time to finish.
    run_id = await start(batch_processor, offset=0, batch_size=10)
    await asyncio.sleep(0.5)

    # Inspect every run in the continue-as-new chain.
    chain = await get_workflow_chain(run_id, storage=storage)

    print(f"\n Workflow chain has {len(chain)} runs:")
    for index, run in enumerate(chain, start=1):
        outcome = run.result if run.result else "-"
        print(f" {index}. {run.run_id[:20]}... - {run.status.value}")
        if "Completed" in str(outcome):
            print(f" Result: {outcome}")

    return run_id
161
+
162
+
163
async def example_polling(storage):
    """Example 2: Polling workflow.

    Starts the polling workflow, waits briefly, then inspects the
    continue-as-new chain and reports the final run's result.
    """
    print("\n--- Example 2: Polling Workflow ---")
    print("Polling for updates until no more available...")

    # Start polling workflow
    run_id = await start(polling_workflow)

    # Wait for completion
    await asyncio.sleep(0.5)

    # Get the workflow chain
    chain = await get_workflow_chain(run_id, storage=storage)

    print(f"\n Polling chain has {len(chain)} runs")

    # ROBUSTNESS: guard against an empty chain (e.g. the run has not been
    # persisted yet) so chain[-1] cannot raise IndexError.
    if chain:
        final_run = chain[-1]
        if final_run.result:
            print(f" Final result: {final_run.result}")

    return run_id
185
+
186
+
187
async def example_countdown(storage):
    """Example 3: Simple countdown."""
    print("\n--- Example 3: Countdown ---")
    print("Counting down from 3...")

    # Start the countdown and let the chain run to completion.
    run_id = await start(countdown_workflow, count=3)
    await asyncio.sleep(0.3)

    chain = await get_workflow_chain(run_id, storage=storage)

    links = " -> ".join(run.run_id[:8] + "..." for run in chain)
    print(f"\n Chain: {links}")
    print(f" Total runs: {len(chain)}")

    return run_id
205
+
206
+
207
async def example_view_chain_details(storage, run_id: str):
    """Show detailed chain information."""
    print("\n--- Chain Details ---")

    chain = await get_workflow_chain(run_id, storage=storage)

    last = len(chain) - 1
    for index, run in enumerate(chain):
        # Label the ends of the chain; middle runs get their ordinal.
        if index == 0:
            position = "START"
        elif index == last:
            position = "CURRENT"
        else:
            position = f"#{index + 1}"
        print(f"\n [{position}] {run.run_id}")
        print(f" Status: {run.status.value}")
        print(f" Continued from: {run.continued_from_run_id or '(none)'}")
        print(f" Continued to: {run.continued_to_run_id or '(none)'}")
219
+
220
+
221
async def main():
    """Run all continue-as-new examples against a temporary file store."""
    # Use temp directory so the example leaves nothing behind on disk.
    with tempfile.TemporaryDirectory() as tmpdir:
        print("=== Durable Workflow - Continue-As-New ===")

        # Configure with FileStorageBackend; reset first so repeated runs
        # in one process start from a clean configuration.
        reset_config()
        storage = FileStorageBackend(base_path=tmpdir)
        configure(storage=storage, default_durable=True)

        # Run examples
        batch_run_id = await example_batch_processing(storage)
        await example_polling(storage)
        await example_countdown(storage)

        # Show detailed chain for batch processing
        await example_view_chain_details(storage, batch_run_id)

        print("\n=== Key Takeaways ===")
        print(" - continue_as_new() completes current run and starts fresh")
        print(" - Each continuation has clean event history")
        print(" - Use for long-running polling, batch processing, recurring tasks")
        print(" - get_workflow_chain() retrieves all runs in the chain")
        print(" - Runs are linked via continued_from_run_id/continued_to_run_id")
        print(" - Requires at least one argument (explicit args required)")
246
+
247
+
248
if __name__ == "__main__":
    # Script entry point: run the async demo on a fresh event loop.
    asyncio.run(main())
@@ -0,0 +1,198 @@
1
+ """
2
+ Durable Workflow - Schedules Example
3
+
4
+ This example demonstrates scheduled workflow execution in local runtime.
5
+ - Cron-based scheduling (every minute)
6
+ - Interval-based scheduling (every 30 seconds)
7
+ - Overlap policies to control concurrent executions
8
+ - Schedule management (create, pause, resume, trigger)
9
+ - LocalScheduler for automatic execution
10
+
11
+ Run: python examples/local/durable/12_schedules.py 2>/dev/null
12
+
13
+ Or use the CLI:
14
+ pyworkflow --module examples.local.durable.12_schedules scheduler run --duration 65
15
+ """
16
+
17
+ import asyncio
18
+ from datetime import datetime
19
+
20
+ from pyworkflow import (
21
+ LocalScheduler,
22
+ OverlapPolicy,
23
+ ScheduleSpec,
24
+ configure,
25
+ create_schedule,
26
+ get_schedule,
27
+ list_schedules,
28
+ pause_schedule,
29
+ reset_config,
30
+ resume_schedule,
31
+ step,
32
+ trigger_schedule,
33
+ workflow,
34
+ )
35
+ from pyworkflow.storage import InMemoryStorageBackend
36
+
37
+
38
+ # --- Steps ---
39
@step()
async def collect_metrics() -> dict:
    """Collect system metrics."""
    now = datetime.now().isoformat()
    print(f" [Step] Collecting metrics at {now}...")
    # Fixed sample values stand in for a real metrics probe.
    sample = {"timestamp": now}
    sample["cpu_usage"] = 45.2
    sample["memory_usage"] = 62.8
    sample["disk_usage"] = 78.1
    return sample
50
+
51
+
52
@step()
async def store_metrics(metrics: dict) -> dict:
    """Store metrics (simulated)."""
    print(" [Step] Storing metrics...")
    stored = dict(metrics)
    stored["stored"] = True
    return stored
57
+
58
+
59
@step()
async def check_alerts(metrics: dict) -> dict:
    """Check if any metrics exceed thresholds."""
    # (metric key, limit, alert text) - an alert fires when value > limit.
    thresholds = [
        ("cpu_usage", 80, "High CPU usage"),
        ("memory_usage", 90, "High memory usage"),
        ("disk_usage", 85, "High disk usage"),
    ]
    alerts = [
        message
        for key, limit, message in thresholds
        if metrics.get(key, 0) > limit
    ]
    print(f" [Step] Alert check: {alerts or 'None'}")
    return {**metrics, "alerts": alerts}
72
+
73
+
74
+ # --- Workflow ---
75
@workflow(durable=True)
async def metrics_workflow() -> dict:
    """
    Metrics collection workflow.

    Collects system metrics, stores them, and checks for alerts.
    """
    collected = await collect_metrics()
    persisted = await store_metrics(collected)
    return await check_alerts(persisted)
86
+
87
+
88
async def main():
    """Demonstrate schedule creation, management, and automatic execution.

    Walks through: cron and interval schedule creation, listing,
    pause/resume, manual trigger, and finally runs a LocalScheduler
    for ~65 seconds so at least one scheduled execution fires.
    """
    # Configure with InMemoryStorageBackend (state is lost on exit; this
    # demo only needs storage to live as long as the process).
    reset_config()
    storage = InMemoryStorageBackend()
    configure(storage=storage, default_durable=True)

    print("=== Durable Workflow - Schedules Example ===\n")

    # Create a schedule that runs every minute
    print("Creating schedule (runs every minute)...")
    spec = ScheduleSpec(
        cron="* * * * *",  # Every minute
        timezone="UTC",
    )

    schedule = await create_schedule(
        workflow_name="metrics_workflow",
        spec=spec,
        overlap_policy=OverlapPolicy.SKIP,
        schedule_id="metrics-every-minute",
    )

    print("\nSchedule created:")
    print(f" ID: {schedule.schedule_id}")
    print(f" Workflow: {schedule.workflow_name}")
    print(f" Cron: {schedule.spec.cron}")
    print(f" Next run: {schedule.next_run_time}")
    print(f" Overlap policy: {schedule.overlap_policy.value}")

    # Also create an interval-based schedule
    print("\nCreating interval schedule (every 30 seconds)...")
    interval_spec = ScheduleSpec(interval="30s", timezone="UTC")

    interval_schedule = await create_schedule(
        workflow_name="metrics_workflow",
        spec=interval_spec,
        overlap_policy=OverlapPolicy.SKIP,
        schedule_id="metrics-30s-interval",
    )
    print(f" ID: {interval_schedule.schedule_id}")
    print(f" Interval: {interval_schedule.spec.interval}")

    # Show all schedules
    print("\n=== All Schedules ===")
    schedules = await list_schedules()
    for sched in schedules:
        print(f" {sched.schedule_id}: {sched.status.value}")

    # Demonstrate pause/resume on the interval schedule.
    print("\n=== Pause/Resume Demo ===")
    print(f"Pausing {interval_schedule.schedule_id}...")
    await pause_schedule(interval_schedule.schedule_id)

    # Re-list so the paused status is visible.
    schedules = await list_schedules()
    for sched in schedules:
        print(f" {sched.schedule_id}: {sched.status.value}")

    print(f"\nResuming {interval_schedule.schedule_id}...")
    await resume_schedule(interval_schedule.schedule_id)

    # Demonstrate manual trigger (runs now regardless of the cron spec).
    print("\n=== Manual Trigger Demo ===")
    print("Triggering schedule immediately (bypasses cron)...")
    run_id = await trigger_schedule(schedule.schedule_id)
    print(f"Triggered run: {run_id}")

    # Check stats after trigger; re-fetch to see the updated counters.
    schedule = await get_schedule(schedule.schedule_id)
    print("\nSchedule stats after trigger:")
    print(f" Total runs: {schedule.total_runs}")
    print(f" Successful: {schedule.successful_runs}")
    print(f" Failed: {schedule.failed_runs}")

    # Run the LocalScheduler
    print("\n" + "=" * 50)
    print("Starting LocalScheduler to demonstrate automatic execution.")
    print("The scheduler will poll for due schedules every 5 seconds.")
    print("Watch for workflow executions when schedules become due!")
    print("=" * 50)

    # Create and run the local scheduler
    local_scheduler = LocalScheduler(
        storage=storage,
        poll_interval=5.0,
    )

    # 65s > one minute, so the cron schedule fires at least once.
    print("\nScheduler running for 65 seconds...")
    await local_scheduler.run(duration=65.0)

    # Final stats
    print("\n=== Final Schedule Stats ===")
    for sched_id in ["metrics-every-minute", "metrics-30s-interval"]:
        sched = await get_schedule(sched_id)
        if sched:
            print(f"\n{sched.schedule_id}:")
            print(f" Total runs: {sched.total_runs}")
            print(f" Successful: {sched.successful_runs}")
            print(f" Last run: {sched.last_run_at}")

    print("\n=== Key Takeaways ===")
    print("- Schedules created with cron and interval specs")
    print("- LocalScheduler polls storage and triggers due schedules")
    print("- Overlap policies prevent concurrent runs")
    print("- Pause/resume controls schedule execution")
    print("- Manual trigger bypasses schedule timing")
    print("- For production, use: pyworkflow scheduler run")
    print("- For Celery, use: pyworkflow worker run --beat")
195
+
196
+
197
if __name__ == "__main__":
    # Script entry point: run the async demo on a fresh event loop.
    asyncio.run(main())
@@ -0,0 +1 @@
1
+ # PyWorkflow Local Durable Examples Package
@@ -0,0 +1,87 @@
1
+ """
2
+ Transient Workflow - Quick Tasks
3
+
4
+ This example demonstrates simple transient mode execution.
5
+ - Simple 3-step order workflow
6
+ - No storage backend required
7
+ - Fast, direct execution
8
+ - No event recording
9
+
10
+ Run: python examples/local/transient/01_quick_tasks.py 2>/dev/null
11
+ """
12
+
13
+ import asyncio
14
+
15
+ from pyworkflow import (
16
+ configure,
17
+ reset_config,
18
+ start,
19
+ step,
20
+ workflow,
21
+ )
22
+
23
+
24
+ # --- Steps ---
25
@step()
async def process_order(order_id: str) -> dict:
    """Process the order and validate it."""
    print(f" Processing order {order_id}...")
    return dict(order_id=order_id, status="processed")
30
+
31
+
32
@step()
async def charge_payment(order: dict, amount: float) -> dict:
    """Charge the payment for the order."""
    print(f" Charging payment: ${amount:.2f}...")
    charged = dict(order)
    charged["charged"] = amount
    return charged
37
+
38
+
39
@step()
async def send_notification(order: dict) -> dict:
    """Send order confirmation notification."""
    print(f" Sending notification for order {order['order_id']}...")
    notified = dict(order)
    notified["notified"] = True
    return notified
44
+
45
+
46
+ # --- Workflow ---
47
@workflow(durable=False, tags=["local", "transient"])
async def order_workflow(order_id: str, amount: float) -> dict:
    """Complete order processing workflow (transient mode)."""
    # Three sequential steps: validate, charge, notify.
    result = await process_order(order_id)
    result = await charge_payment(result, amount)
    return await send_notification(result)
54
+
55
+
56
async def main():
    """Run the transient order workflow and summarize the trade-offs."""
    # Configure for transient mode (no storage backend needed)
    reset_config()
    configure(default_durable=False)

    print("=== Transient Workflow - Quick Tasks ===\n")
    print("Running order workflow in transient mode...\n")

    # Start workflow (positional args are forwarded to the workflow).
    run_id = await start(order_workflow, "order-123", 99.99)

    print(f"\nWorkflow completed: {run_id}")

    print("\n=== Key Characteristics ===")
    print("✓ No storage backend required")
    print("✓ Fast execution (no event recording overhead)")
    print("✓ Perfect for scripts and CLI tools")
    print("✓ State lost on process exit (no crash recovery)")

    print("\n=== When to Use Transient Mode ===")
    print("✓ Short-lived workflows (seconds to minutes)")
    print("✓ CLI tools and data processing scripts")
    print("✓ Development and testing")
    print("✓ Tasks where simplicity > durability")

    print("\n=== Comparison with Durable Mode ===")
    print("For crash recovery and persistence, see:")
    print(" examples/local/durable/01_basic_workflow.py")
84
+
85
+
86
if __name__ == "__main__":
    # Script entry point: run the async demo on a fresh event loop.
    asyncio.run(main())
@@ -0,0 +1,130 @@
1
+ """
2
+ Transient Workflow - Retry Mechanics
3
+
4
+ This example demonstrates inline retry behavior in transient mode.
5
+ - Shows @step(max_retries=...) in action
6
+ - Simulates flaky API (fails 2x, succeeds on 3rd try)
7
+ - Retry logic works without event sourcing
8
+ - Global counter tracks retry attempts
9
+
10
+ Run: python examples/local/transient/02_retries.py 2>/dev/null
11
+ """
12
+
13
+ import asyncio
14
+
15
+ from pyworkflow import (
16
+ FatalError,
17
+ configure,
18
+ reset_config,
19
+ start,
20
+ step,
21
+ workflow,
22
+ )
23
+
24
+ # Global counter to track API call attempts
25
+ attempt_count = 0
26
+
27
+
28
+ # --- Steps ---
29
@step()
async def validate_request(request_id: str) -> dict:
    """Validate the request."""
    print(f" Validating request {request_id}...")
    return dict(request_id=request_id, valid=True)
34
+
35
+
36
@step(max_retries=3, retry_delay=1)
async def call_flaky_api(request: dict) -> dict:
    """Simulate unreliable external API - fails twice then succeeds."""
    # Module-level counter persists across retry attempts so the third
    # call can succeed.
    global attempt_count
    attempt_count += 1

    print(f" Calling external API (attempt {attempt_count})...")

    if attempt_count >= 3:
        # Third attempt succeeds.
        print(" ✓ API call successful!")
        return {**request, "api_response": "success", "attempts": attempt_count}

    # First two attempts simulate a transient outage.
    print(" ✗ API call failed (timeout)")
    raise Exception(f"API timeout - connection refused (attempt {attempt_count})")
52
+
53
+
54
@step()
async def process_response(request: dict) -> dict:
    """Process the successful API response."""
    print(f" Processing API response for {request['request_id']}...")
    processed = dict(request)
    processed["processed"] = True
    return processed
59
+
60
+
61
@step()
async def validate_input(value: int) -> int:
    """Validate input - demonstrates FatalError (no retry)."""
    if value >= 0:
        return value
    # FatalError signals that retrying cannot help.
    raise FatalError("Negative values not allowed")
67
+
68
+
69
+ # --- Workflows ---
70
@workflow(durable=False, tags=["local", "transient"])
async def api_workflow(request_id: str) -> dict:
    """Workflow with automatic retry logic."""
    payload = await validate_request(request_id)
    payload = await call_flaky_api(payload)  # retried inline on failure
    return await process_response(payload)
77
+
78
+
79
@workflow(durable=False, tags=["local", "transient"])
async def validation_workflow(value: int) -> int:
    """Workflow with fatal error (no retry)."""
    result = await validate_input(value)
    return result
83
+
84
+
85
async def main():
    """Demonstrate inline retries and FatalError short-circuiting."""
    # The flaky-API step mutates this module-level counter; reset it here
    # so repeated invocations of main() behave the same.
    global attempt_count

    # Configure for transient mode
    reset_config()
    configure(default_durable=False)

    print("=== Transient Workflow - Retry Mechanics ===\n")

    # Example 1: Successful retry
    print("Example 1: API call with retries\n")
    attempt_count = 0  # Reset counter

    run_id = await start(api_workflow, "request-123")
    print(f"\nWorkflow completed: {run_id}")
    print(f"Total attempts: {attempt_count}")

    # Example 2: FatalError (no retry)
    print("\n" + "=" * 60)
    print("\nExample 2: FatalError (no retry)\n")

    # NOTE(review): assumes start() re-raises the step's FatalError to the
    # caller in transient mode - confirm against pyworkflow's start() docs.
    try:
        await start(validation_workflow, -5)
    except FatalError as e:
        print(f"✗ Workflow failed with FatalError: {e}")
        print(" (No retries attempted)")

    print("\n=== Retry Behavior ===")
    print("✓ max_retries=3 means: 1 initial + 3 retries = 4 total attempts")
    print("✓ retry_delay=1 adds 1 second delay between retries")
    print("✓ Retries happen inline (no event log needed)")
    print("✓ FatalError skips retries and fails immediately")

    print("\n=== Error Types ===")
    print("Exception - Will retry if max_retries > 0")
    print("FatalError - Never retries, fails immediately")
    print("RetryableError - Will retry (same as Exception)")

    print("\n=== Difference from Durable Mode ===")
    print("Transient: Retries happen inline, not recorded")
    print("Durable: Retries recorded in event log for audit")
    print("\nSee examples/local/durable/03_retries.py for comparison")
127
+
128
+
129
if __name__ == "__main__":
    # Script entry point: run the async demo on a fresh event loop.
    asyncio.run(main())